398 files changed, 4137 insertions(+), 3086 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 44afa0e5057d..4ff65047bb9a 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt | |||
| @@ -4,7 +4,7 @@ Required properties: | |||
| 4 | - compatible: Should be "cdns,[<chip>-]{macb|gem}" | 4 | - compatible: Should be "cdns,[<chip>-]{macb|gem}" |
| 5 | Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs. | 5 | Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs. |
| 6 | Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb". | 6 | Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb". |
| 7 | Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on | 7 | Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on |
| 8 | the Cadence GEM, or the generic form: "cdns,gem". | 8 | the Cadence GEM, or the generic form: "cdns,gem". |
| 9 | - reg: Address and length of the register set for the device | 9 | - reg: Address and length of the register set for the device |
| 10 | - interrupts: Should contain macb interrupt | 10 | - interrupts: Should contain macb interrupt |
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmi.txt b/Documentation/devicetree/bindings/video/exynos_hdmi.txt index 589edee37394..589edee37394 100644 --- a/Documentation/devicetree/bindings/drm/exynos/hdmi.txt +++ b/Documentation/devicetree/bindings/video/exynos_hdmi.txt | |||
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt index fa166d945809..fa166d945809 100644 --- a/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt +++ b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt | |||
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt index 858f4f9b902f..858f4f9b902f 100644 --- a/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt +++ b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt | |||
diff --git a/Documentation/devicetree/bindings/drm/exynos/mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt index 9b2ea0343566..9b2ea0343566 100644 --- a/Documentation/devicetree/bindings/drm/exynos/mixer.txt +++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt | |||
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt index ef9d06c9f8fd..0efedaad5165 100644 --- a/Documentation/devicetree/usage-model.txt +++ b/Documentation/devicetree/usage-model.txt | |||
| @@ -191,9 +191,11 @@ Linux it will look something like this: | |||
| 191 | }; | 191 | }; |
| 192 | 192 | ||
| 193 | The bootargs property contains the kernel arguments, and the initrd-* | 193 | The bootargs property contains the kernel arguments, and the initrd-* |
| 194 | properties define the address and size of an initrd blob. The | 194 | properties define the address and size of an initrd blob. Note that |
| 195 | chosen node may also optionally contain an arbitrary number of | 195 | initrd-end is the first address after the initrd image, so this doesn't |
| 196 | additional properties for platform-specific configuration data. | 196 | match the usual semantic of struct resource. The chosen node may also |
| 197 | optionally contain an arbitrary number of additional properties for | ||
| 198 | platform-specific configuration data. | ||
| 197 | 199 | ||
| 198 | During early boot, the architecture setup code calls of_scan_flat_dt() | 200 | During early boot, the architecture setup code calls of_scan_flat_dt() |
| 199 | several times with different helper callbacks to parse device tree | 201 | several times with different helper callbacks to parse device tree |
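The distinction called out above matters when computing the initrd size: because initrd-end is the first address after the image, the size is simply end minus start, with no "+ 1" as struct resource semantics would suggest. Below is a minimal sketch of how early boot code might read the two /chosen properties from inside an of_scan_flat_dt() callback; the property names and the of_get_flat_dt_prop()/of_read_ulong() helpers are assumed from the usual flattened-tree API.

	/* Sketch only: read the /chosen initrd properties and derive the size. */
	unsigned long len, start = 0, end = 0, size;
	__be32 *prop;

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (prop)
		start = of_read_ulong(prop, len / 4);
	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (prop)
		end = of_read_ulong(prop, len / 4);

	/* initrd-end is the first byte after the image, so no "+ 1" here. */
	size = end - start;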
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c3bfacb92910..6e3b18a8afc6 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
| 3005 | Force threading of all interrupt handlers except those | 3005 | Force threading of all interrupt handlers except those |
| 3006 | marked explicitly IRQF_NO_THREAD. | 3006 | marked explicitly IRQF_NO_THREAD. |
| 3007 | 3007 | ||
| 3008 | tmem [KNL,XEN] | ||
| 3009 | Enable the Transcendent memory driver if built-in. | ||
| 3010 | |||
| 3011 | tmem.cleancache=0|1 [KNL, XEN] | ||
| 3012 | Default is on (1). Disable the usage of the cleancache | ||
| 3013 | API to send clean page-cache pages to the hypervisor. | ||
| 3014 | |||
| 3015 | tmem.frontswap=0|1 [KNL, XEN] | ||
| 3016 | Default is on (1). Disable the usage of the frontswap | ||
| 3017 | API to send swap pages to the hypervisor. If disabled, | ||
| 3018 | selfballooning and selfshrinking are force-disabled as well. | ||
| 3019 | |||
| 3020 | tmem.selfballooning=0|1 [KNL, XEN] | ||
| 3021 | Default is on (1). Disable the driving of swap pages | ||
| 3022 | to the hypervisor. | ||
| 3023 | |||
| 3024 | tmem.selfshrinking=0|1 [KNL, XEN] | ||
| 3025 | Default is on (1). Disable the partial swapoff that | ||
| 3026 | immediately transfers pages from the Xen hypervisor back | ||
| 3027 | to the kernel based on different criteria. | ||
| 3028 | |||
| 3008 | topology= [S390] | 3029 | topology= [S390] |
| 3009 | Format: {off | on} | 3030 | Format: {off | on} |
| 3010 | Specify if the kernel should make use of the cpu | 3031 | Specify if the kernel should make use of the cpu |
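For illustration only, the new options combine on the kernel command line in the usual dotted form; a hypothetical boot line that enables the driver but turns off frontswap (which, per the text above, also force-disables selfballooning and selfshrinking) could read:

	tmem tmem.frontswap=0 tmem.cleancache=1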
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt new file mode 100644 index 000000000000..cbf7ae412da4 --- /dev/null +++ b/Documentation/kernel-per-CPU-kthreads.txt | |||
| @@ -0,0 +1,202 @@ | |||
| 1 | REDUCING OS JITTER DUE TO PER-CPU KTHREADS | ||
| 2 | |||
| 3 | This document lists per-CPU kthreads in the Linux kernel and presents | ||
| 4 | options to control their OS jitter. Note that non-per-CPU kthreads are | ||
| 5 | not listed here. To reduce OS jitter from non-per-CPU kthreads, bind | ||
| 6 | them to a "housekeeping" CPU dedicated to such work. | ||
| 7 | |||
| 8 | |||
| 9 | REFERENCES | ||
| 10 | |||
| 11 | o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs. | ||
| 12 | |||
| 13 | o Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs. | ||
| 14 | |||
| 15 | o man taskset: Using the taskset command to bind tasks to sets | ||
| 16 | of CPUs. | ||
| 17 | |||
| 18 | o man sched_setaffinity: Using the sched_setaffinity() system | ||
| 19 | call to bind tasks to sets of CPUs. | ||
| 20 | |||
| 21 | o /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state, | ||
| 22 | writing "0" to offline and "1" to online. | ||
| 23 | |||
| 24 | o In order to locate kernel-generated OS jitter on CPU N: | ||
| 25 | |||
| 26 | cd /sys/kernel/debug/tracing | ||
| 27 | echo 1 > max_graph_depth # Increase the "1" for more detail | ||
| 28 | echo function_graph > current_tracer | ||
| 29 | # run workload | ||
| 30 | cat per_cpu/cpuN/trace | ||
| 31 | |||
| 32 | |||
| 33 | KTHREADS | ||
| 34 | |||
| 35 | Name: ehca_comp/%u | ||
| 36 | Purpose: Periodically process Infiniband-related work. | ||
| 37 | To reduce its OS jitter, do any of the following: | ||
| 38 | 1. Don't use eHCA Infiniband hardware, instead choosing hardware | ||
| 39 | that does not require per-CPU kthreads. This will prevent these | ||
| 40 | kthreads from being created in the first place. (This will | ||
| 41 | work for most people, as this hardware, though important, is | ||
| 42 | relatively old and is produced in relatively low unit volumes.) | ||
| 43 | 2. Do all eHCA-Infiniband-related work on other CPUs, including | ||
| 44 | interrupts. | ||
| 45 | 3. Rework the eHCA driver so that its per-CPU kthreads are | ||
| 46 | provisioned only on selected CPUs. | ||
| 47 | |||
| 48 | |||
| 49 | Name: irq/%d-%s | ||
| 50 | Purpose: Handle threaded interrupts. | ||
| 51 | To reduce its OS jitter, do the following: | ||
| 52 | 1. Use irq affinity to force the irq threads to execute on | ||
| 53 | some other CPU. | ||
| 54 | |||
| 55 | Name: kcmtpd_ctr_%d | ||
| 56 | Purpose: Handle Bluetooth work. | ||
| 57 | To reduce its OS jitter, do one of the following: | ||
| 58 | 1. Don't use Bluetooth, in which case these kthreads won't be | ||
| 59 | created in the first place. | ||
| 60 | 2. Use irq affinity to force Bluetooth-related interrupts to | ||
| 61 | occur on some other CPU and furthermore initiate all | ||
| 62 | Bluetooth activity on some other CPU. | ||
| 63 | |||
| 64 | Name: ksoftirqd/%u | ||
| 65 | Purpose: Execute softirq handlers when threaded or when under heavy load. | ||
| 66 | To reduce its OS jitter, each softirq vector must be handled | ||
| 67 | separately as follows: | ||
| 68 | TIMER_SOFTIRQ: Do all of the following: | ||
| 69 | 1. To the extent possible, keep the CPU out of the kernel when it | ||
| 70 | is non-idle, for example, by avoiding system calls and by forcing | ||
| 71 | both kernel threads and interrupts to execute elsewhere. | ||
| 72 | 2. Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force | ||
| 73 | the CPU offline, then bring it back online. This forces | ||
| 74 | recurring timers to migrate elsewhere. If you are concerned | ||
| 75 | with multiple CPUs, force them all offline before bringing the | ||
| 76 | first one back online. Once you have onlined the CPUs in question, | ||
| 77 | do not offline any other CPUs, because doing so could force the | ||
| 78 | timer back onto one of the CPUs in question. | ||
| 79 | NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following: | ||
| 80 | 1. Force networking interrupts onto other CPUs. | ||
| 81 | 2. Initiate any network I/O on other CPUs. | ||
| 82 | 3. Once your application has started, prevent CPU-hotplug operations | ||
| 83 | from being initiated from tasks that might run on the CPU to | ||
| 84 | be de-jittered. (It is OK to force this CPU offline and then | ||
| 85 | bring it back online before you start your application.) | ||
| 86 | BLOCK_SOFTIRQ: Do all of the following: | ||
| 87 | 1. Force block-device interrupts onto some other CPU. | ||
| 88 | 2. Initiate any block I/O on other CPUs. | ||
| 89 | 3. Once your application has started, prevent CPU-hotplug operations | ||
| 90 | from being initiated from tasks that might run on the CPU to | ||
| 91 | be de-jittered. (It is OK to force this CPU offline and then | ||
| 92 | bring it back online before you start your application.) | ||
| 93 | BLOCK_IOPOLL_SOFTIRQ: Do all of the following: | ||
| 94 | 1. Force block-device interrupts onto some other CPU. | ||
| 95 | 2. Initiate any block I/O and block-I/O polling on other CPUs. | ||
| 96 | 3. Once your application has started, prevent CPU-hotplug operations | ||
| 97 | from being initiated from tasks that might run on the CPU to | ||
| 98 | be de-jittered. (It is OK to force this CPU offline and then | ||
| 99 | bring it back online before you start your application.) | ||
| 100 | TASKLET_SOFTIRQ: Do one or more of the following: | ||
| 101 | 1. Avoid use of drivers that use tasklets. (Such drivers will contain | ||
| 102 | calls to things like tasklet_schedule().) | ||
| 103 | 2. Convert all drivers that you must use from tasklets to workqueues. | ||
| 104 | 3. Force interrupts for drivers using tasklets onto other CPUs, | ||
| 105 | and also do I/O involving these drivers on other CPUs. | ||
| 106 | SCHED_SOFTIRQ: Do all of the following: | ||
| 107 | 1. Avoid sending scheduler IPIs to the CPU to be de-jittered, | ||
| 108 | for example, ensure that at most one runnable kthread is present | ||
| 109 | on that CPU. If a thread that expects to run on the de-jittered | ||
| 110 | CPU awakens, the scheduler will send an IPI that can result in | ||
| 111 | a subsequent SCHED_SOFTIRQ. | ||
| 112 | 2. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, | ||
| 113 | CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU | ||
| 114 | to be de-jittered is marked as an adaptive-ticks CPU using the | ||
| 115 | "nohz_full=" boot parameter. This reduces the number of | ||
| 116 | scheduler-clock interrupts that the de-jittered CPU receives, | ||
| 117 | minimizing its chances of being selected to do the load balancing | ||
| 118 | work that runs in SCHED_SOFTIRQ context. | ||
| 119 | 3. To the extent possible, keep the CPU out of the kernel when it | ||
| 120 | is non-idle, for example, by avoiding system calls and by | ||
| 121 | forcing both kernel threads and interrupts to execute elsewhere. | ||
| 122 | This further reduces the number of scheduler-clock interrupts | ||
| 123 | received by the de-jittered CPU. | ||
| 124 | HRTIMER_SOFTIRQ: Do all of the following: | ||
| 125 | 1. To the extent possible, keep the CPU out of the kernel when it | ||
| 126 | is non-idle. For example, avoid system calls and force both | ||
| 127 | kernel threads and interrupts to execute elsewhere. | ||
| 128 | 2. Build with CONFIG_HOTPLUG_CPU=y. Once boot completes, force the | ||
| 129 | CPU offline, then bring it back online. This forces recurring | ||
| 130 | timers to migrate elsewhere. If you are concerned with multiple | ||
| 131 | CPUs, force them all offline before bringing the first one | ||
| 132 | back online. Once you have onlined the CPUs in question, do not | ||
| 133 | offline any other CPUs, because doing so could force the timer | ||
| 134 | back onto one of the CPUs in question. | ||
| 135 | RCU_SOFTIRQ: Do at least one of the following: | ||
| 136 | 1. Offload callbacks and keep the CPU in either dyntick-idle or | ||
| 137 | adaptive-ticks state by doing all of the following: | ||
| 138 | a. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, | ||
| 139 | CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU | ||
| 140 | to be de-jittered is marked as an adaptive-ticks CPU using | ||
| 141 | the "nohz_full=" boot parameter. Bind the rcuo kthreads | ||
| 142 | to housekeeping CPUs, which can tolerate OS jitter. | ||
| 143 | b. To the extent possible, keep the CPU out of the kernel | ||
| 144 | when it is non-idle, for example, by avoiding system | ||
| 145 | calls and by forcing both kernel threads and interrupts | ||
| 146 | to execute elsewhere. | ||
| 147 | 2. Enable RCU to do its processing remotely via dyntick-idle by | ||
| 148 | doing all of the following: | ||
| 149 | a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y. | ||
| 150 | b. Ensure that the CPU goes idle frequently, allowing other | ||
| 151 | CPUs to detect that it has passed through an RCU quiescent | ||
| 152 | state. If the kernel is built with CONFIG_NO_HZ_FULL=y, | ||
| 153 | userspace execution also allows other CPUs to detect that | ||
| 154 | the CPU in question has passed through a quiescent state. | ||
| 155 | c. To the extent possible, keep the CPU out of the kernel | ||
| 156 | when it is non-idle, for example, by avoiding system | ||
| 157 | calls and by forcing both kernel threads and interrupts | ||
| 158 | to execute elsewhere. | ||
| 159 | |||
| 160 | Name: rcuc/%u | ||
| 161 | Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels. | ||
| 162 | To reduce its OS jitter, do at least one of the following: | ||
| 163 | 1. Build the kernel with CONFIG_PREEMPT=n. This prevents these | ||
| 164 | kthreads from being created in the first place, and also obviates | ||
| 165 | the need for RCU priority boosting. This approach is feasible | ||
| 166 | for workloads that do not require high degrees of responsiveness. | ||
| 167 | 2. Build the kernel with CONFIG_RCU_BOOST=n. This prevents these | ||
| 168 | kthreads from being created in the first place. This approach | ||
| 169 | is feasible only if your workload never requires RCU priority | ||
| 170 | boosting, for example, if you ensure frequent idle time on all | ||
| 171 | CPUs that might execute within the kernel. | ||
| 172 | 3. Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y, | ||
| 173 | which offloads all RCU callbacks to kthreads that can be moved | ||
| 174 | off of CPUs susceptible to OS jitter. This approach prevents the | ||
| 175 | rcuc/%u kthreads from having any work to do, so that they are | ||
| 176 | never awakened. | ||
| 177 | 4. Ensure that the CPU never enters the kernel, and, in particular, | ||
| 178 | avoid initiating any CPU hotplug operations on this CPU. This is | ||
| 179 | another way of preventing any callbacks from being queued on the | ||
| 180 | CPU, again preventing the rcuc/%u kthreads from having any work | ||
| 181 | to do. | ||
| 182 | |||
| 183 | Name: rcuob/%d, rcuop/%d, and rcuos/%d | ||
| 184 | Purpose: Offload RCU callbacks from the corresponding CPU. | ||
| 185 | To reduce its OS jitter, do at least one of the following: | ||
| 186 | 1. Use affinity, cgroups, or other mechanism to force these kthreads | ||
| 187 | to execute on some other CPU. | ||
| 188 | 2. Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these | ||
| 189 | kthreads from being created in the first place. However, please | ||
| 190 | note that this will not eliminate OS jitter, but will instead | ||
| 191 | shift it to RCU_SOFTIRQ. | ||
| 192 | |||
| 193 | Name: watchdog/%u | ||
| 194 | Purpose: Detect software lockups on each CPU. | ||
| 195 | To reduce its OS jitter, do at least one of the following: | ||
| 196 | 1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these | ||
| 197 | kthreads from being created in the first place. | ||
| 198 | 2. Echo a zero to /proc/sys/kernel/watchdog to disable the | ||
| 199 | watchdog timer. | ||
| 200 | 3. Echo a large number to /proc/sys/kernel/watchdog_thresh in | ||
| 201 | order to reduce the frequency of OS jitter due to the watchdog | ||
| 202 | timer down to a level that is acceptable for your workload. | ||
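Tying the references above together, a de-jittering session for, say, CPU 3 could look like the shell sketch below; the IRQ number and the task name are invented for illustration:

	# Steer an interrupt and a housekeeping task onto CPUs 0-2 only
	echo 7 > /proc/irq/45/smp_affinity
	taskset -pc 0-2 $(pidof housekeeperd)
	# Offline/online CPU 3 to migrate recurring timers elsewhere
	echo 0 > /sys/devices/system/cpu/cpu3/online
	echo 1 > /sys/devices/system/cpu/cpu3/online
	# Reduce watchdog-induced jitter
	echo 0 > /proc/sys/kernel/watchdog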
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 504dfe4d52eb..a66c9821b5ce 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt | |||
| @@ -268,7 +268,7 @@ situations. | |||
| 268 | System Power Management Phases | 268 | System Power Management Phases |
| 269 | ------------------------------ | 269 | ------------------------------ |
| 270 | Suspending or resuming the system is done in several phases. Different phases | 270 | Suspending or resuming the system is done in several phases. Different phases |
| 271 | are used for standby or memory sleep states ("suspend-to-RAM") and the | 271 | are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the |
| 272 | hibernation state ("suspend-to-disk"). Each phase involves executing callbacks | 272 | hibernation state ("suspend-to-disk"). Each phase involves executing callbacks |
| 273 | for every device before the next phase begins. Not all busses or classes | 273 | for every device before the next phase begins. Not all busses or classes |
| 274 | support all these callbacks and not all drivers use all the callbacks. The | 274 | support all these callbacks and not all drivers use all the callbacks. The |
| @@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one. | |||
| 309 | 309 | ||
| 310 | Entering System Suspend | 310 | Entering System Suspend |
| 311 | ----------------------- | 311 | ----------------------- |
| 312 | When the system goes into the standby or memory sleep state, the phases are: | 312 | When the system goes into the freeze, standby or memory sleep state, |
| 313 | the phases are: | ||
| 313 | 314 | ||
| 314 | prepare, suspend, suspend_late, suspend_noirq. | 315 | prepare, suspend, suspend_late, suspend_noirq. |
| 315 | 316 | ||
| @@ -368,7 +369,7 @@ the devices that were suspended. | |||
| 368 | 369 | ||
| 369 | Leaving System Suspend | 370 | Leaving System Suspend |
| 370 | ---------------------- | 371 | ---------------------- |
| 371 | When resuming from standby or memory sleep, the phases are: | 372 | When resuming from freeze, standby or memory sleep, the phases are: |
| 372 | 373 | ||
| 373 | resume_noirq, resume_early, resume, complete. | 374 | resume_noirq, resume_early, resume, complete. |
| 374 | 375 | ||
| @@ -433,8 +434,8 @@ the system log. | |||
| 433 | 434 | ||
| 434 | Entering Hibernation | 435 | Entering Hibernation |
| 435 | -------------------- | 436 | -------------------- |
| 436 | Hibernating the system is more complicated than putting it into the standby or | 437 | Hibernating the system is more complicated than putting it into the other |
| 437 | memory sleep state, because it involves creating and saving a system image. | 438 | sleep states, because it involves creating and saving a system image. |
| 438 | Therefore there are more phases for hibernation, with a different set of | 439 | Therefore there are more phases for hibernation, with a different set of |
| 439 | callbacks. These phases always run after tasks have been frozen and memory has | 440 | callbacks. These phases always run after tasks have been frozen and memory has |
| 440 | been freed. | 441 | been freed. |
| @@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state. | |||
| 485 | 486 | ||
| 486 | At this point the system image is saved, and the devices then need to be | 487 | At this point the system image is saved, and the devices then need to be |
| 487 | prepared for the upcoming system shutdown. This is much like suspending them | 488 | prepared for the upcoming system shutdown. This is much like suspending them |
| 488 | before putting the system into the standby or memory sleep state, and the phases | 489 | before putting the system into the freeze, standby or memory sleep state, |
| 489 | are similar. | 490 | and the phases are similar. |
| 490 | 491 | ||
| 491 | 9. The prepare phase is discussed above. | 492 | 9. The prepare phase is discussed above. |
| 492 | 493 | ||
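From a driver's point of view the phases above are entered through its dev_pm_ops; a minimal sketch with placeholder callback names (what the callbacks actually do depends entirely on the device) might be:

	/* Sketch: placeholder system-sleep callbacks for a driver. */
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware during the "suspend" phase */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the hardware back during the "resume" phase */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
	/* ...and the driver points .driver.pm at &foo_pm_ops */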
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index c537834af005..f1f0f59a7c47 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt | |||
| @@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs | |||
| 7 | is mounted at /sys). | 7 | is mounted at /sys). |
| 8 | 8 | ||
| 9 | /sys/power/state controls system power state. Reading from this file | 9 | /sys/power/state controls system power state. Reading from this file |
| 10 | returns what states are supported, which is hard-coded to 'standby' | 10 | returns what states are supported, which is hard-coded to 'freeze', |
| 11 | (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' | 11 | 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' |
| 12 | (Suspend-to-Disk). | 12 | (Suspend-to-Disk). |
| 13 | 13 | ||
| 14 | Writing to this file one of those strings causes the system to | 14 | Writing to this file one of those strings causes the system to |
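In practice the new string simply appears next to the existing ones; the exact set depends on platform support, but a session could look like:

	# cat /sys/power/state
	freeze standby mem disk
	# echo freeze > /sys/power/state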
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt index c2a4a346c0d9..a81fa254303d 100644 --- a/Documentation/power/notifiers.txt +++ b/Documentation/power/notifiers.txt | |||
| @@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose. | |||
| 15 | The subsystems or drivers having such needs can register suspend notifiers that | 15 | The subsystems or drivers having such needs can register suspend notifiers that |
| 16 | will be called upon the following events by the PM core: | 16 | will be called upon the following events by the PM core: |
| 17 | 17 | ||
| 18 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will | 18 | PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen |
| 19 | be frozen immediately. | 19 | immediately. This is different from PM_SUSPEND_PREPARE |
| 20 | below because here we do additional work between notifiers | ||
| 21 | and drivers freezing. | ||
| 20 | 22 | ||
| 21 | PM_POST_HIBERNATION The system memory state has been restored from a | 23 | PM_POST_HIBERNATION The system memory state has been restored from a |
| 22 | hibernation image or an error occurred during | 24 | hibernation image or an error occurred during |
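A subsystem that needs this hook registers a notifier block with register_pm_notifier(); here is a minimal sketch, with foo_prepare_for_hibernation() standing in for whatever work the driver actually has to do:

	/* Sketch: act on hibernation preparation (placeholder names). */
	static int foo_pm_notify(struct notifier_block *nb,
				 unsigned long event, void *unused)
	{
		if (event == PM_HIBERNATION_PREPARE)
			foo_prepare_for_hibernation();
		return NOTIFY_OK;
	}

	static struct notifier_block foo_pm_nb = {
		.notifier_call = foo_pm_notify,
	};

	/* at init time: register_pm_notifier(&foo_pm_nb); */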
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt index 4416b28630df..442d43df9b25 100644 --- a/Documentation/power/states.txt +++ b/Documentation/power/states.txt | |||
| @@ -2,12 +2,26 @@ | |||
| 2 | System Power Management States | 2 | System Power Management States |
| 3 | 3 | ||
| 4 | 4 | ||
| 5 | The kernel supports three power management states generically, though | 5 | The kernel supports four power management states generically, though |
| 6 | each is dependent on platform support code to implement the low-level | 6 | one is generic and the other three are dependent on platform support |
| 7 | details for each state. This file describes each state, what they are | 7 | code to implement the low-level details for each state. |
| 8 | This file describes each state, what they are | ||
| 8 | commonly called, what ACPI state they map to, and what string to write | 9 | commonly called, what ACPI state they map to, and what string to write |
| 9 | to /sys/power/state to enter that state | 10 | to /sys/power/state to enter that state |
| 10 | 11 | ||
| 12 | state: Freeze / Low-Power Idle | ||
| 13 | ACPI state: S0 | ||
| 14 | String: "freeze" | ||
| 15 | |||
| 16 | This state is a generic, pure software, light-weight, low-power state. | ||
| 17 | It allows more energy to be saved relative to idle by freezing user | ||
| 18 | space and putting all I/O devices into low-power states (possibly | ||
| 19 | lower-power than available at run time), such that the processors can | ||
| 20 | spend more time in their idle states. | ||
| 21 | This state can be used for platforms without Standby/Suspend-to-RAM | ||
| 22 | support, or it can be used in addition to Suspend-to-RAM (memory sleep) | ||
| 23 | to provide reduced resume latency. | ||
| 24 | |||
| 11 | 25 | ||
| 12 | State: Standby / Power-On Suspend | 26 | State: Standby / Power-On Suspend |
| 13 | ACPI State: S1 | 27 | ACPI State: S1 |
| @@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which | |||
| 22 | also offers low power savings, but low resume latency. Not all devices | 36 | also offers low power savings, but low resume latency. Not all devices |
| 23 | support D1, and those that don't are left on. | 37 | support D1, and those that don't are left on. |
| 24 | 38 | ||
| 25 | A transition from Standby to the On state should take about 1-2 | ||
| 26 | seconds. | ||
| 27 | |||
| 28 | 39 | ||
| 29 | State: Suspend-to-RAM | 40 | State: Suspend-to-RAM |
| 30 | ACPI State: S3 | 41 | ACPI State: S3 |
| @@ -42,9 +53,6 @@ transition back to the On state. | |||
| 42 | For at least ACPI, STR requires some minimal boot-strapping code to | 53 | For at least ACPI, STR requires some minimal boot-strapping code to |
| 43 | resume the system from STR. This may be true on other platforms. | 54 | resume the system from STR. This may be true on other platforms. |
| 44 | 55 | ||
| 45 | A transition from Suspend-to-RAM to the On state should take about | ||
| 46 | 3-5 seconds. | ||
| 47 | |||
| 48 | 56 | ||
| 49 | State: Suspend-to-disk | 57 | State: Suspend-to-disk |
| 50 | ACPI State: S4 | 58 | ACPI State: S4 |
| @@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering | |||
| 74 | down offers greater savings, and allows this mechanism to work on any | 82 | down offers greater savings, and allows this mechanism to work on any |
| 75 | system. However, entering a real low-power state allows the user to | 83 | system. However, entering a real low-power state allows the user to |
| 76 | trigger wake up events (e.g. pressing a key or opening a laptop lid). | 84 | trigger wake up events (e.g. pressing a key or opening a laptop lid). |
| 77 | |||
| 78 | A transition from Suspend-to-Disk to the On state should take about 30 | ||
| 79 | seconds, though it's typically a bit more with the current | ||
| 80 | implementation. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 3d7782b9f90d..829c0321108b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -4976,6 +4976,13 @@ S: Maintained | |||
| 4976 | F: Documentation/hwmon/lm90 | 4976 | F: Documentation/hwmon/lm90 |
| 4977 | F: drivers/hwmon/lm90.c | 4977 | F: drivers/hwmon/lm90.c |
| 4978 | 4978 | ||
| 4979 | LM95234 HARDWARE MONITOR DRIVER | ||
| 4980 | M: Guenter Roeck <linux@roeck-us.net> | ||
| 4981 | L: lm-sensors@lm-sensors.org | ||
| 4982 | S: Maintained | ||
| 4983 | F: Documentation/hwmon/lm95234 | ||
| 4984 | F: drivers/hwmon/lm95234.c | ||
| 4985 | |||
| 4979 | LME2510 MEDIA DRIVER | 4986 | LME2510 MEDIA DRIVER |
| 4980 | M: Malcolm Priestley <tvboxspy@gmail.com> | 4987 | M: Malcolm Priestley <tvboxspy@gmail.com> |
| 4981 | L: linux-media@vger.kernel.org | 4988 | L: linux-media@vger.kernel.org |
| @@ -7854,7 +7861,7 @@ L: linux-scsi@vger.kernel.org | |||
| 7854 | L: target-devel@vger.kernel.org | 7861 | L: target-devel@vger.kernel.org |
| 7855 | L: http://groups.google.com/group/linux-iscsi-target-dev | 7862 | L: http://groups.google.com/group/linux-iscsi-target-dev |
| 7856 | W: http://www.linux-iscsi.org | 7863 | W: http://www.linux-iscsi.org |
| 7857 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master | 7864 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master |
| 7858 | S: Supported | 7865 | S: Supported |
| 7859 | F: drivers/target/ | 7866 | F: drivers/target/ |
| 7860 | F: include/target/ | 7867 | F: include/target/ |
| @@ -8182,6 +8189,13 @@ F: drivers/mmc/host/sh_mobile_sdhi.c | |||
| 8182 | F: include/linux/mmc/tmio.h | 8189 | F: include/linux/mmc/tmio.h |
| 8183 | F: include/linux/mmc/sh_mobile_sdhi.h | 8190 | F: include/linux/mmc/sh_mobile_sdhi.h |
| 8184 | 8191 | ||
| 8192 | TMP401 HARDWARE MONITOR DRIVER | ||
| 8193 | M: Guenter Roeck <linux@roeck-us.net> | ||
| 8194 | L: lm-sensors@lm-sensors.org | ||
| 8195 | S: Maintained | ||
| 8196 | F: Documentation/hwmon/tmp401 | ||
| 8197 | F: drivers/hwmon/tmp401.c | ||
| 8198 | |||
| 8185 | TMPFS (SHMEM FILESYSTEM) | 8199 | TMPFS (SHMEM FILESYSTEM) |
| 8186 | M: Hugh Dickins <hughd@google.com> | 8200 | M: Hugh Dickins <hughd@google.com> |
| 8187 | L: linux-mm@kvack.org | 8201 | L: linux-mm@kvack.org |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 10 | 2 | PATCHLEVEL = 10 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc2 |
| 5 | NAME = Unicycling Gorilla | 5 | NAME = Unicycling Gorilla |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/Kconfig b/arch/Kconfig index dd0e8eb8042f..a4429bcd609e 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
| @@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS | |||
| 213 | config GENERIC_SMP_IDLE_THREAD | 213 | config GENERIC_SMP_IDLE_THREAD |
| 214 | bool | 214 | bool |
| 215 | 215 | ||
| 216 | config GENERIC_IDLE_POLL_SETUP | ||
| 217 | bool | ||
| 218 | |||
| 216 | # Select if arch init_task initializer is different to init/init_task.c | 219 | # Select if arch init_task initializer is different to init/init_task.c |
| 217 | config ARCH_INIT_TASK | 220 | config ARCH_INIT_TASK |
| 218 | bool | 221 | bool |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d423d58f938d..49d993cee512 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -38,6 +38,7 @@ config ARM | |||
| 38 | select HAVE_GENERIC_HARDIRQS | 38 | select HAVE_GENERIC_HARDIRQS |
| 39 | select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) | 39 | select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) |
| 40 | select HAVE_IDE if PCI || ISA || PCMCIA | 40 | select HAVE_IDE if PCI || ISA || PCMCIA |
| 41 | select HAVE_IRQ_TIME_ACCOUNTING | ||
| 41 | select HAVE_KERNEL_GZIP | 42 | select HAVE_KERNEL_GZIP |
| 42 | select HAVE_KERNEL_LZMA | 43 | select HAVE_KERNEL_LZMA |
| 43 | select HAVE_KERNEL_LZO | 44 | select HAVE_KERNEL_LZO |
| @@ -488,7 +489,7 @@ config ARCH_IXP4XX | |||
| 488 | config ARCH_DOVE | 489 | config ARCH_DOVE |
| 489 | bool "Marvell Dove" | 490 | bool "Marvell Dove" |
| 490 | select ARCH_REQUIRE_GPIOLIB | 491 | select ARCH_REQUIRE_GPIOLIB |
| 491 | select CPU_V7 | 492 | select CPU_PJ4 |
| 492 | select GENERIC_CLOCKEVENTS | 493 | select GENERIC_CLOCKEVENTS |
| 493 | select MIGHT_HAVE_PCI | 494 | select MIGHT_HAVE_PCI |
| 494 | select PINCTRL | 495 | select PINCTRL |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 47374085befd..1ba358ba16b8 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
| @@ -309,7 +309,7 @@ define archhelp | |||
| 309 | echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' | 309 | echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' |
| 310 | echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)' | 310 | echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)' |
| 311 | echo ' uImage - U-Boot wrapped zImage' | 311 | echo ' uImage - U-Boot wrapped zImage' |
| 312 | echo ' bootpImage - Combined zImage and initial RAM disk' | 312 | echo ' bootpImage - Combined zImage and initial RAM disk' |
| 313 | echo ' (supply initrd image via make variable INITRD=<path>)' | 313 | echo ' (supply initrd image via make variable INITRD=<path>)' |
| 314 | echo '* dtbs - Build device tree blobs for enabled boards' | 314 | echo '* dtbs - Build device tree blobs for enabled boards' |
| 315 | echo ' install - Install uncompressed kernel' | 315 | echo ' install - Install uncompressed kernel' |
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi index 272bbc65fab0..550eb772c30e 100644 --- a/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/arch/arm/boot/dts/armada-370-xp.dtsi | |||
| @@ -33,7 +33,8 @@ | |||
| 33 | #size-cells = <1>; | 33 | #size-cells = <1>; |
| 34 | compatible = "simple-bus"; | 34 | compatible = "simple-bus"; |
| 35 | interrupt-parent = <&mpic>; | 35 | interrupt-parent = <&mpic>; |
| 36 | ranges = <0 0 0xd0000000 0x100000>; | 36 | ranges = <0 0 0xd0000000 0x0100000 /* internal registers */ |
| 37 | 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>; | ||
| 37 | 38 | ||
| 38 | internal-regs { | 39 | internal-regs { |
| 39 | compatible = "simple-bus"; | 40 | compatible = "simple-bus"; |
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index b2c1b5af9749..aee2b1866ce2 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi | |||
| @@ -29,7 +29,8 @@ | |||
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | soc { | 31 | soc { |
| 32 | ranges = <0 0xd0000000 0x100000>; | 32 | ranges = <0 0xd0000000 0x0100000 /* internal registers */ |
| 33 | 0xe0000000 0xe0000000 0x8100000 /* PCIe */>; | ||
| 33 | internal-regs { | 34 | internal-regs { |
| 34 | system-controller@18200 { | 35 | system-controller@18200 { |
| 35 | compatible = "marvell,armada-370-xp-system-controller"; | 36 | compatible = "marvell,armada-370-xp-system-controller"; |
| @@ -38,12 +39,12 @@ | |||
| 38 | 39 | ||
| 39 | L2: l2-cache { | 40 | L2: l2-cache { |
| 40 | compatible = "marvell,aurora-outer-cache"; | 41 | compatible = "marvell,aurora-outer-cache"; |
| 41 | reg = <0xd0008000 0x1000>; | 42 | reg = <0x08000 0x1000>; |
| 42 | cache-id-part = <0x100>; | 43 | cache-id-part = <0x100>; |
| 43 | wt-override; | 44 | wt-override; |
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 46 | mpic: interrupt-controller@20000 { | 47 | interrupt-controller@20000 { |
| 47 | reg = <0x20a00 0x1d0>, <0x21870 0x58>; | 48 | reg = <0x20a00 0x1d0>, <0x21870 0x58>; |
| 48 | }; | 49 | }; |
| 49 | 50 | ||
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 26ad06fc147e..3ee63d128e27 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts | |||
| @@ -39,6 +39,9 @@ | |||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | soc { | 41 | soc { |
| 42 | ranges = <0 0 0xd0000000 0x100000 | ||
| 43 | 0xf0000000 0 0xf0000000 0x1000000>; | ||
| 44 | |||
| 42 | internal-regs { | 45 | internal-regs { |
| 43 | serial@12000 { | 46 | serial@12000 { |
| 44 | clock-frequency = <250000000>; | 47 | clock-frequency = <250000000>; |
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index f14d36c46159..46b785064dd8 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts | |||
| @@ -27,6 +27,9 @@ | |||
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | soc { | 29 | soc { |
| 30 | ranges = <0 0 0xd0000000 0x100000 | ||
| 31 | 0xf0000000 0 0xf0000000 0x8000000>; | ||
| 32 | |||
| 30 | internal-regs { | 33 | internal-regs { |
| 31 | serial@12000 { | 34 | serial@12000 { |
| 32 | clock-frequency = <250000000>; | 35 | clock-frequency = <250000000>; |
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index bacab11c10dc..5b902f9a3af2 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | wt-override; | 31 | wt-override; |
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | mpic: interrupt-controller@20000 { | 34 | interrupt-controller@20000 { |
| 35 | reg = <0x20a00 0x2d0>, <0x21070 0x58>; | 35 | reg = <0x20a00 0x2d0>, <0x21070 0x58>; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
diff --git a/arch/arm/boot/dts/at91sam9x25ek.dts b/arch/arm/boot/dts/at91sam9x25ek.dts index 3b40d11d65e7..315250b4995e 100644 --- a/arch/arm/boot/dts/at91sam9x25ek.dts +++ b/arch/arm/boot/dts/at91sam9x25ek.dts | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | /include/ "at91sam9x5ek.dtsi" | 11 | /include/ "at91sam9x5ek.dtsi" |
| 12 | 12 | ||
| 13 | / { | 13 | / { |
| 14 | model = "Atmel AT91SAM9G25-EK"; | 14 | model = "Atmel AT91SAM9X25-EK"; |
| 15 | compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; | 15 | compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; |
| 16 | 16 | ||
| 17 | ahb { | 17 | ahb { |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 82a404da1c0d..99ba6e14ebf3 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi | |||
| @@ -516,7 +516,7 @@ | |||
| 516 | usb_otg_hs: usb_otg_hs@480ab000 { | 516 | usb_otg_hs: usb_otg_hs@480ab000 { |
| 517 | compatible = "ti,omap3-musb"; | 517 | compatible = "ti,omap3-musb"; |
| 518 | reg = <0x480ab000 0x1000>; | 518 | reg = <0x480ab000 0x1000>; |
| 519 | interrupts = <0 92 0x4>, <0 93 0x4>; | 519 | interrupts = <92>, <93>; |
| 520 | interrupt-names = "mc", "dma"; | 520 | interrupt-names = "mc", "dma"; |
| 521 | ti,hwmods = "usb_otg_hs"; | 521 | ti,hwmods = "usb_otg_hs"; |
| 522 | multipoint = <1>; | 522 | multipoint = <1>; |
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 2e643ea51cce..5000e0d42849 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi | |||
| @@ -75,11 +75,6 @@ | |||
| 75 | compatible = "atmel,at91sam9x5-spi"; | 75 | compatible = "atmel,at91sam9x5-spi"; |
| 76 | reg = <0xf0004000 0x100>; | 76 | reg = <0xf0004000 0x100>; |
| 77 | interrupts = <24 4 3>; | 77 | interrupts = <24 4 3>; |
| 78 | cs-gpios = <&pioD 13 0 | ||
| 79 | &pioD 14 0 /* conflicts with SCK0 and CANRX0 */ | ||
| 80 | &pioD 15 0 /* conflicts with CTS0 and CANTX0 */ | ||
| 81 | &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */ | ||
| 82 | >; | ||
| 83 | pinctrl-names = "default"; | 78 | pinctrl-names = "default"; |
| 84 | pinctrl-0 = <&pinctrl_spi0>; | 79 | pinctrl-0 = <&pinctrl_spi0>; |
| 85 | status = "disabled"; | 80 | status = "disabled"; |
| @@ -156,7 +151,7 @@ | |||
| 156 | }; | 151 | }; |
| 157 | 152 | ||
| 158 | macb0: ethernet@f0028000 { | 153 | macb0: ethernet@f0028000 { |
| 159 | compatible = "cnds,pc302-gem", "cdns,gem"; | 154 | compatible = "cdns,pc302-gem", "cdns,gem"; |
| 160 | reg = <0xf0028000 0x100>; | 155 | reg = <0xf0028000 0x100>; |
| 161 | interrupts = <34 4 3>; | 156 | interrupts = <34 4 3>; |
| 162 | pinctrl-names = "default"; | 157 | pinctrl-names = "default"; |
| @@ -203,11 +198,6 @@ | |||
| 203 | compatible = "atmel,at91sam9x5-spi"; | 198 | compatible = "atmel,at91sam9x5-spi"; |
| 204 | reg = <0xf8008000 0x100>; | 199 | reg = <0xf8008000 0x100>; |
| 205 | interrupts = <25 4 3>; | 200 | interrupts = <25 4 3>; |
| 206 | cs-gpios = <&pioC 25 0 | ||
| 207 | &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */ | ||
| 208 | &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */ | ||
| 209 | &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */ | ||
| 210 | >; | ||
| 211 | pinctrl-names = "default"; | 201 | pinctrl-names = "default"; |
| 212 | pinctrl-0 = <&pinctrl_spi1>; | 202 | pinctrl-0 = <&pinctrl_spi1>; |
| 213 | status = "disabled"; | 203 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi index 1f8ed404626c..b336e7787cb3 100644 --- a/arch/arm/boot/dts/sama5d3xcm.dtsi +++ b/arch/arm/boot/dts/sama5d3xcm.dtsi | |||
| @@ -32,6 +32,10 @@ | |||
| 32 | 32 | ||
| 33 | ahb { | 33 | ahb { |
| 34 | apb { | 34 | apb { |
| 35 | spi0: spi@f0004000 { | ||
| 36 | cs-gpios = <&pioD 13 0>, <0>, <0>, <0>; | ||
| 37 | }; | ||
| 38 | |||
| 35 | macb0: ethernet@f0028000 { | 39 | macb0: ethernet@f0028000 { |
| 36 | phy-mode = "rgmii"; | 40 | phy-mode = "rgmii"; |
| 37 | }; | 41 | }; |
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index b28fbf3408e3..6f82d9368948 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts | |||
| @@ -14,13 +14,19 @@ | |||
| 14 | bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; | 14 | bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; |
| 15 | }; | 15 | }; |
| 16 | 16 | ||
| 17 | /* This is where the interrupt is routed on the S8815 board */ | ||
| 18 | external-bus@34000000 { | ||
| 19 | ethernet@300 { | ||
| 20 | interrupt-parent = <&gpio3>; | ||
| 21 | interrupts = <8 0x1>; | ||
| 22 | }; | ||
| 23 | }; | ||
| 24 | |||
| 17 | /* Custom board node with GPIO pins to active etc */ | 25 | /* Custom board node with GPIO pins to active etc */ |
| 18 | usb-s8815 { | 26 | usb-s8815 { |
| 19 | /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */ | 27 | /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */ |
| 20 | ethernet-gpio { | 28 | ethernet-gpio { |
| 21 | gpios = <&gpio3 19 0x1>; | 29 | gpios = <&gpio3 8 0x1>; |
| 22 | interrupts = <19 0x1>; | ||
| 23 | interrupt-parent = <&gpio3>; | ||
| 24 | }; | 30 | }; |
| 25 | /* This will bias the MMC/SD card detect line */ | 31 | /* This will bias the MMC/SD card detect line */ |
| 26 | mmcsd-gpio { | 32 | mmcsd-gpio { |
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 52b88d81b7bb..3caed0db6986 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c | |||
| @@ -15,8 +15,6 @@ | |||
| 15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
| 16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
| 17 | 17 | ||
| 18 | #include <linux/irqchip/arm-gic.h> | ||
| 19 | |||
| 20 | #include <asm/mcpm.h> | 18 | #include <asm/mcpm.h> |
| 21 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
| 22 | #include <asm/smp_plat.h> | 20 | #include <asm/smp_plat.h> |
| @@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i | |||
| 49 | static void __cpuinit mcpm_secondary_init(unsigned int cpu) | 47 | static void __cpuinit mcpm_secondary_init(unsigned int cpu) |
| 50 | { | 48 | { |
| 51 | mcpm_cpu_powered_up(); | 49 | mcpm_cpu_powered_up(); |
| 52 | gic_secondary_init(0); | ||
| 53 | } | 50 | } |
| 54 | 51 | ||
| 55 | #ifdef CONFIG_HOTPLUG_CPU | 52 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c1ef64bc5abd..f59090210ec9 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
| @@ -20,6 +20,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y | |||
| 20 | CONFIG_MODVERSIONS=y | 20 | CONFIG_MODVERSIONS=y |
| 21 | CONFIG_MODULE_SRCVERSION_ALL=y | 21 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 22 | # CONFIG_BLK_DEV_BSG is not set | 22 | # CONFIG_BLK_DEV_BSG is not set |
| 23 | CONFIG_ARCH_MULTI_V6=y | ||
| 23 | CONFIG_ARCH_OMAP2PLUS=y | 24 | CONFIG_ARCH_OMAP2PLUS=y |
| 24 | CONFIG_OMAP_RESET_CLOCKS=y | 25 | CONFIG_OMAP_RESET_CLOCKS=y |
| 25 | CONFIG_OMAP_MUX_DEBUG=y | 26 | CONFIG_OMAP_MUX_DEBUG=y |
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index a5f0485133cf..f7ba316164d4 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig | |||
| @@ -153,6 +153,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y | |||
| 153 | CONFIG_MEDIA_USB_SUPPORT=y | 153 | CONFIG_MEDIA_USB_SUPPORT=y |
| 154 | CONFIG_USB_VIDEO_CLASS=m | 154 | CONFIG_USB_VIDEO_CLASS=m |
| 155 | CONFIG_DRM=y | 155 | CONFIG_DRM=y |
| 156 | CONFIG_TEGRA_HOST1X=y | ||
| 156 | CONFIG_DRM_TEGRA=y | 157 | CONFIG_DRM_TEGRA=y |
| 157 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 158 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
| 158 | # CONFIG_LCD_CLASS_DEVICE is not set | 159 | # CONFIG_LCD_CLASS_DEVICE is not set |
| @@ -202,7 +203,7 @@ CONFIG_TEGRA20_APB_DMA=y | |||
| 202 | CONFIG_STAGING=y | 203 | CONFIG_STAGING=y |
| 203 | CONFIG_SENSORS_ISL29018=y | 204 | CONFIG_SENSORS_ISL29018=y |
| 204 | CONFIG_SENSORS_ISL29028=y | 205 | CONFIG_SENSORS_ISL29028=y |
| 205 | CONFIG_SENSORS_AK8975=y | 206 | CONFIG_AK8975=y |
| 206 | CONFIG_MFD_NVEC=y | 207 | CONFIG_MFD_NVEC=y |
| 207 | CONFIG_KEYBOARD_NVEC=y | 208 | CONFIG_KEYBOARD_NVEC=y |
| 208 | CONFIG_SERIO_NVEC_PS2=y | 209 | CONFIG_SERIO_NVEC_PS2=y |
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 7eb18c1d8d6c..4f009c10540d 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h | |||
| @@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
| 233 | ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ | 233 | ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ |
| 234 | atomic64_t, \ | 234 | atomic64_t, \ |
| 235 | counter), \ | 235 | counter), \ |
| 236 | (unsigned long)(o), \ | 236 | (unsigned long long)(o), \ |
| 237 | (unsigned long)(n))) | 237 | (unsigned long long)(n))) |
| 238 | 238 | ||
| 239 | #define cmpxchg64_local(ptr, o, n) \ | 239 | #define cmpxchg64_local(ptr, o, n) \ |
| 240 | ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ | 240 | ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ |
| 241 | local64_t, \ | 241 | local64_t, \ |
| 242 | a), \ | 242 | a), \ |
| 243 | (unsigned long)(o), \ | 243 | (unsigned long long)(o), \ |
| 244 | (unsigned long)(n))) | 244 | (unsigned long long)(n))) |
| 245 | 245 | ||
| 246 | #endif /* __LINUX_ARM_ARCH__ >= 6 */ | 246 | #endif /* __LINUX_ARM_ARCH__ >= 6 */ |
| 247 | 247 | ||
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S index 2848857f5b62..fbd24beeb1fa 100644 --- a/arch/arm/include/debug/ux500.S +++ b/arch/arm/include/debug/ux500.S | |||
| @@ -24,9 +24,9 @@ | |||
| 24 | #define U8500_UART0_PHYS_BASE (0x80120000) | 24 | #define U8500_UART0_PHYS_BASE (0x80120000) |
| 25 | #define U8500_UART1_PHYS_BASE (0x80121000) | 25 | #define U8500_UART1_PHYS_BASE (0x80121000) |
| 26 | #define U8500_UART2_PHYS_BASE (0x80007000) | 26 | #define U8500_UART2_PHYS_BASE (0x80007000) |
| 27 | #define U8500_UART0_VIRT_BASE (0xa8120000) | 27 | #define U8500_UART0_VIRT_BASE (0xf8120000) |
| 28 | #define U8500_UART1_VIRT_BASE (0xa8121000) | 28 | #define U8500_UART1_VIRT_BASE (0xf8121000) |
| 29 | #define U8500_UART2_VIRT_BASE (0xa8007000) | 29 | #define U8500_UART2_VIRT_BASE (0xf8007000) |
| 30 | #define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE | 30 | #define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE |
| 31 | #define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE | 31 | #define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE |
| 32 | #endif | 32 | #endif |
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 2acdff4c1dfe..180b3024bec3 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c | |||
| @@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) | |||
| 174 | static struct clock_event_device clkevt = { | 174 | static struct clock_event_device clkevt = { |
| 175 | .name = "at91_tick", | 175 | .name = "at91_tick", |
| 176 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 176 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
| 177 | .shift = 32, | ||
| 177 | .rating = 150, | 178 | .rating = 150, |
| 178 | .set_next_event = clkevt32k_next_event, | 179 | .set_next_event = clkevt32k_next_event, |
| 179 | .set_mode = clkevt32k_mode, | 180 | .set_mode = clkevt32k_mode, |
| @@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void) | |||
| 264 | at91_st_write(AT91_ST_RTMR, 1); | 265 | at91_st_write(AT91_ST_RTMR, 1); |
| 265 | 266 | ||
| 266 | /* Setup timer clockevent, with minimum of two ticks (important!!) */ | 267 | /* Setup timer clockevent, with minimum of two ticks (important!!) */ |
| 268 | clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); | ||
| 269 | clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); | ||
| 270 | clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; | ||
| 267 | clkevt.cpumask = cpumask_of(0); | 271 | clkevt.cpumask = cpumask_of(0); |
| 268 | clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK, | 272 | clockevents_register_device(&clkevt); |
| 269 | 2, AT91_ST_ALMV); | ||
| 270 | 273 | ||
| 271 | /* register clocksource */ | 274 | /* register clocksource */ |
| 272 | clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); | 275 | clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 151259003086..dda9a2bd3acb 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
| @@ -177,7 +177,8 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode) | |||
| 177 | static const char *step_sels[] = { "osc", "pll2_pfd2_396m", }; | 177 | static const char *step_sels[] = { "osc", "pll2_pfd2_396m", }; |
| 178 | static const char *pll1_sw_sels[] = { "pll1_sys", "step", }; | 178 | static const char *pll1_sw_sels[] = { "pll1_sys", "step", }; |
| 179 | static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", }; | 179 | static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", }; |
| 180 | static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", }; | 180 | static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", }; |
| 181 | static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", }; | ||
| 181 | static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; | 182 | static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; |
| 182 | static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; | 183 | static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; |
| 183 | static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 184 | static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", }; |
| @@ -185,7 +186,7 @@ static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd | |||
| 185 | static const char *gpu_axi_sels[] = { "axi", "ahb", }; | 186 | static const char *gpu_axi_sels[] = { "axi", "ahb", }; |
| 186 | static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; | 187 | static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; |
| 187 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; | 188 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; |
| 188 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; | 189 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", }; |
| 189 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; | 190 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; |
| 190 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; | 191 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; |
| 191 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 192 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; |
| @@ -369,8 +370,8 @@ int __init mx6q_clocks_init(void) | |||
| 369 | clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); | 370 | clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); |
| 370 | clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); | 371 | clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); |
| 371 | clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); | 372 | clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); |
| 372 | clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); | 373 | clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); |
| 373 | clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); | 374 | clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); |
| 374 | clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); | 375 | clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); |
| 375 | clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); | 376 | clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); |
| 376 | clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); | 377 | clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); |
| @@ -498,7 +499,7 @@ int __init mx6q_clocks_init(void) | |||
| 498 | clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); | 499 | clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); |
| 499 | clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); | 500 | clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); |
| 500 | clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); | 501 | clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); |
| 501 | clk[mlb] = imx_clk_gate2("mlb", "pll8_mlb", base + 0x74, 18); | 502 | clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); |
| 502 | clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); | 503 | clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); |
| 503 | clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); | 504 | clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); |
| 504 | clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); | 505 | clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); |
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S index 67b9c48dcafe..627f16f0e9d1 100644 --- a/arch/arm/mach-imx/headsmp.S +++ b/arch/arm/mach-imx/headsmp.S | |||
| @@ -18,8 +18,20 @@ | |||
| 18 | .section ".text.head", "ax" | 18 | .section ".text.head", "ax" |
| 19 | 19 | ||
| 20 | #ifdef CONFIG_SMP | 20 | #ifdef CONFIG_SMP |
| 21 | diag_reg_offset: | ||
| 22 | .word g_diag_reg - . | ||
| 23 | |||
| 24 | .macro set_diag_reg | ||
| 25 | adr r0, diag_reg_offset | ||
| 26 | ldr r1, [r0] | ||
| 27 | add r1, r1, r0 @ r1 = physical &g_diag_reg | ||
| 28 | ldr r0, [r1] | ||
| 29 | mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register | ||
| 30 | .endm | ||
| 31 | |||
| 21 | ENTRY(v7_secondary_startup) | 32 | ENTRY(v7_secondary_startup) |
| 22 | bl v7_invalidate_l1 | 33 | bl v7_invalidate_l1 |
| 34 | set_diag_reg | ||
| 23 | b secondary_startup | 35 | b secondary_startup |
| 24 | ENDPROC(v7_secondary_startup) | 36 | ENDPROC(v7_secondary_startup) |
| 25 | #endif | 37 | #endif |
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c index 4a69305db65e..c6e1ab544882 100644 --- a/arch/arm/mach-imx/platsmp.c +++ b/arch/arm/mach-imx/platsmp.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
| 15 | #include <asm/cacheflush.h> | ||
| 15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
| 16 | #include <asm/smp_scu.h> | 17 | #include <asm/smp_scu.h> |
| 17 | #include <asm/mach/map.h> | 18 | #include <asm/mach/map.h> |
| @@ -21,6 +22,7 @@ | |||
| 21 | 22 | ||
| 22 | #define SCU_STANDBY_ENABLE (1 << 5) | 23 | #define SCU_STANDBY_ENABLE (1 << 5) |
| 23 | 24 | ||
| 25 | u32 g_diag_reg; | ||
| 24 | static void __iomem *scu_base; | 26 | static void __iomem *scu_base; |
| 25 | 27 | ||
| 26 | static struct map_desc scu_io_desc __initdata = { | 28 | static struct map_desc scu_io_desc __initdata = { |
| @@ -80,6 +82,18 @@ void imx_smp_prepare(void) | |||
| 80 | static void __init imx_smp_prepare_cpus(unsigned int max_cpus) | 82 | static void __init imx_smp_prepare_cpus(unsigned int max_cpus) |
| 81 | { | 83 | { |
| 82 | imx_smp_prepare(); | 84 | imx_smp_prepare(); |
| 85 | |||
| 86 | /* | ||
| 87 | * The diagnostic register holds the errata bits. The bootloader usually | ||
| 88 | * does not bring up the secondary cores, so any errata bits it sets are | ||
| 89 | * applied to the boot CPU only. On an SMP configuration they must be | ||
| 90 | * set identically on every core. Read the register on the boot CPU | ||
| 91 | * here, and replicate it to the secondary cores when they are brought | ||
| 92 | * up. | ||
| 93 | */ | ||
| 94 | asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc"); | ||
| 95 | __cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg)); | ||
| 96 | outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1)); | ||
| 83 | } | 97 | } |
| 84 | 98 | ||
| 85 | struct smp_operations imx_smp_ops __initdata = { | 99 | struct smp_operations imx_smp_ops __initdata = { |
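The platsmp.c hunk above reads the Cortex-A9 diagnostic register on the boot CPU and pushes the value out to DRAM so that a secondary core, which enters v7_secondary_startup with the MMU and caches still off, can pick it up through the set_diag_reg macro added to headsmp.S. A minimal sketch of that publish step, condensed into one hypothetical helper (publish_diag_reg is illustrative and not part of the patch; g_diag_reg and the cache-maintenance calls are the ones introduced above, with <asm/cacheflush.h> assumed to be included as in platsmp.c):

static void publish_diag_reg(void)
{
	/* read the cp15 diagnostic register (c15, c0, 1) on the boot CPU */
	asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");

	/* flush L1 and clean L2 so a core running uncached sees the value */
	__cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
	outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
}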
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index c2cae69e6d2b..f38922897563 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c | |||
| @@ -528,12 +528,6 @@ void __init kirkwood_init_early(void) | |||
| 528 | { | 528 | { |
| 529 | orion_time_set_base(TIMER_VIRT_BASE); | 529 | orion_time_set_base(TIMER_VIRT_BASE); |
| 530 | 530 | ||
| 531 | /* | ||
| 532 | * Some Kirkwood devices allocate their coherent buffers from atomic | ||
| 533 | * context. Increase size of atomic coherent pool to make sure such | ||
| 534 | * the allocations won't fail. | ||
| 535 | */ | ||
| 536 | init_dma_coherent_pool_size(SZ_1M); | ||
| 537 | mvebu_mbus_init("marvell,kirkwood-mbus", | 531 | mvebu_mbus_init("marvell,kirkwood-mbus", |
| 538 | BRIDGE_WINS_BASE, BRIDGE_WINS_SZ, | 532 | BRIDGE_WINS_BASE, BRIDGE_WINS_SZ, |
| 539 | DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ); | 533 | DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ); |
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c index 283abff90228..e1267d6b468f 100644 --- a/arch/arm/mach-kirkwood/ts219-setup.c +++ b/arch/arm/mach-kirkwood/ts219-setup.c | |||
| @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) | |||
| 124 | static int __init ts219_pci_init(void) | 124 | static int __init ts219_pci_init(void) |
| 125 | { | 125 | { |
| 126 | if (machine_is_ts219()) | 126 | if (machine_is_ts219()) |
| 127 | kirkwood_pcie_init(KW_PCIE0); | 127 | kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); |
| 128 | 128 | ||
| 129 | return 0; | 129 | return 0; |
| 130 | } | 130 | } |
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index e11acbb0a46d..80a8bcacd9d5 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig | |||
| @@ -15,6 +15,7 @@ config ARCH_MVEBU | |||
| 15 | select MVEBU_CLK_GATING | 15 | select MVEBU_CLK_GATING |
| 16 | select MVEBU_MBUS | 16 | select MVEBU_MBUS |
| 17 | select ZONE_DMA if ARM_LPAE | 17 | select ZONE_DMA if ARM_LPAE |
| 18 | select ARCH_REQUIRE_GPIOLIB | ||
| 18 | 19 | ||
| 19 | if ARCH_MVEBU | 20 | if ARCH_MVEBU |
| 20 | 21 | ||
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c index 42a4cb3087e2..1c48890bb72b 100644 --- a/arch/arm/mach-mvebu/armada-370-xp.c +++ b/arch/arm/mach-mvebu/armada-370-xp.c | |||
| @@ -54,13 +54,6 @@ void __init armada_370_xp_init_early(void) | |||
| 54 | char *mbus_soc_name; | 54 | char *mbus_soc_name; |
| 55 | 55 | ||
| 56 | /* | 56 | /* |
| 57 | * Some Armada 370/XP devices allocate their coherent buffers | ||
| 58 | * from atomic context. Increase size of atomic coherent pool | ||
| 59 | * to make sure such the allocations won't fail. | ||
| 60 | */ | ||
| 61 | init_dma_coherent_pool_size(SZ_1M); | ||
| 62 | |||
| 63 | /* | ||
| 64 | * This initialization will be replaced by a DT-based | 57 | * This initialization will be replaced by a DT-based |
| 65 | * initialization once the mvebu-mbus driver gains DT support. | 58 | * initialization once the mvebu-mbus driver gains DT support. |
| 66 | */ | 59 | */ |
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index 68ab858e27b7..a94b3a718d1a 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c | |||
| @@ -345,6 +345,7 @@ static int __init omap1_system_dma_init(void) | |||
| 345 | dev_err(&pdev->dev, | 345 | dev_err(&pdev->dev, |
| 346 | "%s: Memory allocation failed for d->chan!\n", | 346 | "%s: Memory allocation failed for d->chan!\n", |
| 347 | __func__); | 347 | __func__); |
| 348 | ret = -ENOMEM; | ||
| 348 | goto exit_release_d; | 349 | goto exit_release_d; |
| 349 | } | 350 | } |
| 350 | 351 | ||
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c index 6ebc7803bc3e..af3544ce4f02 100644 --- a/arch/arm/mach-omap2/cclock33xx_data.c +++ b/arch/arm/mach-omap2/cclock33xx_data.c | |||
| @@ -454,9 +454,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0, | |||
| 454 | */ | 454 | */ |
| 455 | DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732); | 455 | DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732); |
| 456 | 456 | ||
| 457 | DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0, | 457 | static struct clk clkdiv32k_ick; |
| 458 | AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT, | 458 | |
| 459 | 0x0, NULL); | 459 | static const char *clkdiv32k_ick_parent_names[] = { |
| 460 | "clkdiv32k_ck", | ||
| 461 | }; | ||
| 462 | |||
| 463 | static const struct clk_ops clkdiv32k_ick_ops = { | ||
| 464 | .enable = &omap2_dflt_clk_enable, | ||
| 465 | .disable = &omap2_dflt_clk_disable, | ||
| 466 | .is_enabled = &omap2_dflt_clk_is_enabled, | ||
| 467 | .init = &omap2_init_clk_clkdm, | ||
| 468 | }; | ||
| 469 | |||
| 470 | static struct clk_hw_omap clkdiv32k_ick_hw = { | ||
| 471 | .hw = { | ||
| 472 | .clk = &clkdiv32k_ick, | ||
| 473 | }, | ||
| 474 | .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL, | ||
| 475 | .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT, | ||
| 476 | .clkdm_name = "clk_24mhz_clkdm", | ||
| 477 | }; | ||
| 478 | |||
| 479 | DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops); | ||
| 460 | 480 | ||
| 461 | /* "usbotg_fck" is an additional clock and not really a modulemode */ | 481 | /* "usbotg_fck" is an additional clock and not really a modulemode */ |
| 462 | DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0, | 482 | DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0, |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index d25a95fe9921..7341eff63f56 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
| @@ -1356,13 +1356,27 @@ static void _enable_sysc(struct omap_hwmod *oh) | |||
| 1356 | 1356 | ||
| 1357 | clkdm = _get_clkdm(oh); | 1357 | clkdm = _get_clkdm(oh); |
| 1358 | if (sf & SYSC_HAS_SIDLEMODE) { | 1358 | if (sf & SYSC_HAS_SIDLEMODE) { |
| 1359 | if (oh->flags & HWMOD_SWSUP_SIDLE || | ||
| 1360 | oh->flags & HWMOD_SWSUP_SIDLE_ACT) { | ||
| 1361 | idlemode = HWMOD_IDLEMODE_NO; | ||
| 1362 | } else { | ||
| 1363 | if (sf & SYSC_HAS_ENAWAKEUP) | ||
| 1364 | _enable_wakeup(oh, &v); | ||
| 1365 | if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) | ||
| 1366 | idlemode = HWMOD_IDLEMODE_SMART_WKUP; | ||
| 1367 | else | ||
| 1368 | idlemode = HWMOD_IDLEMODE_SMART; | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | /* | ||
| 1372 | * This is special handling for some IPs like the | ||
| 1373 | * 32k sync timer: force them to idle! | ||
| 1374 | */ | ||
| 1359 | clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU); | 1375 | clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU); |
| 1360 | if (clkdm_act && !(oh->class->sysc->idlemodes & | 1376 | if (clkdm_act && !(oh->class->sysc->idlemodes & |
| 1361 | (SIDLE_SMART | SIDLE_SMART_WKUP))) | 1377 | (SIDLE_SMART | SIDLE_SMART_WKUP))) |
| 1362 | idlemode = HWMOD_IDLEMODE_FORCE; | 1378 | idlemode = HWMOD_IDLEMODE_FORCE; |
| 1363 | else | 1379 | |
| 1364 | idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ? | ||
| 1365 | HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART; | ||
| 1366 | _set_slave_idlemode(oh, idlemode, &v); | 1380 | _set_slave_idlemode(oh, idlemode, &v); |
| 1367 | } | 1381 | } |
| 1368 | 1382 | ||
| @@ -1391,10 +1405,6 @@ static void _enable_sysc(struct omap_hwmod *oh) | |||
| 1391 | (sf & SYSC_HAS_CLOCKACTIVITY)) | 1405 | (sf & SYSC_HAS_CLOCKACTIVITY)) |
| 1392 | _set_clockactivity(oh, oh->class->sysc->clockact, &v); | 1406 | _set_clockactivity(oh, oh->class->sysc->clockact, &v); |
| 1393 | 1407 | ||
| 1394 | /* If slave is in SMARTIDLE, also enable wakeup */ | ||
| 1395 | if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE)) | ||
| 1396 | _enable_wakeup(oh, &v); | ||
| 1397 | |||
| 1398 | _write_sysconfig(v, oh); | 1408 | _write_sysconfig(v, oh); |
| 1399 | 1409 | ||
| 1400 | /* | 1410 | /* |
| @@ -1430,13 +1440,16 @@ static void _idle_sysc(struct omap_hwmod *oh) | |||
| 1430 | sf = oh->class->sysc->sysc_flags; | 1440 | sf = oh->class->sysc->sysc_flags; |
| 1431 | 1441 | ||
| 1432 | if (sf & SYSC_HAS_SIDLEMODE) { | 1442 | if (sf & SYSC_HAS_SIDLEMODE) { |
| 1433 | /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */ | 1443 | if (oh->flags & HWMOD_SWSUP_SIDLE) { |
| 1434 | if (oh->flags & HWMOD_SWSUP_SIDLE || | ||
| 1435 | !(oh->class->sysc->idlemodes & | ||
| 1436 | (SIDLE_SMART | SIDLE_SMART_WKUP))) | ||
| 1437 | idlemode = HWMOD_IDLEMODE_FORCE; | 1444 | idlemode = HWMOD_IDLEMODE_FORCE; |
| 1438 | else | 1445 | } else { |
| 1439 | idlemode = HWMOD_IDLEMODE_SMART; | 1446 | if (sf & SYSC_HAS_ENAWAKEUP) |
| 1447 | _enable_wakeup(oh, &v); | ||
| 1448 | if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) | ||
| 1449 | idlemode = HWMOD_IDLEMODE_SMART_WKUP; | ||
| 1450 | else | ||
| 1451 | idlemode = HWMOD_IDLEMODE_SMART; | ||
| 1452 | } | ||
| 1440 | _set_slave_idlemode(oh, idlemode, &v); | 1453 | _set_slave_idlemode(oh, idlemode, &v); |
| 1441 | } | 1454 | } |
| 1442 | 1455 | ||
| @@ -1455,10 +1468,6 @@ static void _idle_sysc(struct omap_hwmod *oh) | |||
| 1455 | _set_master_standbymode(oh, idlemode, &v); | 1468 | _set_master_standbymode(oh, idlemode, &v); |
| 1456 | } | 1469 | } |
| 1457 | 1470 | ||
| 1458 | /* If slave is in SMARTIDLE, also enable wakeup */ | ||
| 1459 | if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE)) | ||
| 1460 | _enable_wakeup(oh, &v); | ||
| 1461 | |||
| 1462 | _write_sysconfig(v, oh); | 1471 | _write_sysconfig(v, oh); |
| 1463 | } | 1472 | } |
| 1464 | 1473 | ||
| @@ -2065,7 +2074,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh) | |||
| 2065 | * do so is present in the hwmod data, then call it and pass along the | 2074 | * do so is present in the hwmod data, then call it and pass along the |
| 2066 | * return value; otherwise, return 0. | 2075 | * return value; otherwise, return 0. |
| 2067 | */ | 2076 | */ |
| 2068 | static int __init _enable_preprogram(struct omap_hwmod *oh) | 2077 | static int _enable_preprogram(struct omap_hwmod *oh) |
| 2069 | { | 2078 | { |
| 2070 | if (!oh->class->enable_preprogram) | 2079 | if (!oh->class->enable_preprogram) |
| 2071 | return 0; | 2080 | return 0; |
| @@ -2246,42 +2255,6 @@ static int _idle(struct omap_hwmod *oh) | |||
| 2246 | } | 2255 | } |
| 2247 | 2256 | ||
| 2248 | /** | 2257 | /** |
| 2249 | * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit | ||
| 2250 | * @oh: struct omap_hwmod * | ||
| 2251 | * @autoidle: desired AUTOIDLE bitfield value (0 or 1) | ||
| 2252 | * | ||
| 2253 | * Sets the IP block's OCP autoidle bit in hardware, and updates our | ||
| 2254 | * local copy. Intended to be used by drivers that require | ||
| 2255 | * direct manipulation of the AUTOIDLE bits. | ||
| 2256 | * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes | ||
| 2257 | * along the return value from _set_module_autoidle(). | ||
| 2258 | * | ||
| 2259 | * Any users of this function should be scrutinized carefully. | ||
| 2260 | */ | ||
| 2261 | int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle) | ||
| 2262 | { | ||
| 2263 | u32 v; | ||
| 2264 | int retval = 0; | ||
| 2265 | unsigned long flags; | ||
| 2266 | |||
| 2267 | if (!oh || oh->_state != _HWMOD_STATE_ENABLED) | ||
| 2268 | return -EINVAL; | ||
| 2269 | |||
| 2270 | spin_lock_irqsave(&oh->_lock, flags); | ||
| 2271 | |||
| 2272 | v = oh->_sysc_cache; | ||
| 2273 | |||
| 2274 | retval = _set_module_autoidle(oh, autoidle, &v); | ||
| 2275 | |||
| 2276 | if (!retval) | ||
| 2277 | _write_sysconfig(v, oh); | ||
| 2278 | |||
| 2279 | spin_unlock_irqrestore(&oh->_lock, flags); | ||
| 2280 | |||
| 2281 | return retval; | ||
| 2282 | } | ||
| 2283 | |||
| 2284 | /** | ||
| 2285 | * _shutdown - shutdown an omap_hwmod | 2258 | * _shutdown - shutdown an omap_hwmod |
| 2286 | * @oh: struct omap_hwmod * | 2259 | * @oh: struct omap_hwmod * |
| 2287 | * | 2260 | * |
| @@ -3180,38 +3153,6 @@ error: | |||
| 3180 | } | 3153 | } |
| 3181 | 3154 | ||
| 3182 | /** | 3155 | /** |
| 3183 | * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode | ||
| 3184 | * @oh: struct omap_hwmod * | ||
| 3185 | * @idlemode: SIDLEMODE field bits (shifted to bit 0) | ||
| 3186 | * | ||
| 3187 | * Sets the IP block's OCP slave idlemode in hardware, and updates our | ||
| 3188 | * local copy. Intended to be used by drivers that have some erratum | ||
| 3189 | * that requires direct manipulation of the SIDLEMODE bits. Returns | ||
| 3190 | * -EINVAL if @oh is null, or passes along the return value from | ||
| 3191 | * _set_slave_idlemode(). | ||
| 3192 | * | ||
| 3193 | * XXX Does this function have any current users? If not, we should | ||
| 3194 | * remove it; it is better to let the rest of the hwmod code handle this. | ||
| 3195 | * Any users of this function should be scrutinized carefully. | ||
| 3196 | */ | ||
| 3197 | int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode) | ||
| 3198 | { | ||
| 3199 | u32 v; | ||
| 3200 | int retval = 0; | ||
| 3201 | |||
| 3202 | if (!oh) | ||
| 3203 | return -EINVAL; | ||
| 3204 | |||
| 3205 | v = oh->_sysc_cache; | ||
| 3206 | |||
| 3207 | retval = _set_slave_idlemode(oh, idlemode, &v); | ||
| 3208 | if (!retval) | ||
| 3209 | _write_sysconfig(v, oh); | ||
| 3210 | |||
| 3211 | return retval; | ||
| 3212 | } | ||
| 3213 | |||
| 3214 | /** | ||
| 3215 | * omap_hwmod_lookup - look up a registered omap_hwmod by name | 3156 | * omap_hwmod_lookup - look up a registered omap_hwmod by name |
| 3216 | * @name: name of the omap_hwmod to look up | 3157 | * @name: name of the omap_hwmod to look up |
| 3217 | * | 3158 | * |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index fe5962921f07..0c898f58ac9b 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
| @@ -463,6 +463,9 @@ struct omap_hwmod_omap4_prcm { | |||
| 463 | * is kept in force-standby mode. Failing to do so causes PM problems | 463 | * is kept in force-standby mode. Failing to do so causes PM problems |
| 464 | * with musb on OMAP3630 at least. Note that musb has a dedicated register | 464 | * with musb on OMAP3630 at least. Note that musb has a dedicated register |
| 465 | * to control MSTANDBY signal when MIDLEMODE is set to force-standby. | 465 | * to control MSTANDBY signal when MIDLEMODE is set to force-standby. |
| 466 | * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module | ||
| 467 | * out of idle, but rely on smart-idle to put it back in idle, | ||
| 468 | * so the wakeups are still functional (only known case for now is the UART). | ||
| 466 | */ | 469 | */ |
| 467 | #define HWMOD_SWSUP_SIDLE (1 << 0) | 470 | #define HWMOD_SWSUP_SIDLE (1 << 0) |
| 468 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) | 471 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) |
| @@ -476,6 +479,7 @@ struct omap_hwmod_omap4_prcm { | |||
| 476 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) | 479 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) |
| 477 | #define HWMOD_BLOCK_WFI (1 << 10) | 480 | #define HWMOD_BLOCK_WFI (1 << 10) |
| 478 | #define HWMOD_FORCE_MSTANDBY (1 << 11) | 481 | #define HWMOD_FORCE_MSTANDBY (1 << 11) |
| 482 | #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) | ||
| 479 | 483 | ||
| 480 | /* | 484 | /* |
| 481 | * omap_hwmod._int_flags definitions | 485 | * omap_hwmod._int_flags definitions |
| @@ -641,9 +645,6 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name); | |||
| 641 | int omap_hwmod_enable_clocks(struct omap_hwmod *oh); | 645 | int omap_hwmod_enable_clocks(struct omap_hwmod *oh); |
| 642 | int omap_hwmod_disable_clocks(struct omap_hwmod *oh); | 646 | int omap_hwmod_disable_clocks(struct omap_hwmod *oh); |
| 643 | 647 | ||
| 644 | int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode); | ||
| 645 | int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle); | ||
| 646 | |||
| 647 | int omap_hwmod_reset(struct omap_hwmod *oh); | 648 | int omap_hwmod_reset(struct omap_hwmod *oh); |
| 648 | void omap_hwmod_ocp_barrier(struct omap_hwmod *oh); | 649 | void omap_hwmod_ocp_barrier(struct omap_hwmod *oh); |
| 649 | 650 | ||
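Together with the _enable_sysc()/_idle_sysc() rework earlier in this series, the new HWMOD_SWSUP_SIDLE_ACT flag keeps a module out of idle while it is enabled but leaves idle re-entry to smart-idle, so its wakeups keep working. A condensed, illustrative restatement of the enable-path decision, leaving aside the 32k-sync-timer force-idle special case (pick_enable_sidlemode is a hypothetical helper, not part of the patch; the flags and helpers it uses are those shown above):

static u8 pick_enable_sidlemode(struct omap_hwmod *oh, u32 sf, u32 *v)
{
	/* software-supervised idle: hold the module in no-idle while enabled */
	if (oh->flags & (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_SIDLE_ACT))
		return HWMOD_IDLEMODE_NO;

	/* otherwise let the IP idle itself, with wakeup where supported */
	if (sf & SYSC_HAS_ENAWAKEUP)
		_enable_wakeup(oh, v);

	return (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ?
		HWMOD_IDLEMODE_SMART_WKUP : HWMOD_IDLEMODE_SMART;
}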
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index c8c64b3e1acc..d05fc7b54567 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | |||
| @@ -512,6 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = { | |||
| 512 | .mpu_irqs = omap2_uart1_mpu_irqs, | 512 | .mpu_irqs = omap2_uart1_mpu_irqs, |
| 513 | .sdma_reqs = omap2_uart1_sdma_reqs, | 513 | .sdma_reqs = omap2_uart1_sdma_reqs, |
| 514 | .main_clk = "uart1_fck", | 514 | .main_clk = "uart1_fck", |
| 515 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 515 | .prcm = { | 516 | .prcm = { |
| 516 | .omap2 = { | 517 | .omap2 = { |
| 517 | .module_offs = CORE_MOD, | 518 | .module_offs = CORE_MOD, |
| @@ -531,6 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = { | |||
| 531 | .mpu_irqs = omap2_uart2_mpu_irqs, | 532 | .mpu_irqs = omap2_uart2_mpu_irqs, |
| 532 | .sdma_reqs = omap2_uart2_sdma_reqs, | 533 | .sdma_reqs = omap2_uart2_sdma_reqs, |
| 533 | .main_clk = "uart2_fck", | 534 | .main_clk = "uart2_fck", |
| 535 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 534 | .prcm = { | 536 | .prcm = { |
| 535 | .omap2 = { | 537 | .omap2 = { |
| 536 | .module_offs = CORE_MOD, | 538 | .module_offs = CORE_MOD, |
| @@ -550,6 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = { | |||
| 550 | .mpu_irqs = omap2_uart3_mpu_irqs, | 552 | .mpu_irqs = omap2_uart3_mpu_irqs, |
| 551 | .sdma_reqs = omap2_uart3_sdma_reqs, | 553 | .sdma_reqs = omap2_uart3_sdma_reqs, |
| 552 | .main_clk = "uart3_fck", | 554 | .main_clk = "uart3_fck", |
| 555 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 553 | .prcm = { | 556 | .prcm = { |
| 554 | .omap2 = { | 557 | .omap2 = { |
| 555 | .module_offs = CORE_MOD, | 558 | .module_offs = CORE_MOD, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 01d8f324450a..075f7cc51026 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c | |||
| @@ -1995,6 +1995,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = { | |||
| 1995 | .name = "uart1", | 1995 | .name = "uart1", |
| 1996 | .class = &uart_class, | 1996 | .class = &uart_class, |
| 1997 | .clkdm_name = "l4_wkup_clkdm", | 1997 | .clkdm_name = "l4_wkup_clkdm", |
| 1998 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 1998 | .mpu_irqs = am33xx_uart1_irqs, | 1999 | .mpu_irqs = am33xx_uart1_irqs, |
| 1999 | .sdma_reqs = uart1_edma_reqs, | 2000 | .sdma_reqs = uart1_edma_reqs, |
| 2000 | .main_clk = "dpll_per_m2_div4_wkupdm_ck", | 2001 | .main_clk = "dpll_per_m2_div4_wkupdm_ck", |
| @@ -2015,6 +2016,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = { | |||
| 2015 | .name = "uart2", | 2016 | .name = "uart2", |
| 2016 | .class = &uart_class, | 2017 | .class = &uart_class, |
| 2017 | .clkdm_name = "l4ls_clkdm", | 2018 | .clkdm_name = "l4ls_clkdm", |
| 2019 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 2018 | .mpu_irqs = am33xx_uart2_irqs, | 2020 | .mpu_irqs = am33xx_uart2_irqs, |
| 2019 | .sdma_reqs = uart1_edma_reqs, | 2021 | .sdma_reqs = uart1_edma_reqs, |
| 2020 | .main_clk = "dpll_per_m2_div4_ck", | 2022 | .main_clk = "dpll_per_m2_div4_ck", |
| @@ -2042,6 +2044,7 @@ static struct omap_hwmod am33xx_uart3_hwmod = { | |||
| 2042 | .name = "uart3", | 2044 | .name = "uart3", |
| 2043 | .class = &uart_class, | 2045 | .class = &uart_class, |
| 2044 | .clkdm_name = "l4ls_clkdm", | 2046 | .clkdm_name = "l4ls_clkdm", |
| 2047 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 2045 | .mpu_irqs = am33xx_uart3_irqs, | 2048 | .mpu_irqs = am33xx_uart3_irqs, |
| 2046 | .sdma_reqs = uart3_edma_reqs, | 2049 | .sdma_reqs = uart3_edma_reqs, |
| 2047 | .main_clk = "dpll_per_m2_div4_ck", | 2050 | .main_clk = "dpll_per_m2_div4_ck", |
| @@ -2062,6 +2065,7 @@ static struct omap_hwmod am33xx_uart4_hwmod = { | |||
| 2062 | .name = "uart4", | 2065 | .name = "uart4", |
| 2063 | .class = &uart_class, | 2066 | .class = &uart_class, |
| 2064 | .clkdm_name = "l4ls_clkdm", | 2067 | .clkdm_name = "l4ls_clkdm", |
| 2068 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 2065 | .mpu_irqs = am33xx_uart4_irqs, | 2069 | .mpu_irqs = am33xx_uart4_irqs, |
| 2066 | .sdma_reqs = uart1_edma_reqs, | 2070 | .sdma_reqs = uart1_edma_reqs, |
| 2067 | .main_clk = "dpll_per_m2_div4_ck", | 2071 | .main_clk = "dpll_per_m2_div4_ck", |
| @@ -2082,6 +2086,7 @@ static struct omap_hwmod am33xx_uart5_hwmod = { | |||
| 2082 | .name = "uart5", | 2086 | .name = "uart5", |
| 2083 | .class = &uart_class, | 2087 | .class = &uart_class, |
| 2084 | .clkdm_name = "l4ls_clkdm", | 2088 | .clkdm_name = "l4ls_clkdm", |
| 2089 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 2085 | .mpu_irqs = am33xx_uart5_irqs, | 2090 | .mpu_irqs = am33xx_uart5_irqs, |
| 2086 | .sdma_reqs = uart1_edma_reqs, | 2091 | .sdma_reqs = uart1_edma_reqs, |
| 2087 | .main_clk = "dpll_per_m2_div4_ck", | 2092 | .main_clk = "dpll_per_m2_div4_ck", |
| @@ -2102,6 +2107,7 @@ static struct omap_hwmod am33xx_uart6_hwmod = { | |||
| 2102 | .name = "uart6", | 2107 | .name = "uart6", |
| 2103 | .class = &uart_class, | 2108 | .class = &uart_class, |
| 2104 | .clkdm_name = "l4ls_clkdm", | 2109 | .clkdm_name = "l4ls_clkdm", |
| 2110 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 2105 | .mpu_irqs = am33xx_uart6_irqs, | 2111 | .mpu_irqs = am33xx_uart6_irqs, |
| 2106 | .sdma_reqs = uart1_edma_reqs, | 2112 | .sdma_reqs = uart1_edma_reqs, |
| 2107 | .main_clk = "dpll_per_m2_div4_ck", | 2113 | .main_clk = "dpll_per_m2_div4_ck", |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 4083606ea1da..31c7126eb3bb 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
| @@ -490,6 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = { | |||
| 490 | .mpu_irqs = omap2_uart1_mpu_irqs, | 490 | .mpu_irqs = omap2_uart1_mpu_irqs, |
| 491 | .sdma_reqs = omap2_uart1_sdma_reqs, | 491 | .sdma_reqs = omap2_uart1_sdma_reqs, |
| 492 | .main_clk = "uart1_fck", | 492 | .main_clk = "uart1_fck", |
| 493 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 493 | .prcm = { | 494 | .prcm = { |
| 494 | .omap2 = { | 495 | .omap2 = { |
| 495 | .module_offs = CORE_MOD, | 496 | .module_offs = CORE_MOD, |
| @@ -508,6 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = { | |||
| 508 | .mpu_irqs = omap2_uart2_mpu_irqs, | 509 | .mpu_irqs = omap2_uart2_mpu_irqs, |
| 509 | .sdma_reqs = omap2_uart2_sdma_reqs, | 510 | .sdma_reqs = omap2_uart2_sdma_reqs, |
| 510 | .main_clk = "uart2_fck", | 511 | .main_clk = "uart2_fck", |
| 512 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 511 | .prcm = { | 513 | .prcm = { |
| 512 | .omap2 = { | 514 | .omap2 = { |
| 513 | .module_offs = CORE_MOD, | 515 | .module_offs = CORE_MOD, |
| @@ -526,6 +528,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = { | |||
| 526 | .mpu_irqs = omap2_uart3_mpu_irqs, | 528 | .mpu_irqs = omap2_uart3_mpu_irqs, |
| 527 | .sdma_reqs = omap2_uart3_sdma_reqs, | 529 | .sdma_reqs = omap2_uart3_sdma_reqs, |
| 528 | .main_clk = "uart3_fck", | 530 | .main_clk = "uart3_fck", |
| 531 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 529 | .prcm = { | 532 | .prcm = { |
| 530 | .omap2 = { | 533 | .omap2 = { |
| 531 | .module_offs = OMAP3430_PER_MOD, | 534 | .module_offs = OMAP3430_PER_MOD, |
| @@ -555,6 +558,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = { | |||
| 555 | .mpu_irqs = uart4_mpu_irqs, | 558 | .mpu_irqs = uart4_mpu_irqs, |
| 556 | .sdma_reqs = uart4_sdma_reqs, | 559 | .sdma_reqs = uart4_sdma_reqs, |
| 557 | .main_clk = "uart4_fck", | 560 | .main_clk = "uart4_fck", |
| 561 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 558 | .prcm = { | 562 | .prcm = { |
| 559 | .omap2 = { | 563 | .omap2 = { |
| 560 | .module_offs = OMAP3430_PER_MOD, | 564 | .module_offs = OMAP3430_PER_MOD, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index eaba9dc91a0d..848b6dc67590 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
| @@ -3434,6 +3434,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = { | |||
| 3434 | .name = "uart1", | 3434 | .name = "uart1", |
| 3435 | .class = &omap44xx_uart_hwmod_class, | 3435 | .class = &omap44xx_uart_hwmod_class, |
| 3436 | .clkdm_name = "l4_per_clkdm", | 3436 | .clkdm_name = "l4_per_clkdm", |
| 3437 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 3437 | .mpu_irqs = omap44xx_uart1_irqs, | 3438 | .mpu_irqs = omap44xx_uart1_irqs, |
| 3438 | .sdma_reqs = omap44xx_uart1_sdma_reqs, | 3439 | .sdma_reqs = omap44xx_uart1_sdma_reqs, |
| 3439 | .main_clk = "func_48m_fclk", | 3440 | .main_clk = "func_48m_fclk", |
| @@ -3462,6 +3463,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = { | |||
| 3462 | .name = "uart2", | 3463 | .name = "uart2", |
| 3463 | .class = &omap44xx_uart_hwmod_class, | 3464 | .class = &omap44xx_uart_hwmod_class, |
| 3464 | .clkdm_name = "l4_per_clkdm", | 3465 | .clkdm_name = "l4_per_clkdm", |
| 3466 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 3465 | .mpu_irqs = omap44xx_uart2_irqs, | 3467 | .mpu_irqs = omap44xx_uart2_irqs, |
| 3466 | .sdma_reqs = omap44xx_uart2_sdma_reqs, | 3468 | .sdma_reqs = omap44xx_uart2_sdma_reqs, |
| 3467 | .main_clk = "func_48m_fclk", | 3469 | .main_clk = "func_48m_fclk", |
| @@ -3490,7 +3492,8 @@ static struct omap_hwmod omap44xx_uart3_hwmod = { | |||
| 3490 | .name = "uart3", | 3492 | .name = "uart3", |
| 3491 | .class = &omap44xx_uart_hwmod_class, | 3493 | .class = &omap44xx_uart_hwmod_class, |
| 3492 | .clkdm_name = "l4_per_clkdm", | 3494 | .clkdm_name = "l4_per_clkdm", |
| 3493 | .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, | 3495 | .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET | |
| 3496 | HWMOD_SWSUP_SIDLE_ACT, | ||
| 3494 | .mpu_irqs = omap44xx_uart3_irqs, | 3497 | .mpu_irqs = omap44xx_uart3_irqs, |
| 3495 | .sdma_reqs = omap44xx_uart3_sdma_reqs, | 3498 | .sdma_reqs = omap44xx_uart3_sdma_reqs, |
| 3496 | .main_clk = "func_48m_fclk", | 3499 | .main_clk = "func_48m_fclk", |
| @@ -3519,6 +3522,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = { | |||
| 3519 | .name = "uart4", | 3522 | .name = "uart4", |
| 3520 | .class = &omap44xx_uart_hwmod_class, | 3523 | .class = &omap44xx_uart_hwmod_class, |
| 3521 | .clkdm_name = "l4_per_clkdm", | 3524 | .clkdm_name = "l4_per_clkdm", |
| 3525 | .flags = HWMOD_SWSUP_SIDLE_ACT, | ||
| 3522 | .mpu_irqs = omap44xx_uart4_irqs, | 3526 | .mpu_irqs = omap44xx_uart4_irqs, |
| 3523 | .sdma_reqs = omap44xx_uart4_sdma_reqs, | 3527 | .sdma_reqs = omap44xx_uart4_sdma_reqs, |
| 3524 | .main_clk = "func_48m_fclk", | 3528 | .main_clk = "func_48m_fclk", |
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 8396b5b7e912..f6601563aa69 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c | |||
| @@ -95,38 +95,9 @@ static void omap_uart_enable_wakeup(struct device *dev, bool enable) | |||
| 95 | omap_hwmod_disable_wakeup(od->hwmods[0]); | 95 | omap_hwmod_disable_wakeup(od->hwmods[0]); |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | /* | ||
| 99 | * Errata i291: [UART]:Cannot Acknowledge Idle Requests | ||
| 100 | * in Smartidle Mode When Configured for DMA Operations. | ||
| 101 | * WA: configure uart in force idle mode. | ||
| 102 | */ | ||
| 103 | static void omap_uart_set_noidle(struct device *dev) | ||
| 104 | { | ||
| 105 | struct platform_device *pdev = to_platform_device(dev); | ||
| 106 | struct omap_device *od = to_omap_device(pdev); | ||
| 107 | |||
| 108 | omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); | ||
| 109 | } | ||
| 110 | |||
| 111 | static void omap_uart_set_smartidle(struct device *dev) | ||
| 112 | { | ||
| 113 | struct platform_device *pdev = to_platform_device(dev); | ||
| 114 | struct omap_device *od = to_omap_device(pdev); | ||
| 115 | u8 idlemode; | ||
| 116 | |||
| 117 | if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP) | ||
| 118 | idlemode = HWMOD_IDLEMODE_SMART_WKUP; | ||
| 119 | else | ||
| 120 | idlemode = HWMOD_IDLEMODE_SMART; | ||
| 121 | |||
| 122 | omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode); | ||
| 123 | } | ||
| 124 | |||
| 125 | #else | 98 | #else |
| 126 | static void omap_uart_enable_wakeup(struct device *dev, bool enable) | 99 | static void omap_uart_enable_wakeup(struct device *dev, bool enable) |
| 127 | {} | 100 | {} |
| 128 | static void omap_uart_set_noidle(struct device *dev) {} | ||
| 129 | static void omap_uart_set_smartidle(struct device *dev) {} | ||
| 130 | #endif /* CONFIG_PM */ | 101 | #endif /* CONFIG_PM */ |
| 131 | 102 | ||
| 132 | #ifdef CONFIG_OMAP_MUX | 103 | #ifdef CONFIG_OMAP_MUX |
| @@ -299,8 +270,6 @@ void __init omap_serial_init_port(struct omap_board_data *bdata, | |||
| 299 | omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; | 270 | omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; |
| 300 | omap_up.flags = UPF_BOOT_AUTOCONF; | 271 | omap_up.flags = UPF_BOOT_AUTOCONF; |
| 301 | omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; | 272 | omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; |
| 302 | omap_up.set_forceidle = omap_uart_set_smartidle; | ||
| 303 | omap_up.set_noidle = omap_uart_set_noidle; | ||
| 304 | omap_up.enable_wakeup = omap_uart_enable_wakeup; | 273 | omap_up.enable_wakeup = omap_uart_enable_wakeup; |
| 305 | omap_up.dma_rx_buf_size = info->dma_rx_buf_size; | 274 | omap_up.dma_rx_buf_size = info->dma_rx_buf_size; |
| 306 | omap_up.dma_rx_timeout = info->dma_rx_timeout; | 275 | omap_up.dma_rx_timeout = info->dma_rx_timeout; |
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index b97fd672e89d..f8a6db9239bf 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c | |||
| @@ -199,13 +199,6 @@ void __init orion5x_init_early(void) | |||
| 199 | 199 | ||
| 200 | orion_time_set_base(TIMER_VIRT_BASE); | 200 | orion_time_set_base(TIMER_VIRT_BASE); |
| 201 | 201 | ||
| 202 | /* | ||
| 203 | * Some Orion5x devices allocate their coherent buffers from atomic | ||
| 204 | * context. Increase size of atomic coherent pool to make sure such | ||
| 205 | * the allocations won't fail. | ||
| 206 | */ | ||
| 207 | init_dma_coherent_pool_size(SZ_1M); | ||
| 208 | |||
| 209 | /* Initialize the MBUS driver */ | 202 | /* Initialize the MBUS driver */ |
| 210 | orion5x_pcie_id(&dev, &rev); | 203 | orion5x_pcie_id(&dev, &rev); |
| 211 | if (dev == MV88F5281_DEV_ID) | 204 | if (dev == MV88F5281_DEV_ID) |
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c index 9e8bdfa2b369..31e69a019bdd 100644 --- a/arch/arm/mach-tegra/tegra2_emc.c +++ b/arch/arm/mach-tegra/tegra2_emc.c | |||
| @@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev) | |||
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 309 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 310 | if (!res) { | ||
| 311 | dev_err(&pdev->dev, "missing register base\n"); | ||
| 312 | return -ENOMEM; | ||
| 313 | } | ||
| 314 | |||
| 315 | emc_regbase = devm_ioremap_resource(&pdev->dev, res); | 310 | emc_regbase = devm_ioremap_resource(&pdev->dev, res); |
| 316 | if (IS_ERR(emc_regbase)) | 311 | if (IS_ERR(emc_regbase)) |
| 317 | return PTR_ERR(emc_regbase); | 312 | return PTR_ERR(emc_regbase); |
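This hunk, like the matching one in arch/arm/plat-samsung/adc.c further down, drops the explicit NULL check on the result of platform_get_resource(): devm_ioremap_resource() already validates the resource, logs an error and returns an ERR_PTR on failure. A minimal sketch of the resulting probe pattern (example_probe is illustrative, not from the patch, and assumes the usual <linux/platform_device.h>, <linux/io.h> and <linux/err.h> includes):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() copes with res == NULL and reports the error */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}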
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 6a4387e39df8..b19b07204aaf 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig | |||
| @@ -51,6 +51,7 @@ config MACH_MOP500 | |||
| 51 | bool "U8500 Development platform, MOP500 versions" | 51 | bool "U8500 Development platform, MOP500 versions" |
| 52 | select I2C | 52 | select I2C |
| 53 | select I2C_NOMADIK | 53 | select I2C_NOMADIK |
| 54 | select REGULATOR | ||
| 54 | select REGULATOR_FIXED_VOLTAGE | 55 | select REGULATOR_FIXED_VOLTAGE |
| 55 | select SOC_BUS | 56 | select SOC_BUS |
| 56 | select UX500_SOC_DB8500 | 57 | select UX500_SOC_DB8500 |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index 3cd555ac6d0a..78389de94dde 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
| @@ -623,7 +623,7 @@ static void __init mop500_init_machine(void) | |||
| 623 | sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL; | 623 | sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL; |
| 624 | 624 | ||
| 625 | mop500_pinmaps_init(); | 625 | mop500_pinmaps_init(); |
| 626 | parent = u8500_init_devices(&ab8500_platdata); | 626 | parent = u8500_init_devices(); |
| 627 | 627 | ||
| 628 | for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) | 628 | for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) |
| 629 | mop500_platform_devs[i]->dev.parent = parent; | 629 | mop500_platform_devs[i]->dev.parent = parent; |
| @@ -660,7 +660,7 @@ static void __init snowball_init_machine(void) | |||
| 660 | sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO; | 660 | sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO; |
| 661 | 661 | ||
| 662 | snowball_pinmaps_init(); | 662 | snowball_pinmaps_init(); |
| 663 | parent = u8500_init_devices(&ab8500_platdata); | 663 | parent = u8500_init_devices(); |
| 664 | 664 | ||
| 665 | for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) | 665 | for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) |
| 666 | snowball_platform_devs[i]->dev.parent = parent; | 666 | snowball_platform_devs[i]->dev.parent = parent; |
| @@ -698,7 +698,7 @@ static void __init hrefv60_init_machine(void) | |||
| 698 | sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO; | 698 | sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO; |
| 699 | 699 | ||
| 700 | hrefv60_pinmaps_init(); | 700 | hrefv60_pinmaps_init(); |
| 701 | parent = u8500_init_devices(&ab8500_platdata); | 701 | parent = u8500_init_devices(); |
| 702 | 702 | ||
| 703 | for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) | 703 | for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) |
| 704 | mop500_platform_devs[i]->dev.parent = parent; | 704 | mop500_platform_devs[i]->dev.parent = parent; |
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index e90b5ab23b6d..46cca52890bc 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
| @@ -206,7 +206,7 @@ static struct device * __init db8500_soc_device_init(void) | |||
| 206 | /* | 206 | /* |
| 207 | * This function is called from the board init | 207 | * This function is called from the board init |
| 208 | */ | 208 | */ |
| 209 | struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500) | 209 | struct device * __init u8500_init_devices(void) |
| 210 | { | 210 | { |
| 211 | struct device *parent; | 211 | struct device *parent; |
| 212 | int i; | 212 | int i; |
| @@ -220,8 +220,6 @@ struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500) | |||
| 220 | for (i = 0; i < ARRAY_SIZE(platform_devs); i++) | 220 | for (i = 0; i < ARRAY_SIZE(platform_devs); i++) |
| 221 | platform_devs[i]->dev.parent = parent; | 221 | platform_devs[i]->dev.parent = parent; |
| 222 | 222 | ||
| 223 | db8500_prcmu_device.dev.platform_data = ab8500; | ||
| 224 | |||
| 225 | platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); | 223 | platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); |
| 226 | 224 | ||
| 227 | return parent; | 225 | return parent; |
| @@ -278,7 +276,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { | |||
| 278 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), | 276 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), |
| 279 | OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", | 277 | OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", |
| 280 | &db8500_prcmu_pdata), | 278 | &db8500_prcmu_pdata), |
| 281 | OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL), | 279 | OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL), |
| 282 | /* Requires device name bindings. */ | 280 | /* Requires device name bindings. */ |
| 283 | OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE, | 281 | OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE, |
| 284 | "pinctrl-db8500", NULL), | 282 | "pinctrl-db8500", NULL), |
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h index bddce2b49372..cad3ca86c540 100644 --- a/arch/arm/mach-ux500/setup.h +++ b/arch/arm/mach-ux500/setup.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | void __init ux500_map_io(void); | 18 | void __init ux500_map_io(void); |
| 19 | extern void __init u8500_map_io(void); | 19 | extern void __init u8500_map_io(void); |
| 20 | 20 | ||
| 21 | extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500); | 21 | extern struct device * __init u8500_init_devices(void); |
| 22 | 22 | ||
| 23 | extern void __init ux500_init_irq(void); | 23 | extern void __init ux500_init_irq(void); |
| 24 | extern void __init ux500_init_late(void); | 24 | extern void __init ux500_init_late(void); |
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c index 1dd281efc020..f5c33df7a597 100644 --- a/arch/arm/mach-vt8500/vt8500.c +++ b/arch/arm/mach-vt8500/vt8500.c | |||
| @@ -173,6 +173,7 @@ static const char * const vt8500_dt_compat[] = { | |||
| 173 | "wm,wm8505", | 173 | "wm,wm8505", |
| 174 | "wm,wm8750", | 174 | "wm,wm8750", |
| 175 | "wm,wm8850", | 175 | "wm,wm8850", |
| 176 | NULL | ||
| 176 | }; | 177 | }; |
| 177 | 178 | ||
| 178 | DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)") | 179 | DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)") |
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 251f827271e9..c019b7aaf776 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
| @@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = { | |||
| 383 | 383 | ||
| 384 | static struct platform_device orion_ge10_shared = { | 384 | static struct platform_device orion_ge10_shared = { |
| 385 | .name = MV643XX_ETH_SHARED_NAME, | 385 | .name = MV643XX_ETH_SHARED_NAME, |
| 386 | .id = 1, | 386 | .id = 2, |
| 387 | .dev = { | 387 | .dev = { |
| 388 | .platform_data = &orion_ge10_shared_data, | 388 | .platform_data = &orion_ge10_shared_data, |
| 389 | }, | 389 | }, |
| @@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = { | |||
| 398 | 398 | ||
| 399 | static struct platform_device orion_ge10 = { | 399 | static struct platform_device orion_ge10 = { |
| 400 | .name = MV643XX_ETH_NAME, | 400 | .name = MV643XX_ETH_NAME, |
| 401 | .id = 1, | 401 | .id = 2, |
| 402 | .num_resources = 2, | 402 | .num_resources = 1, |
| 403 | .resource = orion_ge10_resources, | 403 | .resource = orion_ge10_resources, |
| 404 | .dev = { | 404 | .dev = { |
| 405 | .coherent_dma_mask = DMA_BIT_MASK(32), | 405 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| @@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = { | |||
| 432 | 432 | ||
| 433 | static struct platform_device orion_ge11_shared = { | 433 | static struct platform_device orion_ge11_shared = { |
| 434 | .name = MV643XX_ETH_SHARED_NAME, | 434 | .name = MV643XX_ETH_SHARED_NAME, |
| 435 | .id = 1, | 435 | .id = 3, |
| 436 | .dev = { | 436 | .dev = { |
| 437 | .platform_data = &orion_ge11_shared_data, | 437 | .platform_data = &orion_ge11_shared_data, |
| 438 | }, | 438 | }, |
| @@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = { | |||
| 447 | 447 | ||
| 448 | static struct platform_device orion_ge11 = { | 448 | static struct platform_device orion_ge11 = { |
| 449 | .name = MV643XX_ETH_NAME, | 449 | .name = MV643XX_ETH_NAME, |
| 450 | .id = 1, | 450 | .id = 3, |
| 451 | .num_resources = 2, | 451 | .num_resources = 1, |
| 452 | .resource = orion_ge11_resources, | 452 | .resource = orion_ge11_resources, |
| 453 | .dev = { | 453 | .dev = { |
| 454 | .coherent_dma_mask = DMA_BIT_MASK(32), | 454 | .coherent_dma_mask = DMA_BIT_MASK(32), |
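The ge10/ge11 renumbering above matters because the platform core builds the device name from the name and id fields, so two devices registered with the same name and a duplicate id collide. Illustrative only (example_shared is a hypothetical device; the name macro comes from <linux/mv643xx_eth.h> as in the file above):

static struct platform_device example_shared = {
	.name = MV643XX_ETH_SHARED_NAME,
	.id   = 2,	/* instance is named "<name>.2", distinct from ids 0 and 1 */
};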
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h index e06fc5fefa14..d9a24f605a2b 100644 --- a/arch/arm/plat-orion/include/plat/common.h +++ b/arch/arm/plat-orion/include/plat/common.h | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | #ifndef __PLAT_COMMON_H | 11 | #ifndef __PLAT_COMMON_H |
| 12 | #include <linux/mv643xx_eth.h> | 12 | #include <linux/mv643xx_eth.h> |
| 13 | #include <linux/platform_data/usb-ehci-orion.h> | ||
| 13 | 14 | ||
| 14 | struct dsa_platform_data; | 15 | struct dsa_platform_data; |
| 15 | struct mv_sata_platform_data; | 16 | struct mv_sata_platform_data; |
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c index ca07cb1b155a..79690f2f6d3f 100644 --- a/arch/arm/plat-samsung/adc.c +++ b/arch/arm/plat-samsung/adc.c | |||
| @@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev) | |||
| 381 | } | 381 | } |
| 382 | 382 | ||
| 383 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 383 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 384 | if (!regs) { | ||
| 385 | dev_err(dev, "failed to find registers\n"); | ||
| 386 | return -ENXIO; | ||
| 387 | } | ||
| 388 | |||
| 389 | adc->regs = devm_ioremap_resource(dev, regs); | 384 | adc->regs = devm_ioremap_resource(dev, regs); |
| 390 | if (IS_ERR(adc->regs)) | 385 | if (IS_ERR(adc->regs)) |
| 391 | return PTR_ERR(adc->regs); | 386 | return PTR_ERR(adc->regs); |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index d30042e39974..13609e01f4b7 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
| @@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, | |||
| 152 | } | 152 | } |
| 153 | EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); | 153 | EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); |
| 154 | 154 | ||
| 155 | static int __init xen_secondary_init(unsigned int cpu) | 155 | static void __init xen_percpu_init(void *unused) |
| 156 | { | 156 | { |
| 157 | struct vcpu_register_vcpu_info info; | 157 | struct vcpu_register_vcpu_info info; |
| 158 | struct vcpu_info *vcpup; | 158 | struct vcpu_info *vcpup; |
| 159 | int err; | 159 | int err; |
| 160 | int cpu = get_cpu(); | ||
| 160 | 161 | ||
| 161 | pr_info("Xen: initializing cpu%d\n", cpu); | 162 | pr_info("Xen: initializing cpu%d\n", cpu); |
| 162 | vcpup = per_cpu_ptr(xen_vcpu_info, cpu); | 163 | vcpup = per_cpu_ptr(xen_vcpu_info, cpu); |
| @@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu) | |||
| 165 | info.offset = offset_in_page(vcpup); | 166 | info.offset = offset_in_page(vcpup); |
| 166 | 167 | ||
| 167 | err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); | 168 | err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); |
| 168 | if (err) { | 169 | BUG_ON(err); |
| 169 | pr_debug("register_vcpu_info failed: err=%d\n", err); | 170 | per_cpu(xen_vcpu, cpu) = vcpup; |
| 170 | } else { | 171 | |
| 171 | /* This cpu is using the registered vcpu info, even if | 172 | enable_percpu_irq(xen_events_irq, 0); |
| 172 | later ones fail to. */ | ||
| 173 | per_cpu(xen_vcpu, cpu) = vcpup; | ||
| 174 | } | ||
| 175 | return 0; | ||
| 176 | } | 173 | } |
| 177 | 174 | ||
| 178 | static void xen_restart(char str, const char *cmd) | 175 | static void xen_restart(char str, const char *cmd) |
| @@ -208,7 +205,6 @@ static int __init xen_guest_init(void) | |||
| 208 | const char *version = NULL; | 205 | const char *version = NULL; |
| 209 | const char *xen_prefix = "xen,xen-"; | 206 | const char *xen_prefix = "xen,xen-"; |
| 210 | struct resource res; | 207 | struct resource res; |
| 211 | int i; | ||
| 212 | 208 | ||
| 213 | node = of_find_compatible_node(NULL, NULL, "xen,xen"); | 209 | node = of_find_compatible_node(NULL, NULL, "xen,xen"); |
| 214 | if (!node) { | 210 | if (!node) { |
| @@ -265,19 +261,23 @@ static int __init xen_guest_init(void) | |||
| 265 | sizeof(struct vcpu_info)); | 261 | sizeof(struct vcpu_info)); |
| 266 | if (xen_vcpu_info == NULL) | 262 | if (xen_vcpu_info == NULL) |
| 267 | return -ENOMEM; | 263 | return -ENOMEM; |
| 268 | for_each_online_cpu(i) | ||
| 269 | xen_secondary_init(i); | ||
| 270 | 264 | ||
| 271 | gnttab_init(); | 265 | gnttab_init(); |
| 272 | if (!xen_initial_domain()) | 266 | if (!xen_initial_domain()) |
| 273 | xenbus_probe(NULL); | 267 | xenbus_probe(NULL); |
| 274 | 268 | ||
| 269 | return 0; | ||
| 270 | } | ||
| 271 | core_initcall(xen_guest_init); | ||
| 272 | |||
| 273 | static int __init xen_pm_init(void) | ||
| 274 | { | ||
| 275 | pm_power_off = xen_power_off; | 275 | pm_power_off = xen_power_off; |
| 276 | arm_pm_restart = xen_restart; | 276 | arm_pm_restart = xen_restart; |
| 277 | 277 | ||
| 278 | return 0; | 278 | return 0; |
| 279 | } | 279 | } |
| 280 | core_initcall(xen_guest_init); | 280 | subsys_initcall(xen_pm_init); |
| 281 | 281 | ||
| 282 | static irqreturn_t xen_arm_callback(int irq, void *arg) | 282 | static irqreturn_t xen_arm_callback(int irq, void *arg) |
| 283 | { | 283 | { |
| @@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg) | |||
| 285 | return IRQ_HANDLED; | 285 | return IRQ_HANDLED; |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | static __init void xen_percpu_enable_events(void *unused) | ||
| 289 | { | ||
| 290 | enable_percpu_irq(xen_events_irq, 0); | ||
| 291 | } | ||
| 292 | |||
| 293 | static int __init xen_init_events(void) | 288 | static int __init xen_init_events(void) |
| 294 | { | 289 | { |
| 295 | if (!xen_domain() || xen_events_irq < 0) | 290 | if (!xen_domain() || xen_events_irq < 0) |
| @@ -303,7 +298,7 @@ static int __init xen_init_events(void) | |||
| 303 | return -EINVAL; | 298 | return -EINVAL; |
| 304 | } | 299 | } |
| 305 | 300 | ||
| 306 | on_each_cpu(xen_percpu_enable_events, NULL, 0); | 301 | on_each_cpu(xen_percpu_init, NULL, 0); |
| 307 | 302 | ||
| 308 | return 0; | 303 | return 0; |
| 309 | } | 304 | } |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 48347dcf0566..56b3f6d447ae 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -122,8 +122,6 @@ endmenu | |||
| 122 | 122 | ||
| 123 | menu "Kernel Features" | 123 | menu "Kernel Features" |
| 124 | 124 | ||
| 125 | source "kernel/time/Kconfig" | ||
| 126 | |||
| 127 | config ARM64_64K_PAGES | 125 | config ARM64_64K_PAGES |
| 128 | bool "Enable 64KB pages support" | 126 | bool "Enable 64KB pages support" |
| 129 | help | 127 | help |
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c8eedc604984..5aceb83b3f5c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
| @@ -82,7 +82,7 @@ | |||
| 82 | 82 | ||
| 83 | .macro enable_dbg_if_not_stepping, tmp | 83 | .macro enable_dbg_if_not_stepping, tmp |
| 84 | mrs \tmp, mdscr_el1 | 84 | mrs \tmp, mdscr_el1 |
| 85 | tbnz \tmp, #1, 9990f | 85 | tbnz \tmp, #0, 9990f |
| 86 | enable_dbg | 86 | enable_dbg |
| 87 | 9990: | 87 | 9990: |
| 88 | .endm | 88 | .endm |
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 0c3ba9f51376..f4726dc054b3 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
| @@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el) | |||
| 136 | */ | 136 | */ |
| 137 | static void clear_os_lock(void *unused) | 137 | static void clear_os_lock(void *unused) |
| 138 | { | 138 | { |
| 139 | asm volatile("msr mdscr_el1, %0" : : "r" (0)); | ||
| 140 | isb(); | ||
| 141 | asm volatile("msr oslar_el1, %0" : : "r" (0)); | 139 | asm volatile("msr oslar_el1, %0" : : "r" (0)); |
| 142 | isb(); | 140 | isb(); |
| 143 | } | 141 | } |
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c index ac974f48a7a2..fbb6e1843659 100644 --- a/arch/arm64/kernel/early_printk.c +++ b/arch/arm64/kernel/early_printk.c | |||
| @@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n) | |||
| 95 | } | 95 | } |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static struct console early_console = { | 98 | static struct console early_console_dev = { |
| 99 | .name = "earlycon", | 99 | .name = "earlycon", |
| 100 | .write = early_write, | 100 | .write = early_write, |
| 101 | .flags = CON_PRINTBUFFER | CON_BOOT, | 101 | .flags = CON_PRINTBUFFER | CON_BOOT, |
| @@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf) | |||
| 145 | early_base = early_io_map(paddr, EARLYCON_IOBASE); | 145 | early_base = early_io_map(paddr, EARLYCON_IOBASE); |
| 146 | 146 | ||
| 147 | printch = match->printch; | 147 | printch = match->printch; |
| 148 | register_console(&early_console); | 148 | early_console = &early_console_dev; |
| 149 | register_console(&early_console_dev); | ||
| 149 | 150 | ||
| 150 | return 0; | 151 | return 0; |
| 151 | } | 152 | } |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6a9a53292590..add6ea616843 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
| @@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p) | |||
| 282 | #endif | 282 | #endif |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static int __init arm64_of_clk_init(void) | 285 | static int __init arm64_device_init(void) |
| 286 | { | 286 | { |
| 287 | of_clk_init(NULL); | 287 | of_clk_init(NULL); |
| 288 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
| 288 | return 0; | 289 | return 0; |
| 289 | } | 290 | } |
| 290 | arch_initcall(arm64_of_clk_init); | 291 | arch_initcall(arm64_device_init); |
| 291 | 292 | ||
| 292 | static DEFINE_PER_CPU(struct cpu, cpu_data); | 293 | static DEFINE_PER_CPU(struct cpu, cpu_data); |
| 293 | 294 | ||
| @@ -305,13 +306,6 @@ static int __init topology_init(void) | |||
| 305 | } | 306 | } |
| 306 | subsys_initcall(topology_init); | 307 | subsys_initcall(topology_init); |
| 307 | 308 | ||
| 308 | static int __init arm64_device_probe(void) | ||
| 309 | { | ||
| 310 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | device_initcall(arm64_device_probe); | ||
| 314 | |||
| 315 | static const char *hwcap_str[] = { | 309 | static const char *hwcap_str[] = { |
| 316 | "fp", | 310 | "fp", |
| 317 | "asimd", | 311 | "asimd", |
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index abe69b80cf7f..48a386094fa3 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S | |||
| @@ -52,7 +52,7 @@ loop1: | |||
| 52 | add x2, x2, #4 // add 4 (line length offset) | 52 | add x2, x2, #4 // add 4 (line length offset) |
| 53 | mov x4, #0x3ff | 53 | mov x4, #0x3ff |
| 54 | and x4, x4, x1, lsr #3 // find maximum number on the way size | 54 | and x4, x4, x1, lsr #3 // find maximum number on the way size |
| 55 | clz x5, x4 // find bit position of way size increment | 55 | clz w5, w4 // find bit position of way size increment |
| 56 | mov x7, #0x7fff | 56 | mov x7, #0x7fff |
| 57 | and x7, x7, x1, lsr #13 // extract max number of the index size | 57 | and x7, x7, x1, lsr #13 // extract max number of the index size |
| 58 | loop2: | 58 | loop2: |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index f1d8b9bbfdad..a82ae8868077 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
| @@ -119,8 +119,7 @@ ENTRY(__cpu_setup) | |||
| 119 | 119 | ||
| 120 | mov x0, #3 << 20 | 120 | mov x0, #3 << 20 |
| 121 | msr cpacr_el1, x0 // Enable FP/ASIMD | 121 | msr cpacr_el1, x0 // Enable FP/ASIMD |
| 122 | mov x0, #1 | 122 | msr mdscr_el1, xzr // Reset mdscr_el1 |
| 123 | msr oslar_el1, x0 // Set the debug OS lock | ||
| 124 | tlbi vmalle1is // invalidate I + D TLBs | 123 | tlbi vmalle1is // invalidate I + D TLBs |
| 125 | /* | 124 | /* |
| 126 | * Memory region attributes for LPAE: | 125 | * Memory region attributes for LPAE: |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 1be13727323f..b7e59853fd33 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h | |||
| @@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base) | |||
| 118 | */ | 118 | */ |
| 119 | static inline unsigned long virt_to_phys(volatile const void *address) | 119 | static inline unsigned long virt_to_phys(volatile const void *address) |
| 120 | { | 120 | { |
| 121 | return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; | 121 | return __pa(address); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | /* | 124 | /* |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e68781e18387..143875c6c95a 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
| @@ -336,7 +336,7 @@ enum emulation_result { | |||
| 336 | #define VPN2_MASK 0xffffe000 | 336 | #define VPN2_MASK 0xffffe000 |
| 337 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) | 337 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) |
| 338 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) | 338 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) |
| 339 | #define TLB_ASID(x) (ASID_MASK((x).tlb_hi)) | 339 | #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) |
| 340 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) | 340 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) |
| 341 | 341 | ||
| 342 | struct kvm_mips_tlb { | 342 | struct kvm_mips_tlb { |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 1554721e4808..820116067c10 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
| @@ -67,68 +67,45 @@ extern unsigned long pgd_current[]; | |||
| 67 | TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) | 67 | TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) |
| 68 | #endif | 68 | #endif |
| 69 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ | 69 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ |
| 70 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
| 70 | 71 | ||
| 71 | #define ASID_INC(asid) \ | 72 | #define ASID_INC 0x40 |
| 72 | ({ \ | 73 | #define ASID_MASK 0xfc0 |
| 73 | unsigned long __asid = asid; \ | 74 | |
| 74 | __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ | 75 | #elif defined(CONFIG_CPU_R8000) |
| 75 | ".section\t__asid_inc,\"a\"\n\t" \ | 76 | |
| 76 | ".word\t1b\n\t" \ | 77 | #define ASID_INC 0x10 |
| 77 | ".previous" \ | 78 | #define ASID_MASK 0xff0 |
| 78 | :"=r" (__asid) \ | 79 | |
| 79 | :"0" (__asid)); \ | 80 | #elif defined(CONFIG_MIPS_MT_SMTC) |
| 80 | __asid; \ | 81 | |
| 81 | }) | 82 | #define ASID_INC 0x1 |
| 82 | #define ASID_MASK(asid) \ | 83 | extern unsigned long smtc_asid_mask; |
| 83 | ({ \ | 84 | #define ASID_MASK (smtc_asid_mask) |
| 84 | unsigned long __asid = asid; \ | 85 | #define HW_ASID_MASK 0xff |
| 85 | __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ | 86 | /* End SMTC/34K debug hack */ |
| 86 | ".section\t__asid_mask,\"a\"\n\t" \ | 87 | #else /* FIXME: not correct for R6000 */ |
| 87 | ".word\t1b\n\t" \ | 88 | |
| 88 | ".previous" \ | 89 | #define ASID_INC 0x1 |
| 89 | :"=r" (__asid) \ | 90 | #define ASID_MASK 0xff |
| 90 | :"r" (__asid)); \ | ||
| 91 | __asid; \ | ||
| 92 | }) | ||
| 93 | #define ASID_VERSION_MASK \ | ||
| 94 | ({ \ | ||
| 95 | unsigned long __asid; \ | ||
| 96 | __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \ | ||
| 97 | ".section\t__asid_version_mask,\"a\"\n\t" \ | ||
| 98 | ".word\t1b\n\t" \ | ||
| 99 | ".previous" \ | ||
| 100 | :"=r" (__asid)); \ | ||
| 101 | __asid; \ | ||
| 102 | }) | ||
| 103 | #define ASID_FIRST_VERSION \ | ||
| 104 | ({ \ | ||
| 105 | unsigned long __asid = asid; \ | ||
| 106 | __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \ | ||
| 107 | ".section\t__asid_first_version,\"a\"\n\t" \ | ||
| 108 | ".word\t1b\n\t" \ | ||
| 109 | ".previous" \ | ||
| 110 | :"=r" (__asid)); \ | ||
| 111 | __asid; \ | ||
| 112 | }) | ||
| 113 | |||
| 114 | #define ASID_FIRST_VERSION_R3000 0x1000 | ||
| 115 | #define ASID_FIRST_VERSION_R4000 0x100 | ||
| 116 | #define ASID_FIRST_VERSION_R8000 0x1000 | ||
| 117 | #define ASID_FIRST_VERSION_RM9000 0x1000 | ||
| 118 | 91 | ||
| 119 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 120 | #define SMTC_HW_ASID_MASK 0xff | ||
| 121 | extern unsigned int smtc_asid_mask; | ||
| 122 | #endif | 92 | #endif |
| 123 | 93 | ||
| 124 | #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) | 94 | #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) |
| 125 | #define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) | 95 | #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) |
| 126 | #define asid_cache(cpu) (cpu_data[cpu].asid_cache) | 96 | #define asid_cache(cpu) (cpu_data[cpu].asid_cache) |
| 127 | 97 | ||
| 128 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 98 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
| 129 | { | 99 | { |
| 130 | } | 100 | } |
| 131 | 101 | ||
| 102 | /* | ||
| 103 | * All unused by hardware upper bits will be considered | ||
| 104 | * as a software asid extension. | ||
| 105 | */ | ||
| 106 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) | ||
| 107 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) | ||
| 108 | |||
| 132 | #ifndef CONFIG_MIPS_MT_SMTC | 109 | #ifndef CONFIG_MIPS_MT_SMTC |
| 133 | /* Normal, classic MIPS get_new_mmu_context */ | 110 | /* Normal, classic MIPS get_new_mmu_context */ |
| 134 | static inline void | 111 | static inline void |
| @@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
| 137 | extern void kvm_local_flush_tlb_all(void); | 114 | extern void kvm_local_flush_tlb_all(void); |
| 138 | unsigned long asid = asid_cache(cpu); | 115 | unsigned long asid = asid_cache(cpu); |
| 139 | 116 | ||
| 140 | if (!ASID_MASK((asid = ASID_INC(asid)))) { | 117 | if (! ((asid += ASID_INC) & ASID_MASK) ) { |
| 141 | if (cpu_has_vtag_icache) | 118 | if (cpu_has_vtag_icache) |
| 142 | flush_icache_all(); | 119 | flush_icache_all(); |
| 143 | #ifdef CONFIG_VIRTUALIZATION | 120 | #ifdef CONFIG_VIRTUALIZATION |
| @@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 200 | * free up the ASID value for use and flush any old | 177 | * free up the ASID value for use and flush any old |
| 201 | * instances of it from the TLB. | 178 | * instances of it from the TLB. |
| 202 | */ | 179 | */ |
| 203 | oldasid = ASID_MASK(read_c0_entryhi()); | 180 | oldasid = (read_c0_entryhi() & ASID_MASK); |
| 204 | if(smtc_live_asid[mytlb][oldasid]) { | 181 | if(smtc_live_asid[mytlb][oldasid]) { |
| 205 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | 182 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); |
| 206 | if(smtc_live_asid[mytlb][oldasid] == 0) | 183 | if(smtc_live_asid[mytlb][oldasid] == 0) |
| @@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 211 | * having ASID_MASK smaller than the hardware maximum, | 188 | * having ASID_MASK smaller than the hardware maximum, |
| 212 | * make sure no "soft" bits become "hard"... | 189 | * make sure no "soft" bits become "hard"... |
| 213 | */ | 190 | */ |
| 214 | write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | | 191 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | |
| 215 | cpu_asid(cpu, next)); | 192 | cpu_asid(cpu, next)); |
| 216 | ehb(); /* Make sure it propagates to TCStatus */ | 193 | ehb(); /* Make sure it propagates to TCStatus */ |
| 217 | evpe(mtflags); | 194 | evpe(mtflags); |
| @@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
| 264 | #ifdef CONFIG_MIPS_MT_SMTC | 241 | #ifdef CONFIG_MIPS_MT_SMTC |
| 265 | /* See comments for similar code above */ | 242 | /* See comments for similar code above */ |
| 266 | mtflags = dvpe(); | 243 | mtflags = dvpe(); |
| 267 | oldasid = ASID_MASK(read_c0_entryhi()); | 244 | oldasid = read_c0_entryhi() & ASID_MASK; |
| 268 | if(smtc_live_asid[mytlb][oldasid]) { | 245 | if(smtc_live_asid[mytlb][oldasid]) { |
| 269 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | 246 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); |
| 270 | if(smtc_live_asid[mytlb][oldasid] == 0) | 247 | if(smtc_live_asid[mytlb][oldasid] == 0) |
| 271 | smtc_flush_tlb_asid(oldasid); | 248 | smtc_flush_tlb_asid(oldasid); |
| 272 | } | 249 | } |
| 273 | /* See comments for similar code above */ | 250 | /* See comments for similar code above */ |
| 274 | write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | | 251 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | |
| 275 | cpu_asid(cpu, next)); | 252 | cpu_asid(cpu, next)); |
| 276 | ehb(); /* Make sure it propagates to TCStatus */ | 253 | ehb(); /* Make sure it propagates to TCStatus */ |
| 277 | evpe(mtflags); | 254 | evpe(mtflags); |
| 278 | #else | 255 | #else |
| @@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) | |||
| 309 | #ifdef CONFIG_MIPS_MT_SMTC | 286 | #ifdef CONFIG_MIPS_MT_SMTC |
| 310 | /* See comments for similar code above */ | 287 | /* See comments for similar code above */ |
| 311 | prevvpe = dvpe(); | 288 | prevvpe = dvpe(); |
| 312 | oldasid = ASID_MASK(read_c0_entryhi()); | 289 | oldasid = (read_c0_entryhi() & ASID_MASK); |
| 313 | if (smtc_live_asid[mytlb][oldasid]) { | 290 | if (smtc_live_asid[mytlb][oldasid]) { |
| 314 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | 291 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); |
| 315 | if(smtc_live_asid[mytlb][oldasid] == 0) | 292 | if(smtc_live_asid[mytlb][oldasid] == 0) |
| 316 | smtc_flush_tlb_asid(oldasid); | 293 | smtc_flush_tlb_asid(oldasid); |
| 317 | } | 294 | } |
| 318 | /* See comments for similar code above */ | 295 | /* See comments for similar code above */ |
| 319 | write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | 296 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
| 320 | | cpu_asid(cpu, mm)); | 297 | | cpu_asid(cpu, mm)); |
| 321 | ehb(); /* Make sure it propagates to TCStatus */ | 298 | ehb(); /* Make sure it propagates to TCStatus */ |
| 322 | evpe(prevvpe); | 299 | evpe(prevvpe); |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index eab99e536b5c..ec1ca537fbc1 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
| @@ -46,7 +46,6 @@ | |||
| 46 | #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ | 46 | #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ |
| 47 | 47 | ||
| 48 | #include <linux/pfn.h> | 48 | #include <linux/pfn.h> |
| 49 | #include <asm/io.h> | ||
| 50 | 49 | ||
| 51 | extern void build_clear_page(void); | 50 | extern void build_clear_page(void); |
| 52 | extern void build_copy_page(void); | 51 | extern void build_copy_page(void); |
| @@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 151 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) |
| 152 | #endif | 151 | #endif |
| 153 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
| 153 | #include <asm/io.h> | ||
| 154 | 154 | ||
| 155 | /* | 155 | /* |
| 156 | * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad | 156 | * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad |
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index 35bed0d2342c..3be9e7bb30ff 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/bootmem.h> | 2 | #include <linux/bootmem.h> |
| 3 | #include <linux/crash_dump.h> | 3 | #include <linux/crash_dump.h> |
| 4 | #include <asm/uaccess.h> | 4 | #include <asm/uaccess.h> |
| 5 | #include <linux/slab.h> | ||
| 5 | 6 | ||
| 6 | static int __init parse_savemaxmem(char *p) | 7 | static int __init parse_savemaxmem(char *p) |
| 7 | { | 8 | { |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 5c2ba9f08a80..9098829bfcb0 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
| @@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
| 493 | .set noreorder | 493 | .set noreorder |
| 494 | /* check if TLB contains a entry for EPC */ | 494 | /* check if TLB contains a entry for EPC */ |
| 495 | MFC0 k1, CP0_ENTRYHI | 495 | MFC0 k1, CP0_ENTRYHI |
| 496 | andi k1, 0xff /* ASID_MASK patched at run-time!! */ | 496 | andi k1, 0xff /* ASID_MASK */ |
| 497 | MFC0 k0, CP0_EPC | 497 | MFC0 k0, CP0_EPC |
| 498 | PTR_SRL k0, _PAGE_SHIFT + 1 | 498 | PTR_SRL k0, _PAGE_SHIFT + 1 |
| 499 | PTR_SLL k0, _PAGE_SHIFT + 1 | 499 | PTR_SLL k0, _PAGE_SHIFT + 1 |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eb902c1f0cad..a682a87bcc04 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
| @@ -224,6 +224,9 @@ struct mips_frame_info { | |||
| 224 | int pc_offset; | 224 | int pc_offset; |
| 225 | }; | 225 | }; |
| 226 | 226 | ||
| 227 | #define J_TARGET(pc,target) \ | ||
| 228 | (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) | ||
| 229 | |||
| 227 | static inline int is_ra_save_ins(union mips_instruction *ip) | 230 | static inline int is_ra_save_ins(union mips_instruction *ip) |
| 228 | { | 231 | { |
| 229 | #ifdef CONFIG_CPU_MICROMIPS | 232 | #ifdef CONFIG_CPU_MICROMIPS |
| @@ -264,7 +267,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip) | |||
| 264 | #endif | 267 | #endif |
| 265 | } | 268 | } |
| 266 | 269 | ||
| 267 | static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) | 270 | static inline int is_jump_ins(union mips_instruction *ip) |
| 268 | { | 271 | { |
| 269 | #ifdef CONFIG_CPU_MICROMIPS | 272 | #ifdef CONFIG_CPU_MICROMIPS |
| 270 | /* | 273 | /* |
| @@ -288,6 +291,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) | |||
| 288 | return 0; | 291 | return 0; |
| 289 | return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); | 292 | return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); |
| 290 | #else | 293 | #else |
| 294 | if (ip->j_format.opcode == j_op) | ||
| 295 | return 1; | ||
| 291 | if (ip->j_format.opcode == jal_op) | 296 | if (ip->j_format.opcode == jal_op) |
| 292 | return 1; | 297 | return 1; |
| 293 | if (ip->r_format.opcode != spec_op) | 298 | if (ip->r_format.opcode != spec_op) |
| @@ -350,7 +355,7 @@ static int get_frame_info(struct mips_frame_info *info) | |||
| 350 | 355 | ||
| 351 | for (i = 0; i < max_insns; i++, ip++) { | 356 | for (i = 0; i < max_insns; i++, ip++) { |
| 352 | 357 | ||
| 353 | if (is_jal_jalr_jr_ins(ip)) | 358 | if (is_jump_ins(ip)) |
| 354 | break; | 359 | break; |
| 355 | if (!info->frame_size) { | 360 | if (!info->frame_size) { |
| 356 | if (is_sp_move_ins(ip)) | 361 | if (is_sp_move_ins(ip)) |
| @@ -393,15 +398,42 @@ err: | |||
| 393 | 398 | ||
| 394 | static struct mips_frame_info schedule_mfi __read_mostly; | 399 | static struct mips_frame_info schedule_mfi __read_mostly; |
| 395 | 400 | ||
| 401 | #ifdef CONFIG_KALLSYMS | ||
| 402 | static unsigned long get___schedule_addr(void) | ||
| 403 | { | ||
| 404 | return kallsyms_lookup_name("__schedule"); | ||
| 405 | } | ||
| 406 | #else | ||
| 407 | static unsigned long get___schedule_addr(void) | ||
| 408 | { | ||
| 409 | union mips_instruction *ip = (void *)schedule; | ||
| 410 | int max_insns = 8; | ||
| 411 | int i; | ||
| 412 | |||
| 413 | for (i = 0; i < max_insns; i++, ip++) { | ||
| 414 | if (ip->j_format.opcode == j_op) | ||
| 415 | return J_TARGET(ip, ip->j_format.target); | ||
| 416 | } | ||
| 417 | return 0; | ||
| 418 | } | ||
| 419 | #endif | ||
| 420 | |||
| 396 | static int __init frame_info_init(void) | 421 | static int __init frame_info_init(void) |
| 397 | { | 422 | { |
| 398 | unsigned long size = 0; | 423 | unsigned long size = 0; |
| 399 | #ifdef CONFIG_KALLSYMS | 424 | #ifdef CONFIG_KALLSYMS |
| 400 | unsigned long ofs; | 425 | unsigned long ofs; |
| 426 | #endif | ||
| 427 | unsigned long addr; | ||
| 401 | 428 | ||
| 402 | kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); | 429 | addr = get___schedule_addr(); |
| 430 | if (!addr) | ||
| 431 | addr = (unsigned long)schedule; | ||
| 432 | |||
| 433 | #ifdef CONFIG_KALLSYMS | ||
| 434 | kallsyms_lookup_size_offset(addr, &size, &ofs); | ||
| 403 | #endif | 435 | #endif |
| 404 | schedule_mfi.func = schedule; | 436 | schedule_mfi.func = (void *)addr; |
| 405 | schedule_mfi.func_size = size; | 437 | schedule_mfi.func_size = size; |
| 406 | 438 | ||
| 407 | get_frame_info(&schedule_mfi); | 439 | get_frame_info(&schedule_mfi); |
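The new J_TARGET() helper above reconstructs the destination of a MIPS "j" instruction: the low 26 bits of the instruction word are a target field, shifted left by two and combined with the top four bits of the jump's own address. The !CONFIG_KALLSYMS fallback uses it to follow the tail call into __schedule. A worked standalone sketch (the instruction word below is a made-up example):

	#include <stdio.h>
	#include <stdint.h>

	#define J_TARGET(pc, target) \
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

	int main(void)
	{
		unsigned long pc = 0x80123450UL;            /* address of the j instruction */
		uint32_t insn = 0x0810d158;                 /* opcode 000010 = j             */
		unsigned long target = insn & 0x03ffffff;   /* 26-bit target field           */

		/* 0x80000000 | (0x0010d158 << 2) = 0x80434560 */
		printf("jump target: %#lx\n", J_TARGET(pc, target));
		return 0;
	}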
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 31d22f3121c9..7186222dc5bb 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
| @@ -111,7 +111,7 @@ static int vpe0limit; | |||
| 111 | static int ipibuffers; | 111 | static int ipibuffers; |
| 112 | static int nostlb; | 112 | static int nostlb; |
| 113 | static int asidmask; | 113 | static int asidmask; |
| 114 | unsigned int smtc_asid_mask = 0xff; | 114 | unsigned long smtc_asid_mask = 0xff; |
| 115 | 115 | ||
| 116 | static int __init vpe0tcs(char *str) | 116 | static int __init vpe0tcs(char *str) |
| 117 | { | 117 | { |
| @@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
| 1395 | asid = asid_cache(cpu); | 1395 | asid = asid_cache(cpu); |
| 1396 | 1396 | ||
| 1397 | do { | 1397 | do { |
| 1398 | if (!ASID_MASK(ASID_INC(asid))) { | 1398 | if (!((asid += ASID_INC) & ASID_MASK) ) { |
| 1399 | if (cpu_has_vtag_icache) | 1399 | if (cpu_has_vtag_icache) |
| 1400 | flush_icache_all(); | 1400 | flush_icache_all(); |
| 1401 | /* Traverse all online CPUs (hack requires contiguous range) */ | 1401 | /* Traverse all online CPUs (hack requires contiguous range) */ |
| @@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
| 1414 | mips_ihb(); | 1414 | mips_ihb(); |
| 1415 | } | 1415 | } |
| 1416 | tcstat = read_tc_c0_tcstatus(); | 1416 | tcstat = read_tc_c0_tcstatus(); |
| 1417 | smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); | 1417 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); |
| 1418 | if (!prevhalt) | 1418 | if (!prevhalt) |
| 1419 | write_tc_c0_tchalt(0); | 1419 | write_tc_c0_tchalt(0); |
| 1420 | } | 1420 | } |
| @@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
| 1423 | asid = ASID_FIRST_VERSION; | 1423 | asid = ASID_FIRST_VERSION; |
| 1424 | local_flush_tlb_all(); /* start new asid cycle */ | 1424 | local_flush_tlb_all(); /* start new asid cycle */ |
| 1425 | } | 1425 | } |
| 1426 | } while (smtc_live_asid[tlb][ASID_MASK(asid)]); | 1426 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); |
| 1427 | 1427 | ||
| 1428 | /* | 1428 | /* |
| 1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | 1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. |
| @@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid) | |||
| 1461 | tlb_read(); | 1461 | tlb_read(); |
| 1462 | ehb(); | 1462 | ehb(); |
| 1463 | ehi = read_c0_entryhi(); | 1463 | ehi = read_c0_entryhi(); |
| 1464 | if (ASID_MASK(ehi) == asid) { | 1464 | if ((ehi & ASID_MASK) == asid) { |
| 1465 | /* | 1465 | /* |
| 1466 | * Invalidate only entries with specified ASID, | 1466 | * Invalidate only entries with specified ASID, |
| 1467 | * making sure all entries differ. | 1467 | * making sure all entries differ. |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 77cff1f6d050..cb14db3c5764 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
| 1656 | unsigned int cpu = smp_processor_id(); | 1656 | unsigned int cpu = smp_processor_id(); |
| 1657 | unsigned int status_set = ST0_CU0; | 1657 | unsigned int status_set = ST0_CU0; |
| 1658 | unsigned int hwrena = cpu_hwrena_impl_bits; | 1658 | unsigned int hwrena = cpu_hwrena_impl_bits; |
| 1659 | unsigned long asid = 0; | ||
| 1660 | #ifdef CONFIG_MIPS_MT_SMTC | 1659 | #ifdef CONFIG_MIPS_MT_SMTC |
| 1661 | int secondaryTC = 0; | 1660 | int secondaryTC = 0; |
| 1662 | int bootTC = (cpu == 0); | 1661 | int bootTC = (cpu == 0); |
| @@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
| 1740 | } | 1739 | } |
| 1741 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1740 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 1742 | 1741 | ||
| 1743 | asid = ASID_FIRST_VERSION; | 1742 | if (!cpu_data[cpu].asid_cache) |
| 1744 | cpu_data[cpu].asid_cache = asid; | 1743 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
| 1745 | TLBMISS_HANDLER_SETUP(); | ||
| 1746 | 1744 | ||
| 1747 | atomic_inc(&init_mm.mm_count); | 1745 | atomic_inc(&init_mm.mm_count); |
| 1748 | current->active_mm = &init_mm; | 1746 | current->active_mm = &init_mm; |
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c index 2b2bac9a40aa..4b6274b47f33 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/kvm_mips_emul.c | |||
| @@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
| 525 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", | 525 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", |
| 526 | kvm_read_c0_guest_ebase(cop0)); | 526 | kvm_read_c0_guest_ebase(cop0)); |
| 527 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 527 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
| 528 | uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); | 528 | uint32_t nasid = |
| 529 | vcpu->arch.gprs[rt] & ASID_MASK; | ||
| 529 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) | 530 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) |
| 530 | && | 531 | && |
| 531 | (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) | 532 | ((kvm_read_c0_guest_entryhi(cop0) & |
| 532 | != nasid)) { | 533 | ASID_MASK) != nasid)) { |
| 533 | 534 | ||
| 534 | kvm_debug | 535 | kvm_debug |
| 535 | ("MTCz, change ASID from %#lx to %#lx\n", | 536 | ("MTCz, change ASID from %#lx to %#lx\n", |
| 536 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), | 537 | kvm_read_c0_guest_entryhi(cop0) & |
| 537 | ASID_MASK(vcpu->arch.gprs[rt])); | 538 | ASID_MASK, |
| 539 | vcpu->arch.gprs[rt] & ASID_MASK); | ||
| 538 | 540 | ||
| 539 | /* Blow away the shadow host TLBs */ | 541 | /* Blow away the shadow host TLBs */ |
| 540 | kvm_mips_flush_host_tlb(1); | 542 | kvm_mips_flush_host_tlb(1); |
| @@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
| 986 | * resulting handler will do the right thing | 988 | * resulting handler will do the right thing |
| 987 | */ | 989 | */ |
| 988 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | 990 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | |
| 989 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); | 991 | (kvm_read_c0_guest_entryhi |
| 992 | (cop0) & ASID_MASK)); | ||
| 990 | 993 | ||
| 991 | if (index < 0) { | 994 | if (index < 0) { |
| 992 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | 995 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); |
| @@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | |||
| 1151 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1154 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
| 1152 | enum emulation_result er = EMULATE_DONE; | 1155 | enum emulation_result er = EMULATE_DONE; |
| 1153 | unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | | 1156 | unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | |
| 1154 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1157 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
| 1155 | 1158 | ||
| 1156 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1159 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
| 1157 | /* save old pc */ | 1160 | /* save old pc */ |
| @@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | |||
| 1198 | enum emulation_result er = EMULATE_DONE; | 1201 | enum emulation_result er = EMULATE_DONE; |
| 1199 | unsigned long entryhi = | 1202 | unsigned long entryhi = |
| 1200 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1203 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
| 1201 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1204 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
| 1202 | 1205 | ||
| 1203 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1206 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
| 1204 | /* save old pc */ | 1207 | /* save old pc */ |
| @@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | |||
| 1243 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1246 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
| 1244 | enum emulation_result er = EMULATE_DONE; | 1247 | enum emulation_result er = EMULATE_DONE; |
| 1245 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1248 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
| 1246 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1249 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
| 1247 | 1250 | ||
| 1248 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1251 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
| 1249 | /* save old pc */ | 1252 | /* save old pc */ |
| @@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | |||
| 1287 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1290 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
| 1288 | enum emulation_result er = EMULATE_DONE; | 1291 | enum emulation_result er = EMULATE_DONE; |
| 1289 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1292 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
| 1290 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1293 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
| 1291 | 1294 | ||
| 1292 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1295 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
| 1293 | /* save old pc */ | 1296 | /* save old pc */ |
| @@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | |||
| 1356 | { | 1359 | { |
| 1357 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1360 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 1358 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1361 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
| 1359 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1362 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
| 1360 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1363 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
| 1361 | enum emulation_result er = EMULATE_DONE; | 1364 | enum emulation_result er = EMULATE_DONE; |
| 1362 | 1365 | ||
| @@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
| 1783 | */ | 1786 | */ |
| 1784 | index = kvm_mips_guest_tlb_lookup(vcpu, | 1787 | index = kvm_mips_guest_tlb_lookup(vcpu, |
| 1785 | (va & VPN2_MASK) | | 1788 | (va & VPN2_MASK) | |
| 1786 | ASID_MASK(kvm_read_c0_guest_entryhi | 1789 | (kvm_read_c0_guest_entryhi |
| 1787 | (vcpu->arch.cop0))); | 1790 | (vcpu->arch.cop0) & ASID_MASK)); |
| 1788 | if (index < 0) { | 1791 | if (index < 0) { |
| 1789 | if (exccode == T_TLB_LD_MISS) { | 1792 | if (exccode == T_TLB_LD_MISS) { |
| 1790 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); | 1793 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); |
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c index 89511a9258d3..e3f0d9b8b6c5 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/kvm_tlb.c | |||
| @@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn); | |||
| 51 | 51 | ||
| 52 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 52 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
| 53 | { | 53 | { |
| 54 | return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); | 54 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | 57 | ||
| 58 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 58 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
| 59 | { | 59 | { |
| 60 | return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); | 60 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) | 63 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) |
| @@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void) | |||
| 84 | old_pagemask = read_c0_pagemask(); | 84 | old_pagemask = read_c0_pagemask(); |
| 85 | 85 | ||
| 86 | printk("HOST TLBs:\n"); | 86 | printk("HOST TLBs:\n"); |
| 87 | printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); | 87 | printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); |
| 88 | 88 | ||
| 89 | for (i = 0; i < current_cpu_data.tlbsize; i++) { | 89 | for (i = 0; i < current_cpu_data.tlbsize; i++) { |
| 90 | write_c0_index(i); | 90 | write_c0_index(i); |
| @@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
| 428 | 428 | ||
| 429 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 429 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
| 430 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && | 430 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && |
| 431 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { | 431 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { |
| 432 | index = i; | 432 | index = i; |
| 433 | break; | 433 | break; |
| 434 | } | 434 | } |
| @@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | |||
| 626 | { | 626 | { |
| 627 | unsigned long asid = asid_cache(cpu); | 627 | unsigned long asid = asid_cache(cpu); |
| 628 | 628 | ||
| 629 | if (!(ASID_MASK(ASID_INC(asid)))) { | 629 | if (!((asid += ASID_INC) & ASID_MASK)) { |
| 630 | if (cpu_has_vtag_icache) { | 630 | if (cpu_has_vtag_icache) { |
| 631 | flush_icache_all(); | 631 | flush_icache_all(); |
| 632 | } | 632 | } |
| @@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 804 | if (!newasid) { | 804 | if (!newasid) { |
| 805 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ | 805 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ |
| 806 | if (current->flags & PF_VCPU) { | 806 | if (current->flags & PF_VCPU) { |
| 807 | write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); | 807 | write_c0_entryhi(vcpu->arch. |
| 808 | preempt_entryhi & ASID_MASK); | ||
| 808 | ehb(); | 809 | ehb(); |
| 809 | } | 810 | } |
| 810 | } else { | 811 | } else { |
| @@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 816 | */ | 817 | */ |
| 817 | if (current->flags & PF_VCPU) { | 818 | if (current->flags & PF_VCPU) { |
| 818 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 819 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
| 819 | write_c0_entryhi(ASID_MASK(vcpu->arch. | 820 | write_c0_entryhi(vcpu->arch. |
| 820 | guest_kernel_asid[cpu])); | 821 | guest_kernel_asid[cpu] & |
| 822 | ASID_MASK); | ||
| 821 | else | 823 | else |
| 822 | write_c0_entryhi(ASID_MASK(vcpu->arch. | 824 | write_c0_entryhi(vcpu->arch. |
| 823 | guest_user_asid[cpu])); | 825 | guest_user_asid[cpu] & |
| 826 | ASID_MASK); | ||
| 824 | ehb(); | 827 | ehb(); |
| 825 | } | 828 | } |
| 826 | } | 829 | } |
| @@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
| 879 | kvm_mips_guest_tlb_lookup(vcpu, | 882 | kvm_mips_guest_tlb_lookup(vcpu, |
| 880 | ((unsigned long) opc & VPN2_MASK) | 883 | ((unsigned long) opc & VPN2_MASK) |
| 881 | | | 884 | | |
| 882 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); | 885 | (kvm_read_c0_guest_entryhi |
| 886 | (cop0) & ASID_MASK)); | ||
| 883 | if (index < 0) { | 887 | if (index < 0) { |
| 884 | kvm_err | 888 | kvm_err |
| 885 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", | 889 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", |
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 9861c8669fab..850821df924c 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c | |||
| @@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev) | |||
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 147 | if (!res) { | ||
| 148 | dev_err(&pdev->dev, "Failed to get resource\n"); | ||
| 149 | return -ENOMEM; | ||
| 150 | } | ||
| 151 | 147 | ||
| 152 | /* remap gptu register range */ | 148 | /* remap gptu register range */ |
| 153 | gptu_membase = devm_ioremap_resource(&pdev->dev, res); | 149 | gptu_membase = devm_ioremap_resource(&pdev->dev, res); |
| @@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev) | |||
| 169 | if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { | 165 | if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { |
| 170 | dev_err(&pdev->dev, "Failed to find magic\n"); | 166 | dev_err(&pdev->dev, "Failed to find magic\n"); |
| 171 | gptu_hwexit(); | 167 | gptu_hwexit(); |
| 168 | clk_disable(clk); | ||
| 169 | clk_put(clk); | ||
| 172 | return -ENAVAIL; | 170 | return -ENAVAIL; |
| 173 | } | 171 | } |
| 174 | 172 | ||
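Two independent fixes in the gptu.c hunk: the explicit NULL check on the resource is dropped because devm_ioremap_resource() itself rejects a missing resource and reports the failure through an error-encoded pointer (which the usual IS_ERR() check after the call, not visible in this hunk, would handle), and the "failed to find magic" path now disables and releases the clock it had enabled. A standalone sketch of the ERR_PTR convention involved; these are simplified re-implementations for illustration, not the kernel definitions:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *ret = ERR_PTR(-EINVAL);	/* what a missing resource would yield */

		if (IS_ERR(ret))
			printf("mapping failed: %ld\n", PTR_ERR(ret));
		return 0;
	}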
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 8a12d00908e0..32b9f21bfd85 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
| 12 | #include <asm/pgtable.h> | 12 | #include <asm/pgtable.h> |
| 13 | #include <asm/tlbdebug.h> | 13 | #include <asm/tlbdebug.h> |
| 14 | #include <asm/mmu_context.h> | ||
| 15 | 14 | ||
| 16 | static inline const char *msk2str(unsigned int mask) | 15 | static inline const char *msk2str(unsigned int mask) |
| 17 | { | 16 | { |
| @@ -56,7 +55,7 @@ static void dump_tlb(int first, int last) | |||
| 56 | s_pagemask = read_c0_pagemask(); | 55 | s_pagemask = read_c0_pagemask(); |
| 57 | s_entryhi = read_c0_entryhi(); | 56 | s_entryhi = read_c0_entryhi(); |
| 58 | s_index = read_c0_index(); | 57 | s_index = read_c0_index(); |
| 59 | asid = ASID_MASK(s_entryhi); | 58 | asid = s_entryhi & 0xff; |
| 60 | 59 | ||
| 61 | for (i = first; i <= last; i++) { | 60 | for (i = first; i <= last; i++) { |
| 62 | write_c0_index(i); | 61 | write_c0_index(i); |
| @@ -86,7 +85,7 @@ static void dump_tlb(int first, int last) | |||
| 86 | 85 | ||
| 87 | printk("va=%0*lx asid=%02lx\n", | 86 | printk("va=%0*lx asid=%02lx\n", |
| 88 | width, (entryhi & ~0x1fffUL), | 87 | width, (entryhi & ~0x1fffUL), |
| 89 | ASID_MASK(entryhi)); | 88 | entryhi & 0xff); |
| 90 | printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", | 89 | printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", |
| 91 | width, | 90 | width, |
| 92 | (entrylo0 << 6) & PAGE_MASK, c0, | 91 | (entrylo0 << 6) & PAGE_MASK, c0, |
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 8327698b9937..91615c2ef0cf 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
| 10 | 10 | ||
| 11 | #include <asm/mipsregs.h> | 11 | #include <asm/mipsregs.h> |
| 12 | #include <asm/mmu_context.h> | ||
| 13 | #include <asm/page.h> | 12 | #include <asm/page.h> |
| 14 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
| 15 | #include <asm/tlbdebug.h> | 14 | #include <asm/tlbdebug.h> |
| @@ -22,7 +21,7 @@ static void dump_tlb(int first, int last) | |||
| 22 | unsigned int asid; | 21 | unsigned int asid; |
| 23 | unsigned long entryhi, entrylo0; | 22 | unsigned long entryhi, entrylo0; |
| 24 | 23 | ||
| 25 | asid = ASID_MASK(read_c0_entryhi()); | 24 | asid = read_c0_entryhi() & 0xfc0; |
| 26 | 25 | ||
| 27 | for (i = first; i <= last; i++) { | 26 | for (i = first; i <= last; i++) { |
| 28 | write_c0_index(i<<8); | 27 | write_c0_index(i<<8); |
| @@ -36,7 +35,7 @@ static void dump_tlb(int first, int last) | |||
| 36 | 35 | ||
| 37 | /* Unused entries have a virtual address of KSEG0. */ | 36 | /* Unused entries have a virtual address of KSEG0. */ |
| 38 | if ((entryhi & 0xffffe000) != 0x80000000 | 37 | if ((entryhi & 0xffffe000) != 0x80000000 |
| 39 | && (ASID_MASK(entryhi) == asid)) { | 38 | && (entryhi & 0xfc0) == asid) { |
| 40 | /* | 39 | /* |
| 41 | * Only print entries in use | 40 | * Only print entries in use |
| 42 | */ | 41 | */ |
| @@ -45,7 +44,7 @@ static void dump_tlb(int first, int last) | |||
| 45 | printk("va=%08lx asid=%08lx" | 44 | printk("va=%08lx asid=%08lx" |
| 46 | " [pa=%06lx n=%d d=%d v=%d g=%d]", | 45 | " [pa=%06lx n=%d d=%d v=%d g=%d]", |
| 47 | (entryhi & 0xffffe000), | 46 | (entryhi & 0xffffe000), |
| 48 | ASID_MASK(entryhi), | 47 | entryhi & 0xfc0, |
| 49 | entrylo0 & PAGE_MASK, | 48 | entrylo0 & PAGE_MASK, |
| 50 | (entrylo0 & (1 << 11)) ? 1 : 0, | 49 | (entrylo0 & (1 << 11)) ? 1 : 0, |
| 51 | (entrylo0 & (1 << 10)) ? 1 : 0, | 50 | (entrylo0 & (1 << 10)) ? 1 : 0, |
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 4a13c150f31b..a63d1ed0827f 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c | |||
| @@ -51,7 +51,7 @@ void local_flush_tlb_all(void) | |||
| 51 | #endif | 51 | #endif |
| 52 | 52 | ||
| 53 | local_irq_save(flags); | 53 | local_irq_save(flags); |
| 54 | old_ctx = ASID_MASK(read_c0_entryhi()); | 54 | old_ctx = read_c0_entryhi() & ASID_MASK; |
| 55 | write_c0_entrylo0(0); | 55 | write_c0_entrylo0(0); |
| 56 | entry = r3k_have_wired_reg ? read_c0_wired() : 8; | 56 | entry = r3k_have_wired_reg ? read_c0_wired() : 8; |
| 57 | for (; entry < current_cpu_data.tlbsize; entry++) { | 57 | for (; entry < current_cpu_data.tlbsize; entry++) { |
| @@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
| 87 | 87 | ||
| 88 | #ifdef DEBUG_TLB | 88 | #ifdef DEBUG_TLB |
| 89 | printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", | 89 | printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", |
| 90 | ASID_MASK(cpu_context(cpu, mm)), start, end); | 90 | cpu_context(cpu, mm) & ASID_MASK, start, end); |
| 91 | #endif | 91 | #endif |
| 92 | local_irq_save(flags); | 92 | local_irq_save(flags); |
| 93 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 93 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 94 | if (size <= current_cpu_data.tlbsize) { | 94 | if (size <= current_cpu_data.tlbsize) { |
| 95 | int oldpid = ASID_MASK(read_c0_entryhi()); | 95 | int oldpid = read_c0_entryhi() & ASID_MASK; |
| 96 | int newpid = ASID_MASK(cpu_context(cpu, mm)); | 96 | int newpid = cpu_context(cpu, mm) & ASID_MASK; |
| 97 | 97 | ||
| 98 | start &= PAGE_MASK; | 98 | start &= PAGE_MASK; |
| 99 | end += PAGE_SIZE - 1; | 99 | end += PAGE_SIZE - 1; |
| @@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 166 | #ifdef DEBUG_TLB | 166 | #ifdef DEBUG_TLB |
| 167 | printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); | 167 | printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); |
| 168 | #endif | 168 | #endif |
| 169 | newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); | 169 | newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; |
| 170 | page &= PAGE_MASK; | 170 | page &= PAGE_MASK; |
| 171 | local_irq_save(flags); | 171 | local_irq_save(flags); |
| 172 | oldpid = ASID_MASK(read_c0_entryhi()); | 172 | oldpid = read_c0_entryhi() & ASID_MASK; |
| 173 | write_c0_entryhi(page | newpid); | 173 | write_c0_entryhi(page | newpid); |
| 174 | BARRIER; | 174 | BARRIER; |
| 175 | tlb_probe(); | 175 | tlb_probe(); |
| @@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
| 197 | if (current->active_mm != vma->vm_mm) | 197 | if (current->active_mm != vma->vm_mm) |
| 198 | return; | 198 | return; |
| 199 | 199 | ||
| 200 | pid = ASID_MASK(read_c0_entryhi()); | 200 | pid = read_c0_entryhi() & ASID_MASK; |
| 201 | 201 | ||
| 202 | #ifdef DEBUG_TLB | 202 | #ifdef DEBUG_TLB |
| 203 | if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { | 203 | if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { |
| 204 | printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", | 204 | printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", |
| 205 | (cpu_context(cpu, vma->vm_mm)), pid); | 205 | (cpu_context(cpu, vma->vm_mm)), pid); |
| 206 | } | 206 | } |
| @@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 241 | 241 | ||
| 242 | local_irq_save(flags); | 242 | local_irq_save(flags); |
| 243 | /* Save old context and create impossible VPN2 value */ | 243 | /* Save old context and create impossible VPN2 value */ |
| 244 | old_ctx = ASID_MASK(read_c0_entryhi()); | 244 | old_ctx = read_c0_entryhi() & ASID_MASK; |
| 245 | old_pagemask = read_c0_pagemask(); | 245 | old_pagemask = read_c0_pagemask(); |
| 246 | w = read_c0_wired(); | 246 | w = read_c0_wired(); |
| 247 | write_c0_wired(w + 1); | 247 | write_c0_wired(w + 1); |
| @@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 264 | #endif | 264 | #endif |
| 265 | 265 | ||
| 266 | local_irq_save(flags); | 266 | local_irq_save(flags); |
| 267 | old_ctx = ASID_MASK(read_c0_entryhi()); | 267 | old_ctx = read_c0_entryhi() & ASID_MASK; |
| 268 | write_c0_entrylo0(entrylo0); | 268 | write_c0_entrylo0(entrylo0); |
| 269 | write_c0_entryhi(entryhi); | 269 | write_c0_entryhi(entryhi); |
| 270 | write_c0_index(wired); | 270 | write_c0_index(wired); |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 09653b290d53..c643de4c473a 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
| @@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 287 | 287 | ||
| 288 | ENTER_CRITICAL(flags); | 288 | ENTER_CRITICAL(flags); |
| 289 | 289 | ||
| 290 | pid = ASID_MASK(read_c0_entryhi()); | 290 | pid = read_c0_entryhi() & ASID_MASK; |
| 291 | address &= (PAGE_MASK << 1); | 291 | address &= (PAGE_MASK << 1); |
| 292 | write_c0_entryhi(address | pid); | 292 | write_c0_entryhi(address | pid); |
| 293 | pgdp = pgd_offset(vma->vm_mm, address); | 293 | pgdp = pgd_offset(vma->vm_mm, address); |
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 122f9207f49e..91c2499f806a 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c | |||
| @@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 195 | if (current->active_mm != vma->vm_mm) | 195 | if (current->active_mm != vma->vm_mm) |
| 196 | return; | 196 | return; |
| 197 | 197 | ||
| 198 | pid = ASID_MASK(read_c0_entryhi()); | 198 | pid = read_c0_entryhi() & ASID_MASK; |
| 199 | 199 | ||
| 200 | local_irq_save(flags); | 200 | local_irq_save(flags); |
| 201 | address &= PAGE_MASK; | 201 | address &= PAGE_MASK; |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 4d46d3787576..ce9818eef7d3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/cache.h> | 30 | #include <linux/cache.h> |
| 31 | 31 | ||
| 32 | #include <asm/mmu_context.h> | ||
| 33 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
| 34 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
| 35 | #include <asm/war.h> | 34 | #include <asm/war.h> |
| @@ -306,78 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata; | |||
| 306 | static int check_for_high_segbits __cpuinitdata; | 305 | static int check_for_high_segbits __cpuinitdata; |
| 307 | #endif | 306 | #endif |
| 308 | 307 | ||
| 309 | static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, | ||
| 310 | unsigned int i_const) | ||
| 311 | { | ||
| 312 | unsigned int **p; | ||
| 313 | |||
| 314 | for (p = start; p < stop; p++) { | ||
| 315 | #ifndef CONFIG_CPU_MICROMIPS | ||
| 316 | unsigned int *ip; | ||
| 317 | |||
| 318 | ip = *p; | ||
| 319 | *ip = (*ip & 0xffff0000) | i_const; | ||
| 320 | #else | ||
| 321 | unsigned short *ip; | ||
| 322 | |||
| 323 | ip = ((unsigned short *)((unsigned int)*p - 1)); | ||
| 324 | if ((*ip & 0xf000) == 0x4000) { | ||
| 325 | *ip &= 0xfff1; | ||
| 326 | *ip |= (i_const << 1); | ||
| 327 | } else if ((*ip & 0xf000) == 0x6000) { | ||
| 328 | *ip &= 0xfff1; | ||
| 329 | *ip |= ((i_const >> 2) << 1); | ||
| 330 | } else { | ||
| 331 | ip++; | ||
| 332 | *ip = i_const; | ||
| 333 | } | ||
| 334 | #endif | ||
| 335 | local_flush_icache_range((unsigned long)ip, | ||
| 336 | (unsigned long)ip + sizeof(*ip)); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | #define asid_insn_fixup(section, const) \ | ||
| 341 | do { \ | ||
| 342 | extern unsigned int *__start_ ## section; \ | ||
| 343 | extern unsigned int *__stop_ ## section; \ | ||
| 344 | insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ | ||
| 345 | } while(0) | ||
| 346 | |||
| 347 | /* | ||
| 348 | * Caller is assumed to flush the caches before the first context switch. | ||
| 349 | */ | ||
| 350 | static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, | ||
| 351 | unsigned int version_mask, | ||
| 352 | unsigned int first_version) | ||
| 353 | { | ||
| 354 | extern asmlinkage void handle_ri_rdhwr_vivt(void); | ||
| 355 | unsigned long *vivt_exc; | ||
| 356 | |||
| 357 | #ifdef CONFIG_CPU_MICROMIPS | ||
| 358 | /* | ||
| 359 | * Worst case optimised microMIPS addiu instructions support | ||
| 360 | * only a 3-bit immediate value. | ||
| 361 | */ | ||
| 362 | if(inc > 7) | ||
| 363 | panic("Invalid ASID increment value!"); | ||
| 364 | #endif | ||
| 365 | asid_insn_fixup(__asid_inc, inc); | ||
| 366 | asid_insn_fixup(__asid_mask, mask); | ||
| 367 | asid_insn_fixup(__asid_version_mask, version_mask); | ||
| 368 | asid_insn_fixup(__asid_first_version, first_version); | ||
| 369 | |||
| 370 | /* Patch up the 'handle_ri_rdhwr_vivt' handler. */ | ||
| 371 | vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; | ||
| 372 | #ifdef CONFIG_CPU_MICROMIPS | ||
| 373 | vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); | ||
| 374 | #endif | ||
| 375 | vivt_exc++; | ||
| 376 | *vivt_exc = (*vivt_exc & ~mask) | mask; | ||
| 377 | |||
| 378 | current_cpu_data.asid_cache = first_version; | ||
| 379 | } | ||
| 380 | |||
| 381 | static int check_for_high_segbits __cpuinitdata; | 308 | static int check_for_high_segbits __cpuinitdata; |
| 382 | 309 | ||
| 383 | static unsigned int kscratch_used_mask __cpuinitdata; | 310 | static unsigned int kscratch_used_mask __cpuinitdata; |
| @@ -2256,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void) | |||
| 2256 | case CPU_TX3922: | 2183 | case CPU_TX3922: |
| 2257 | case CPU_TX3927: | 2184 | case CPU_TX3927: |
| 2258 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 2185 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
| 2259 | setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); | ||
| 2260 | if (cpu_has_local_ebase) | 2186 | if (cpu_has_local_ebase) |
| 2261 | build_r3000_tlb_refill_handler(); | 2187 | build_r3000_tlb_refill_handler(); |
| 2262 | if (!run_once) { | 2188 | if (!run_once) { |
| @@ -2282,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void) | |||
| 2282 | break; | 2208 | break; |
| 2283 | 2209 | ||
| 2284 | default: | 2210 | default: |
| 2285 | #ifndef CONFIG_MIPS_MT_SMTC | ||
| 2286 | setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); | ||
| 2287 | #else | ||
| 2288 | setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); | ||
| 2289 | #endif | ||
| 2290 | if (!run_once) { | 2211 | if (!run_once) { |
| 2291 | scratch_reg = allocate_kscratch(); | 2212 | scratch_reg = allocate_kscratch(); |
| 2292 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 2213 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
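With ASID_INC/ASID_MASK now compile-time constants (see the mmu_context.h hunk earlier), the run-time instruction patching removed from tlbex.c is no longer needed. For reference, on non-microMIPS kernels the deleted insn_fixup() simply rewrote the 16-bit immediate field of each recorded instruction; a standalone sketch with a hand-encoded example instruction:

	#include <stdio.h>
	#include <stdint.h>

	static void insn_fixup(uint32_t *insn, uint32_t i_const)
	{
		*insn = (*insn & 0xffff0000) | i_const;
		/* the kernel then flushed the icache range covering *insn */
	}

	int main(void)
	{
		uint32_t andi_k1 = 0x337b00ff;	/* "andi k1, k1, 0xff" */

		insn_fixup(&andi_k1, 0xfc0);	/* e.g. the R3000-class ASID mask */
		printf("patched insn: %#x\n", andi_k1);	/* 0x337b0fc0 */
		return 0;
	}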
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c index 0edb89a63516..1c9897531660 100644 --- a/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/arch/mips/pmcs-msp71xx/msp_prom.c | |||
| @@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c) | |||
| 83 | return 0; /* foo */ | 83 | return 0; /* foo */ |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline int str2eaddr(unsigned char *ea, unsigned char *str) | 86 | int str2eaddr(unsigned char *ea, unsigned char *str) |
| 87 | { | 87 | { |
| 88 | int index = 0; | 88 | int index = 0; |
| 89 | unsigned char num = 0; | 89 | unsigned char num = 0; |
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi index ef7da1e227e6..e3203d414fee 100644 --- a/arch/mips/ralink/dts/rt3050.dtsi +++ b/arch/mips/ralink/dts/rt3050.dtsi | |||
| @@ -55,4 +55,14 @@ | |||
| 55 | reg-shift = <2>; | 55 | reg-shift = <2>; |
| 56 | }; | 56 | }; |
| 57 | }; | 57 | }; |
| 58 | |||
| 59 | usb@101c0000 { | ||
| 60 | compatible = "ralink,rt3050-usb", "snps,dwc2"; | ||
| 61 | reg = <0x101c0000 40000>; | ||
| 62 | |||
| 63 | interrupt-parent = <&intc>; | ||
| 64 | interrupts = <18>; | ||
| 65 | |||
| 66 | status = "disabled"; | ||
| 67 | }; | ||
| 58 | }; | 68 | }; |
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts index c18c9a84f4c4..0ac73ea28198 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/ralink/dts/rt3052_eval.dts | |||
| @@ -43,4 +43,8 @@ | |||
| 43 | reg = <0x50000 0x7b0000>; | 43 | reg = <0x50000 0x7b0000>; |
| 44 | }; | 44 | }; |
| 45 | }; | 45 | }; |
| 46 | |||
| 47 | usb@101c0000 { | ||
| 48 | status = "ok"; | ||
| 49 | }; | ||
| 46 | }; | 50 | }; |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index cad060f288cf..6507dabdd5dd 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -245,7 +245,7 @@ config SMP | |||
| 245 | 245 | ||
| 246 | config IRQSTACKS | 246 | config IRQSTACKS |
| 247 | bool "Use separate kernel stacks when processing interrupts" | 247 | bool "Use separate kernel stacks when processing interrupts" |
| 248 | default n | 248 | default y |
| 249 | help | 249 | help |
| 250 | If you say Y here the kernel will use separate kernel stacks | 250 | If you say Y here the kernel will use separate kernel stacks |
| 251 | for handling hard and soft interrupts. This can help avoid | 251 | for handling hard and soft interrupts. This can help avoid |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 2f967cc6649e..197690068f88 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
| @@ -23,24 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm | |||
| 23 | CHECKFLAGS += -D__hppa__=1 | 23 | CHECKFLAGS += -D__hppa__=1 |
| 24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | 24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) |
| 25 | 25 | ||
| 26 | MACHINE := $(shell uname -m) | ||
| 27 | NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0) | ||
| 28 | |||
| 29 | ifdef CONFIG_64BIT | 26 | ifdef CONFIG_64BIT |
| 30 | UTS_MACHINE := parisc64 | 27 | UTS_MACHINE := parisc64 |
| 31 | CHECKFLAGS += -D__LP64__=1 -m64 | 28 | CHECKFLAGS += -D__LP64__=1 -m64 |
| 32 | WIDTH := 64 | 29 | CC_ARCHES = hppa64 |
| 33 | else # 32-bit | 30 | else # 32-bit |
| 34 | WIDTH := | 31 | CC_ARCHES = hppa hppa2.0 hppa1.1 |
| 35 | endif | 32 | endif |
| 36 | 33 | ||
| 37 | # attempt to help out folks who are cross-compiling | 34 | ifneq ($(SUBARCH),$(UTS_MACHINE)) |
| 38 | ifeq ($(NATIVE),1) | 35 | ifeq ($(CROSS_COMPILE),) |
| 39 | CROSS_COMPILE := hppa$(WIDTH)-linux- | 36 | CC_SUFFIXES = linux linux-gnu unknown-linux-gnu |
| 40 | else | 37 | CROSS_COMPILE := $(call cc-cross-prefix, \ |
| 41 | ifeq ($(CROSS_COMPILE),) | 38 | $(foreach a,$(CC_ARCHES), \ |
| 42 | CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- | 39 | $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) |
| 43 | endif | 40 | endif |
| 44 | endif | 41 | endif |
| 45 | 42 | ||
| 46 | OBJCOPY_FLAGS =-O binary -R .note -R .comment -S | 43 | OBJCOPY_FLAGS =-O binary -R .note -R .comment -S |
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 12373c4dabab..c19f7138ba48 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h | |||
| @@ -11,10 +11,18 @@ | |||
| 11 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
| 12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_IRQSTACKS | ||
| 15 | #define __ARCH_HAS_DO_SOFTIRQ | ||
| 16 | #endif | ||
| 17 | |||
| 14 | typedef struct { | 18 | typedef struct { |
| 15 | unsigned int __softirq_pending; | 19 | unsigned int __softirq_pending; |
| 16 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 20 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
| 17 | unsigned int kernel_stack_usage; | 21 | unsigned int kernel_stack_usage; |
| 22 | #ifdef CONFIG_IRQSTACKS | ||
| 23 | unsigned int irq_stack_usage; | ||
| 24 | unsigned int irq_stack_counter; | ||
| 25 | #endif | ||
| 18 | #endif | 26 | #endif |
| 19 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
| 20 | unsigned int irq_resched_count; | 28 | unsigned int irq_resched_count; |
| @@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | |||
| 28 | #define __ARCH_IRQ_STAT | 36 | #define __ARCH_IRQ_STAT |
| 29 | #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) | 37 | #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) |
| 30 | #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) | 38 | #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) |
| 39 | #define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) | ||
| 31 | #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) | 40 | #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) |
| 32 | 41 | ||
| 33 | #define __ARCH_SET_SOFTIRQ_PENDING | 42 | #define __ARCH_SET_SOFTIRQ_PENDING |
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 064015547d1e..cfbc43929cf6 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
| @@ -63,10 +63,13 @@ | |||
| 63 | */ | 63 | */ |
| 64 | #ifdef __KERNEL__ | 64 | #ifdef __KERNEL__ |
| 65 | 65 | ||
| 66 | #include <linux/spinlock_types.h> | ||
| 67 | |||
| 66 | #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ | 68 | #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ |
| 67 | 69 | ||
| 68 | union irq_stack_union { | 70 | union irq_stack_union { |
| 69 | unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; | 71 | unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; |
| 72 | raw_spinlock_t lock; | ||
| 70 | }; | 73 | }; |
| 71 | 74 | ||
| 72 | DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); | 75 | DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 4bb96ad9b0b1..ae27cb6ce19a 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
| @@ -452,9 +452,41 @@ | |||
| 452 | L2_ptep \pgd,\pte,\index,\va,\fault | 452 | L2_ptep \pgd,\pte,\index,\va,\fault |
| 453 | .endm | 453 | .endm |
| 454 | 454 | ||
| 455 | /* Acquire pa_dbit_lock lock. */ | ||
| 456 | .macro dbit_lock spc,tmp,tmp1 | ||
| 457 | #ifdef CONFIG_SMP | ||
| 458 | cmpib,COND(=),n 0,\spc,2f | ||
| 459 | load32 PA(pa_dbit_lock),\tmp | ||
| 460 | 1: LDCW 0(\tmp),\tmp1 | ||
| 461 | cmpib,COND(=) 0,\tmp1,1b | ||
| 462 | nop | ||
| 463 | 2: | ||
| 464 | #endif | ||
| 465 | .endm | ||
| 466 | |||
| 467 | /* Release pa_dbit_lock lock without reloading lock address. */ | ||
| 468 | .macro dbit_unlock0 spc,tmp | ||
| 469 | #ifdef CONFIG_SMP | ||
| 470 | or,COND(=) %r0,\spc,%r0 | ||
| 471 | stw \spc,0(\tmp) | ||
| 472 | #endif | ||
| 473 | .endm | ||
| 474 | |||
| 475 | /* Release pa_dbit_lock lock. */ | ||
| 476 | .macro dbit_unlock1 spc,tmp | ||
| 477 | #ifdef CONFIG_SMP | ||
| 478 | load32 PA(pa_dbit_lock),\tmp | ||
| 479 | dbit_unlock0 \spc,\tmp | ||
| 480 | #endif | ||
| 481 | .endm | ||
| 482 | |||
| 455 | /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and | 483 | /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and |
| 456 | * don't needlessly dirty the cache line if it was already set */ | 484 | * don't needlessly dirty the cache line if it was already set */ |
| 457 | .macro update_ptep ptep,pte,tmp,tmp1 | 485 | .macro update_ptep spc,ptep,pte,tmp,tmp1 |
| 486 | #ifdef CONFIG_SMP | ||
| 487 | or,COND(=) %r0,\spc,%r0 | ||
| 488 | LDREG 0(\ptep),\pte | ||
| 489 | #endif | ||
| 458 | ldi _PAGE_ACCESSED,\tmp1 | 490 | ldi _PAGE_ACCESSED,\tmp1 |
| 459 | or \tmp1,\pte,\tmp | 491 | or \tmp1,\pte,\tmp |
| 460 | and,COND(<>) \tmp1,\pte,%r0 | 492 | and,COND(<>) \tmp1,\pte,%r0 |
| @@ -463,7 +495,11 @@ | |||
| 463 | 495 | ||
| 464 | /* Set the dirty bit (and accessed bit). No need to be | 496 | /* Set the dirty bit (and accessed bit). No need to be |
| 465 | * clever, this is only used from the dirty fault */ | 497 | * clever, this is only used from the dirty fault */ |
| 466 | .macro update_dirty ptep,pte,tmp | 498 | .macro update_dirty spc,ptep,pte,tmp |
| 499 | #ifdef CONFIG_SMP | ||
| 500 | or,COND(=) %r0,\spc,%r0 | ||
| 501 | LDREG 0(\ptep),\pte | ||
| 502 | #endif | ||
| 467 | ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp | 503 | ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp |
| 468 | or \tmp,\pte,\pte | 504 | or \tmp,\pte,\pte |
| 469 | STREG \pte,0(\ptep) | 505 | STREG \pte,0(\ptep) |
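The dbit_lock/dbit_unlock macros above take pa_dbit_lock only on SMP kernels and skip it when the space register is zero (kernel-space accesses). They rely on the parisc LDCW primitive: the lock word is non-zero when free, LDCW atomically loads it and clears it, so reading back a non-zero value means the lock was acquired, and unlocking is a plain store of a non-zero value (the space id here). A C-level model of that protocol, illustration only, not the assembler macros:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_ulong pa_dbit_lock = 1;	/* non-zero: free */

	static void dbit_lock(void)
	{
		/* spin until the exchange-with-zero returns non-zero */
		while (atomic_exchange(&pa_dbit_lock, 0) == 0)
			;
	}

	static void dbit_unlock(unsigned long nonzero)
	{
		atomic_store(&pa_dbit_lock, nonzero);
	}

	int main(void)
	{
		dbit_lock();
		/* ...update the PTE under the lock... */
		dbit_unlock(1);
		printf("lock word: %lu\n", (unsigned long)pa_dbit_lock);
		return 0;
	}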
| @@ -1111,11 +1147,13 @@ dtlb_miss_20w: | |||
| 1111 | 1147 | ||
| 1112 | L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w | 1148 | L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w |
| 1113 | 1149 | ||
| 1114 | update_ptep ptp,pte,t0,t1 | 1150 | dbit_lock spc,t0,t1 |
| 1151 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1115 | 1152 | ||
| 1116 | make_insert_tlb spc,pte,prot | 1153 | make_insert_tlb spc,pte,prot |
| 1117 | 1154 | ||
| 1118 | idtlbt pte,prot | 1155 | idtlbt pte,prot |
| 1156 | dbit_unlock1 spc,t0 | ||
| 1119 | 1157 | ||
| 1120 | rfir | 1158 | rfir |
| 1121 | nop | 1159 | nop |
| @@ -1135,11 +1173,13 @@ nadtlb_miss_20w: | |||
| 1135 | 1173 | ||
| 1136 | L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w | 1174 | L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w |
| 1137 | 1175 | ||
| 1138 | update_ptep ptp,pte,t0,t1 | 1176 | dbit_lock spc,t0,t1 |
| 1177 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1139 | 1178 | ||
| 1140 | make_insert_tlb spc,pte,prot | 1179 | make_insert_tlb spc,pte,prot |
| 1141 | 1180 | ||
| 1142 | idtlbt pte,prot | 1181 | idtlbt pte,prot |
| 1182 | dbit_unlock1 spc,t0 | ||
| 1143 | 1183 | ||
| 1144 | rfir | 1184 | rfir |
| 1145 | nop | 1185 | nop |
| @@ -1161,7 +1201,8 @@ dtlb_miss_11: | |||
| 1161 | 1201 | ||
| 1162 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 | 1202 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 |
| 1163 | 1203 | ||
| 1164 | update_ptep ptp,pte,t0,t1 | 1204 | dbit_lock spc,t0,t1 |
| 1205 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1165 | 1206 | ||
| 1166 | make_insert_tlb_11 spc,pte,prot | 1207 | make_insert_tlb_11 spc,pte,prot |
| 1167 | 1208 | ||
| @@ -1172,6 +1213,7 @@ dtlb_miss_11: | |||
| 1172 | idtlbp prot,(%sr1,va) | 1213 | idtlbp prot,(%sr1,va) |
| 1173 | 1214 | ||
| 1174 | mtsp t0, %sr1 /* Restore sr1 */ | 1215 | mtsp t0, %sr1 /* Restore sr1 */ |
| 1216 | dbit_unlock1 spc,t0 | ||
| 1175 | 1217 | ||
| 1176 | rfir | 1218 | rfir |
| 1177 | nop | 1219 | nop |
| @@ -1192,7 +1234,8 @@ nadtlb_miss_11: | |||
| 1192 | 1234 | ||
| 1193 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 | 1235 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 |
| 1194 | 1236 | ||
| 1195 | update_ptep ptp,pte,t0,t1 | 1237 | dbit_lock spc,t0,t1 |
| 1238 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1196 | 1239 | ||
| 1197 | make_insert_tlb_11 spc,pte,prot | 1240 | make_insert_tlb_11 spc,pte,prot |
| 1198 | 1241 | ||
| @@ -1204,6 +1247,7 @@ nadtlb_miss_11: | |||
| 1204 | idtlbp prot,(%sr1,va) | 1247 | idtlbp prot,(%sr1,va) |
| 1205 | 1248 | ||
| 1206 | mtsp t0, %sr1 /* Restore sr1 */ | 1249 | mtsp t0, %sr1 /* Restore sr1 */ |
| 1250 | dbit_unlock1 spc,t0 | ||
| 1207 | 1251 | ||
| 1208 | rfir | 1252 | rfir |
| 1209 | nop | 1253 | nop |
| @@ -1224,13 +1268,15 @@ dtlb_miss_20: | |||
| 1224 | 1268 | ||
| 1225 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 | 1269 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 |
| 1226 | 1270 | ||
| 1227 | update_ptep ptp,pte,t0,t1 | 1271 | dbit_lock spc,t0,t1 |
| 1272 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1228 | 1273 | ||
| 1229 | make_insert_tlb spc,pte,prot | 1274 | make_insert_tlb spc,pte,prot |
| 1230 | 1275 | ||
| 1231 | f_extend pte,t0 | 1276 | f_extend pte,t0 |
| 1232 | 1277 | ||
| 1233 | idtlbt pte,prot | 1278 | idtlbt pte,prot |
| 1279 | dbit_unlock1 spc,t0 | ||
| 1234 | 1280 | ||
| 1235 | rfir | 1281 | rfir |
| 1236 | nop | 1282 | nop |
| @@ -1250,13 +1296,15 @@ nadtlb_miss_20: | |||
| 1250 | 1296 | ||
| 1251 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 | 1297 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 |
| 1252 | 1298 | ||
| 1253 | update_ptep ptp,pte,t0,t1 | 1299 | dbit_lock spc,t0,t1 |
| 1300 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1254 | 1301 | ||
| 1255 | make_insert_tlb spc,pte,prot | 1302 | make_insert_tlb spc,pte,prot |
| 1256 | 1303 | ||
| 1257 | f_extend pte,t0 | 1304 | f_extend pte,t0 |
| 1258 | 1305 | ||
| 1259 | idtlbt pte,prot | 1306 | idtlbt pte,prot |
| 1307 | dbit_unlock1 spc,t0 | ||
| 1260 | 1308 | ||
| 1261 | rfir | 1309 | rfir |
| 1262 | nop | 1310 | nop |
| @@ -1357,11 +1405,13 @@ itlb_miss_20w: | |||
| 1357 | 1405 | ||
| 1358 | L3_ptep ptp,pte,t0,va,itlb_fault | 1406 | L3_ptep ptp,pte,t0,va,itlb_fault |
| 1359 | 1407 | ||
| 1360 | update_ptep ptp,pte,t0,t1 | 1408 | dbit_lock spc,t0,t1 |
| 1409 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1361 | 1410 | ||
| 1362 | make_insert_tlb spc,pte,prot | 1411 | make_insert_tlb spc,pte,prot |
| 1363 | 1412 | ||
| 1364 | iitlbt pte,prot | 1413 | iitlbt pte,prot |
| 1414 | dbit_unlock1 spc,t0 | ||
| 1365 | 1415 | ||
| 1366 | rfir | 1416 | rfir |
| 1367 | nop | 1417 | nop |
| @@ -1379,11 +1429,13 @@ naitlb_miss_20w: | |||
| 1379 | 1429 | ||
| 1380 | L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w | 1430 | L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w |
| 1381 | 1431 | ||
| 1382 | update_ptep ptp,pte,t0,t1 | 1432 | dbit_lock spc,t0,t1 |
| 1433 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1383 | 1434 | ||
| 1384 | make_insert_tlb spc,pte,prot | 1435 | make_insert_tlb spc,pte,prot |
| 1385 | 1436 | ||
| 1386 | iitlbt pte,prot | 1437 | iitlbt pte,prot |
| 1438 | dbit_unlock1 spc,t0 | ||
| 1387 | 1439 | ||
| 1388 | rfir | 1440 | rfir |
| 1389 | nop | 1441 | nop |
| @@ -1405,7 +1457,8 @@ itlb_miss_11: | |||
| 1405 | 1457 | ||
| 1406 | L2_ptep ptp,pte,t0,va,itlb_fault | 1458 | L2_ptep ptp,pte,t0,va,itlb_fault |
| 1407 | 1459 | ||
| 1408 | update_ptep ptp,pte,t0,t1 | 1460 | dbit_lock spc,t0,t1 |
| 1461 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1409 | 1462 | ||
| 1410 | make_insert_tlb_11 spc,pte,prot | 1463 | make_insert_tlb_11 spc,pte,prot |
| 1411 | 1464 | ||
| @@ -1416,6 +1469,7 @@ itlb_miss_11: | |||
| 1416 | iitlbp prot,(%sr1,va) | 1469 | iitlbp prot,(%sr1,va) |
| 1417 | 1470 | ||
| 1418 | mtsp t0, %sr1 /* Restore sr1 */ | 1471 | mtsp t0, %sr1 /* Restore sr1 */ |
| 1472 | dbit_unlock1 spc,t0 | ||
| 1419 | 1473 | ||
| 1420 | rfir | 1474 | rfir |
| 1421 | nop | 1475 | nop |
| @@ -1427,7 +1481,8 @@ naitlb_miss_11: | |||
| 1427 | 1481 | ||
| 1428 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 | 1482 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 |
| 1429 | 1483 | ||
| 1430 | update_ptep ptp,pte,t0,t1 | 1484 | dbit_lock spc,t0,t1 |
| 1485 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1431 | 1486 | ||
| 1432 | make_insert_tlb_11 spc,pte,prot | 1487 | make_insert_tlb_11 spc,pte,prot |
| 1433 | 1488 | ||
| @@ -1438,6 +1493,7 @@ naitlb_miss_11: | |||
| 1438 | iitlbp prot,(%sr1,va) | 1493 | iitlbp prot,(%sr1,va) |
| 1439 | 1494 | ||
| 1440 | mtsp t0, %sr1 /* Restore sr1 */ | 1495 | mtsp t0, %sr1 /* Restore sr1 */ |
| 1496 | dbit_unlock1 spc,t0 | ||
| 1441 | 1497 | ||
| 1442 | rfir | 1498 | rfir |
| 1443 | nop | 1499 | nop |
| @@ -1459,13 +1515,15 @@ itlb_miss_20: | |||
| 1459 | 1515 | ||
| 1460 | L2_ptep ptp,pte,t0,va,itlb_fault | 1516 | L2_ptep ptp,pte,t0,va,itlb_fault |
| 1461 | 1517 | ||
| 1462 | update_ptep ptp,pte,t0,t1 | 1518 | dbit_lock spc,t0,t1 |
| 1519 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1463 | 1520 | ||
| 1464 | make_insert_tlb spc,pte,prot | 1521 | make_insert_tlb spc,pte,prot |
| 1465 | 1522 | ||
| 1466 | f_extend pte,t0 | 1523 | f_extend pte,t0 |
| 1467 | 1524 | ||
| 1468 | iitlbt pte,prot | 1525 | iitlbt pte,prot |
| 1526 | dbit_unlock1 spc,t0 | ||
| 1469 | 1527 | ||
| 1470 | rfir | 1528 | rfir |
| 1471 | nop | 1529 | nop |
| @@ -1477,13 +1535,15 @@ naitlb_miss_20: | |||
| 1477 | 1535 | ||
| 1478 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 | 1536 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 |
| 1479 | 1537 | ||
| 1480 | update_ptep ptp,pte,t0,t1 | 1538 | dbit_lock spc,t0,t1 |
| 1539 | update_ptep spc,ptp,pte,t0,t1 | ||
| 1481 | 1540 | ||
| 1482 | make_insert_tlb spc,pte,prot | 1541 | make_insert_tlb spc,pte,prot |
| 1483 | 1542 | ||
| 1484 | f_extend pte,t0 | 1543 | f_extend pte,t0 |
| 1485 | 1544 | ||
| 1486 | iitlbt pte,prot | 1545 | iitlbt pte,prot |
| 1546 | dbit_unlock1 spc,t0 | ||
| 1487 | 1547 | ||
| 1488 | rfir | 1548 | rfir |
| 1489 | nop | 1549 | nop |
| @@ -1507,29 +1567,13 @@ dbit_trap_20w: | |||
| 1507 | 1567 | ||
| 1508 | L3_ptep ptp,pte,t0,va,dbit_fault | 1568 | L3_ptep ptp,pte,t0,va,dbit_fault |
| 1509 | 1569 | ||
| 1510 | #ifdef CONFIG_SMP | 1570 | dbit_lock spc,t0,t1 |
| 1511 | cmpib,COND(=),n 0,spc,dbit_nolock_20w | 1571 | update_dirty spc,ptp,pte,t1 |
| 1512 | load32 PA(pa_dbit_lock),t0 | ||
| 1513 | |||
| 1514 | dbit_spin_20w: | ||
| 1515 | LDCW 0(t0),t1 | ||
| 1516 | cmpib,COND(=) 0,t1,dbit_spin_20w | ||
| 1517 | nop | ||
| 1518 | |||
| 1519 | dbit_nolock_20w: | ||
| 1520 | #endif | ||
| 1521 | update_dirty ptp,pte,t1 | ||
| 1522 | 1572 | ||
| 1523 | make_insert_tlb spc,pte,prot | 1573 | make_insert_tlb spc,pte,prot |
| 1524 | 1574 | ||
| 1525 | idtlbt pte,prot | 1575 | idtlbt pte,prot |
| 1526 | #ifdef CONFIG_SMP | 1576 | dbit_unlock0 spc,t0 |
| 1527 | cmpib,COND(=),n 0,spc,dbit_nounlock_20w | ||
| 1528 | ldi 1,t1 | ||
| 1529 | stw t1,0(t0) | ||
| 1530 | |||
| 1531 | dbit_nounlock_20w: | ||
| 1532 | #endif | ||
| 1533 | 1577 | ||
| 1534 | rfir | 1578 | rfir |
| 1535 | nop | 1579 | nop |
| @@ -1543,18 +1587,8 @@ dbit_trap_11: | |||
| 1543 | 1587 | ||
| 1544 | L2_ptep ptp,pte,t0,va,dbit_fault | 1588 | L2_ptep ptp,pte,t0,va,dbit_fault |
| 1545 | 1589 | ||
| 1546 | #ifdef CONFIG_SMP | 1590 | dbit_lock spc,t0,t1 |
| 1547 | cmpib,COND(=),n 0,spc,dbit_nolock_11 | 1591 | update_dirty spc,ptp,pte,t1 |
| 1548 | load32 PA(pa_dbit_lock),t0 | ||
| 1549 | |||
| 1550 | dbit_spin_11: | ||
| 1551 | LDCW 0(t0),t1 | ||
| 1552 | cmpib,= 0,t1,dbit_spin_11 | ||
| 1553 | nop | ||
| 1554 | |||
| 1555 | dbit_nolock_11: | ||
| 1556 | #endif | ||
| 1557 | update_dirty ptp,pte,t1 | ||
| 1558 | 1592 | ||
| 1559 | make_insert_tlb_11 spc,pte,prot | 1593 | make_insert_tlb_11 spc,pte,prot |
| 1560 | 1594 | ||
| @@ -1565,13 +1599,7 @@ dbit_nolock_11: | |||
| 1565 | idtlbp prot,(%sr1,va) | 1599 | idtlbp prot,(%sr1,va) |
| 1566 | 1600 | ||
| 1567 | mtsp t1, %sr1 /* Restore sr1 */ | 1601 | mtsp t1, %sr1 /* Restore sr1 */ |
| 1568 | #ifdef CONFIG_SMP | 1602 | dbit_unlock0 spc,t0 |
| 1569 | cmpib,COND(=),n 0,spc,dbit_nounlock_11 | ||
| 1570 | ldi 1,t1 | ||
| 1571 | stw t1,0(t0) | ||
| 1572 | |||
| 1573 | dbit_nounlock_11: | ||
| 1574 | #endif | ||
| 1575 | 1603 | ||
| 1576 | rfir | 1604 | rfir |
| 1577 | nop | 1605 | nop |
| @@ -1583,32 +1611,15 @@ dbit_trap_20: | |||
| 1583 | 1611 | ||
| 1584 | L2_ptep ptp,pte,t0,va,dbit_fault | 1612 | L2_ptep ptp,pte,t0,va,dbit_fault |
| 1585 | 1613 | ||
| 1586 | #ifdef CONFIG_SMP | 1614 | dbit_lock spc,t0,t1 |
| 1587 | cmpib,COND(=),n 0,spc,dbit_nolock_20 | 1615 | update_dirty spc,ptp,pte,t1 |
| 1588 | load32 PA(pa_dbit_lock),t0 | ||
| 1589 | |||
| 1590 | dbit_spin_20: | ||
| 1591 | LDCW 0(t0),t1 | ||
| 1592 | cmpib,= 0,t1,dbit_spin_20 | ||
| 1593 | nop | ||
| 1594 | |||
| 1595 | dbit_nolock_20: | ||
| 1596 | #endif | ||
| 1597 | update_dirty ptp,pte,t1 | ||
| 1598 | 1616 | ||
| 1599 | make_insert_tlb spc,pte,prot | 1617 | make_insert_tlb spc,pte,prot |
| 1600 | 1618 | ||
| 1601 | f_extend pte,t1 | 1619 | f_extend pte,t1 |
| 1602 | 1620 | ||
| 1603 | idtlbt pte,prot | 1621 | idtlbt pte,prot |
| 1604 | 1622 | dbit_unlock0 spc,t0 | |
| 1605 | #ifdef CONFIG_SMP | ||
| 1606 | cmpib,COND(=),n 0,spc,dbit_nounlock_20 | ||
| 1607 | ldi 1,t1 | ||
| 1608 | stw t1,0(t0) | ||
| 1609 | |||
| 1610 | dbit_nounlock_20: | ||
| 1611 | #endif | ||
| 1612 | 1623 | ||
| 1613 | rfir | 1624 | rfir |
| 1614 | nop | 1625 | nop |
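The dbit_lock/dbit_unlock0/dbit_unlock1 macros added above factor out the ldcw-based spinlock that the dbit_trap_* paths used to open-code, and the same lock now also guards the _PAGE_ACCESSED/_PAGE_DIRTY updates in the TLB miss handlers. In rough C terms the macros behave like the sketch below; ldcw() stands in for the PA-RISC load-and-clear-word instruction (it returns the old word and atomically stores zero), and the lock word's C type here is only illustrative:

        extern volatile unsigned int pa_dbit_lock;      /* nonzero = free, zero = held */

        static void dbit_lock(unsigned long spc)
        {
                if (spc == 0)                           /* this space needs no locking */
                        return;
                while (ldcw(&pa_dbit_lock) == 0)        /* old value 0: someone holds it */
                        cpu_relax();                    /* spin until ldcw reads nonzero */
        }

        static void dbit_unlock(unsigned long spc)
        {
                if (spc != 0)
                        pa_dbit_lock = spc;             /* any nonzero store releases it */
        }

dbit_unlock1 reloads the lock address before the store, while dbit_unlock0 reuses the register that dbit_lock left pointing at it; on !CONFIG_SMP builds all three macros expand to nothing.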
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index e255db0bb761..55237a70e197 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
| @@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec) | |||
| 166 | seq_printf(p, "%*s: ", prec, "STK"); | 166 | seq_printf(p, "%*s: ", prec, "STK"); |
| 167 | for_each_online_cpu(j) | 167 | for_each_online_cpu(j) |
| 168 | seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); | 168 | seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); |
| 169 | seq_printf(p, " Kernel stack usage\n"); | 169 | seq_puts(p, " Kernel stack usage\n"); |
| 170 | # ifdef CONFIG_IRQSTACKS | ||
| 171 | seq_printf(p, "%*s: ", prec, "IST"); | ||
| 172 | for_each_online_cpu(j) | ||
| 173 | seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage); | ||
| 174 | seq_puts(p, " Interrupt stack usage\n"); | ||
| 175 | seq_printf(p, "%*s: ", prec, "ISC"); | ||
| 176 | for_each_online_cpu(j) | ||
| 177 | seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter); | ||
| 178 | seq_puts(p, " Interrupt stack usage counter\n"); | ||
| 179 | # endif | ||
| 170 | #endif | 180 | #endif |
| 171 | #ifdef CONFIG_SMP | 181 | #ifdef CONFIG_SMP |
| 172 | seq_printf(p, "%*s: ", prec, "RES"); | 182 | seq_printf(p, "%*s: ", prec, "RES"); |
| 173 | for_each_online_cpu(j) | 183 | for_each_online_cpu(j) |
| 174 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); | 184 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); |
| 175 | seq_printf(p, " Rescheduling interrupts\n"); | 185 | seq_puts(p, " Rescheduling interrupts\n"); |
| 176 | seq_printf(p, "%*s: ", prec, "CAL"); | 186 | seq_printf(p, "%*s: ", prec, "CAL"); |
| 177 | for_each_online_cpu(j) | 187 | for_each_online_cpu(j) |
| 178 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); | 188 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); |
| 179 | seq_printf(p, " Function call interrupts\n"); | 189 | seq_puts(p, " Function call interrupts\n"); |
| 180 | #endif | 190 | #endif |
| 181 | seq_printf(p, "%*s: ", prec, "TLB"); | 191 | seq_printf(p, "%*s: ", prec, "TLB"); |
| 182 | for_each_online_cpu(j) | 192 | for_each_online_cpu(j) |
| 183 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); | 193 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); |
| 184 | seq_printf(p, " TLB shootdowns\n"); | 194 | seq_puts(p, " TLB shootdowns\n"); |
| 185 | return 0; | 195 | return 0; |
| 186 | } | 196 | } |
| 187 | 197 | ||
| @@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
| 378 | unsigned long sp = regs->gr[30]; | 388 | unsigned long sp = regs->gr[30]; |
| 379 | unsigned long stack_usage; | 389 | unsigned long stack_usage; |
| 380 | unsigned int *last_usage; | 390 | unsigned int *last_usage; |
| 391 | int cpu = smp_processor_id(); | ||
| 381 | 392 | ||
| 382 | /* if sr7 != 0, we interrupted a userspace process which we do not want | 393 | /* if sr7 != 0, we interrupted a userspace process which we do not want |
| 383 | * to check for stack overflow. We will only check the kernel stack. */ | 394 | * to check for stack overflow. We will only check the kernel stack. */ |
| @@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
| 386 | 397 | ||
| 387 | /* calculate kernel stack usage */ | 398 | /* calculate kernel stack usage */ |
| 388 | stack_usage = sp - stack_start; | 399 | stack_usage = sp - stack_start; |
| 389 | last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id()); | 400 | #ifdef CONFIG_IRQSTACKS |
| 401 | if (likely(stack_usage <= THREAD_SIZE)) | ||
| 402 | goto check_kernel_stack; /* found kernel stack */ | ||
| 403 | |||
| 404 | /* check irq stack usage */ | ||
| 405 | stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; | ||
| 406 | stack_usage = sp - stack_start; | ||
| 407 | |||
| 408 | last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); | ||
| 409 | if (unlikely(stack_usage > *last_usage)) | ||
| 410 | *last_usage = stack_usage; | ||
| 411 | |||
| 412 | if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN))) | ||
| 413 | return; | ||
| 414 | |||
| 415 | pr_emerg("stackcheck: %s will most likely overflow irq stack " | ||
| 416 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", | ||
| 417 | current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE); | ||
| 418 | goto panic_check; | ||
| 419 | |||
| 420 | check_kernel_stack: | ||
| 421 | #endif | ||
| 422 | |||
| 423 | /* check kernel stack usage */ | ||
| 424 | last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); | ||
| 390 | 425 | ||
| 391 | if (unlikely(stack_usage > *last_usage)) | 426 | if (unlikely(stack_usage > *last_usage)) |
| 392 | *last_usage = stack_usage; | 427 | *last_usage = stack_usage; |
| @@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
| 398 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", | 433 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", |
| 399 | current->comm, sp, stack_start, stack_start + THREAD_SIZE); | 434 | current->comm, sp, stack_start, stack_start + THREAD_SIZE); |
| 400 | 435 | ||
| 436 | #ifdef CONFIG_IRQSTACKS | ||
| 437 | panic_check: | ||
| 438 | #endif | ||
| 401 | if (sysctl_panic_on_stackoverflow) | 439 | if (sysctl_panic_on_stackoverflow) |
| 402 | panic("low stack detected by irq handler - check messages\n"); | 440 | panic("low stack detected by irq handler - check messages\n"); |
| 403 | #endif | 441 | #endif |
| 404 | } | 442 | } |
| 405 | 443 | ||
| 406 | #ifdef CONFIG_IRQSTACKS | 444 | #ifdef CONFIG_IRQSTACKS |
| 407 | DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); | 445 | DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { |
| 446 | .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock) | ||
| 447 | }; | ||
| 408 | 448 | ||
| 409 | static void execute_on_irq_stack(void *func, unsigned long param1) | 449 | static void execute_on_irq_stack(void *func, unsigned long param1) |
| 410 | { | 450 | { |
| 411 | unsigned long *irq_stack_start; | 451 | union irq_stack_union *union_ptr; |
| 412 | unsigned long irq_stack; | 452 | unsigned long irq_stack; |
| 413 | int cpu = smp_processor_id(); | 453 | raw_spinlock_t *irq_stack_in_use; |
| 414 | 454 | ||
| 415 | irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; | 455 | union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); |
| 416 | irq_stack = (unsigned long) irq_stack_start; | 456 | irq_stack = (unsigned long) &union_ptr->stack; |
| 417 | irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ | 457 | irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock), |
| 458 | 64); /* align for stack frame usage */ | ||
| 418 | 459 | ||
| 419 | BUG_ON(*irq_stack_start); /* report bug if we were called recursively. */ | 460 | /* We may be called recursively. If we are already using the irq stack,
| 420 | *irq_stack_start = 1; | 461 | * just continue to use it. Use spinlocks to serialize |
| 462 | * the irq stack usage. | ||
| 463 | */ | ||
| 464 | irq_stack_in_use = &union_ptr->lock; | ||
| 465 | if (!raw_spin_trylock(irq_stack_in_use)) { | ||
| 466 | void (*direct_call)(unsigned long p1) = func; | ||
| 467 | |||
| 468 | /* We are using the IRQ stack already. | ||
| 469 | * Do direct call on current stack. */ | ||
| 470 | direct_call(param1); | ||
| 471 | return; | ||
| 472 | } | ||
| 421 | 473 | ||
| 422 | /* This is where we switch to the IRQ stack. */ | 474 | /* This is where we switch to the IRQ stack. */ |
| 423 | call_on_stack(param1, func, irq_stack); | 475 | call_on_stack(param1, func, irq_stack); |
| 424 | 476 | ||
| 425 | *irq_stack_start = 0; | 477 | __inc_irq_stat(irq_stack_counter); |
| 478 | |||
| 479 | /* free up irq stack usage. */ | ||
| 480 | do_raw_spin_unlock(irq_stack_in_use); | ||
| 481 | } | ||
| 482 | |||
| 483 | asmlinkage void do_softirq(void) | ||
| 484 | { | ||
| 485 | __u32 pending; | ||
| 486 | unsigned long flags; | ||
| 487 | |||
| 488 | if (in_interrupt()) | ||
| 489 | return; | ||
| 490 | |||
| 491 | local_irq_save(flags); | ||
| 492 | |||
| 493 | pending = local_softirq_pending(); | ||
| 494 | |||
| 495 | if (pending) | ||
| 496 | execute_on_irq_stack(__do_softirq, 0); | ||
| 497 | |||
| 498 | local_irq_restore(flags); | ||
| 426 | } | 499 | } |
| 427 | #endif /* CONFIG_IRQSTACKS */ | 500 | #endif /* CONFIG_IRQSTACKS */ |
| 428 | 501 | ||
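With CONFIG_IRQSTACKS, the arch_show_interrupts() hunk above adds two per-CPU rows to /proc/interrupts alongside the existing STK line: IST tracks the deepest interrupt-stack usage seen so far (kept up to date by the stack_overflow_check() changes) and ISC counts how many times the handler switched to the IRQ stack. Given the "%10u " per-CPU format used there, the output looks roughly like this on a two-CPU machine (the counts are illustrative only, not taken from a real system):

        STK:       3240       2896  Kernel stack usage
        IST:       1984       1760  Interrupt stack usage
        ISC:      10234       9871  Interrupt stack usage counter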
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ce939ac8622b..1c965642068b 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -1069,7 +1069,7 @@ void flush_tlb_all(void) | |||
| 1069 | { | 1069 | { |
| 1070 | int do_recycle; | 1070 | int do_recycle; |
| 1071 | 1071 | ||
| 1072 | inc_irq_stat(irq_tlb_count); | 1072 | __inc_irq_stat(irq_tlb_count); |
| 1073 | do_recycle = 0; | 1073 | do_recycle = 0; |
| 1074 | spin_lock(&sid_lock); | 1074 | spin_lock(&sid_lock); |
| 1075 | if (dirty_space_ids > RECYCLE_THRESHOLD) { | 1075 | if (dirty_space_ids > RECYCLE_THRESHOLD) { |
| @@ -1090,7 +1090,7 @@ void flush_tlb_all(void) | |||
| 1090 | #else | 1090 | #else |
| 1091 | void flush_tlb_all(void) | 1091 | void flush_tlb_all(void) |
| 1092 | { | 1092 | { |
| 1093 | inc_irq_stat(irq_tlb_count); | 1093 | __inc_irq_stat(irq_tlb_count); |
| 1094 | spin_lock(&sid_lock); | 1094 | spin_lock(&sid_lock); |
| 1095 | flush_tlb_all_local(NULL); | 1095 | flush_tlb_all_local(NULL); |
| 1096 | recycle_sids(); | 1096 | recycle_sids(); |
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 5416e28a7538..863d877e0b5f 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug | |||
| @@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI | |||
| 262 | Select this to enable early debugging for the PowerNV platform | 262 | Select this to enable early debugging for the PowerNV platform |
| 263 | using an "hvsi" console | 263 | using an "hvsi" console |
| 264 | 264 | ||
| 265 | config PPC_EARLY_DEBUG_MEMCONS | ||
| 266 | bool "In memory console" | ||
| 267 | help | ||
| 268 | Select this to enable early debugging using an in memory console. | ||
| 269 | This console provides input and output buffers stored within the | ||
| 270 | kernel BSS and should be safe to select on any system. A debugger | ||
| 271 | can then be used to read kernel output or send input to the console. | ||
| 265 | endchoice | 272 | endchoice |
| 266 | 273 | ||
| 274 | config PPC_MEMCONS_OUTPUT_SIZE | ||
| 275 | int "In memory console output buffer size" | ||
| 276 | depends on PPC_EARLY_DEBUG_MEMCONS | ||
| 277 | default 4096 | ||
| 278 | help | ||
| 279 | Selects the size of the output buffer (in bytes) of the in memory | ||
| 280 | console. | ||
| 281 | |||
| 282 | config PPC_MEMCONS_INPUT_SIZE | ||
| 283 | int "In memory console input buffer size" | ||
| 284 | depends on PPC_EARLY_DEBUG_MEMCONS | ||
| 285 | default 128 | ||
| 286 | help | ||
| 287 | Selects the size of the input buffer (in bytes) of the in memory | ||
| 288 | console. | ||
| 289 | |||
| 267 | config PPC_EARLY_DEBUG_OPAL | 290 | config PPC_EARLY_DEBUG_OPAL |
| 268 | def_bool y | 291 | def_bool y |
| 269 | depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI | 292 | depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI |
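The two size options configure the buffers for the console described in the help text above; the implementation itself (a udbg backend, per the udbg_init_memcons() declaration added further down) is not part of the hunks shown here. As a rough idea of what such a backend needs, the sketch below is a guess at the shape, with hypothetical struct and symbol names rather than anything taken from this patch:

        /* Hypothetical layout: two BSS buffers plus read/write cursors that an
         * attached debugger can locate by symbol and poll or fill. */
        struct memcons {
                char *output_start, *output_pos, *output_end;   /* kernel writes, debugger reads */
                char *input_start,  *input_pos,  *input_end;    /* debugger writes, kernel reads */
        };

        static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
        static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];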
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h new file mode 100644 index 000000000000..b6f5a33b8ee2 --- /dev/null +++ b/arch/powerpc/include/asm/context_tracking.h | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | #ifndef _ASM_POWERPC_CONTEXT_TRACKING_H | ||
| 2 | #define _ASM_POWERPC_CONTEXT_TRACKING_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_CONTEXT_TRACKING | ||
| 5 | #define SCHEDULE_USER bl .schedule_user | ||
| 6 | #else | ||
| 7 | #define SCHEDULE_USER bl .schedule | ||
| 8 | #endif | ||
| 9 | |||
| 10 | #endif | ||
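SCHEDULE_USER resolves to .schedule_user when context tracking is enabled so that a reschedule taken on the return-to-userspace path notifies the context-tracking code; the call site is the ret_from_except_lite hunk in entry_64.S below. The generic helper is expected to be roughly the following (a sketch of its shape, not a quote of the scheduler code):

        asmlinkage void __sched schedule_user(void)
        {
                user_exit();    /* tell context tracking/RCU we are back in the kernel */
                schedule();
                user_enter();   /* and that we are about to resume userspace */
        }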
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 0df54646f968..681bc0314b6b 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) | 52 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) |
| 53 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) | 53 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) |
| 54 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) | 54 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) |
| 55 | #define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) | ||
| 55 | 56 | ||
| 56 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
| 57 | 58 | ||
| @@ -69,7 +70,8 @@ enum { | |||
| 69 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | | 70 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | |
| 70 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, | 71 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, |
| 71 | FW_FEATURE_PSERIES_ALWAYS = 0, | 72 | FW_FEATURE_PSERIES_ALWAYS = 0, |
| 72 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, | 73 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | |
| 74 | FW_FEATURE_OPALv3, | ||
| 73 | FW_FEATURE_POWERNV_ALWAYS = 0, | 75 | FW_FEATURE_POWERNV_ALWAYS = 0, |
| 74 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 76 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
| 75 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 77 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index d615b28dda82..ba713f166fa5 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
| @@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void) | |||
| 96 | #endif | 96 | #endif |
| 97 | 97 | ||
| 98 | #define hard_irq_disable() do { \ | 98 | #define hard_irq_disable() do { \ |
| 99 | u8 _was_enabled = get_paca()->soft_enabled; \ | ||
| 99 | __hard_irq_disable(); \ | 100 | __hard_irq_disable(); \ |
| 100 | if (local_paca->soft_enabled) \ | ||
| 101 | trace_hardirqs_off(); \ | ||
| 102 | get_paca()->soft_enabled = 0; \ | 101 | get_paca()->soft_enabled = 0; \ |
| 103 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ | 102 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ |
| 103 | if (_was_enabled) \ | ||
| 104 | trace_hardirqs_off(); \ | ||
| 104 | } while(0) | 105 | } while(0) |
| 105 | 106 | ||
| 106 | static inline bool lazy_irq_pending(void) | 107 | static inline bool lazy_irq_pending(void) |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index b6c8b58b1d76..cbb9305ab15a 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
| @@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType { | |||
| 243 | 243 | ||
| 244 | enum OpalThreadStatus { | 244 | enum OpalThreadStatus { |
| 245 | OPAL_THREAD_INACTIVE = 0x0, | 245 | OPAL_THREAD_INACTIVE = 0x0, |
| 246 | OPAL_THREAD_STARTED = 0x1 | 246 | OPAL_THREAD_STARTED = 0x1, |
| 247 | OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ | ||
| 247 | }; | 248 | }; |
| 248 | 249 | ||
| 249 | enum OpalPciBusCompare { | 250 | enum OpalPciBusCompare { |
| @@ -563,6 +564,8 @@ extern void opal_nvram_init(void); | |||
| 563 | 564 | ||
| 564 | extern int opal_machine_check(struct pt_regs *regs); | 565 | extern int opal_machine_check(struct pt_regs *regs); |
| 565 | 566 | ||
| 567 | extern void opal_shutdown(void); | ||
| 568 | |||
| 566 | #endif /* __ASSEMBLY__ */ | 569 | #endif /* __ASSEMBLY__ */ |
| 567 | 570 | ||
| 568 | #endif /* __OPAL_H */ | 571 | #endif /* __OPAL_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 91acb12bac92..b66ae722a8e9 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h | |||
| @@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
| 186 | 186 | ||
| 187 | static inline pgtable_t pmd_pgtable(pmd_t pmd) | 187 | static inline pgtable_t pmd_pgtable(pmd_t pmd) |
| 188 | { | 188 | { |
| 189 | return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); | 189 | return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 192 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 3e13e23e4fdf..d836d945068d 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * generic accessors and iterators here | 47 | * generic accessors and iterators here |
| 48 | */ | 48 | */ |
| 49 | #define __real_pte(e,p) ((real_pte_t) { \ | 49 | #define __real_pte(e,p) ((real_pte_t) { \ |
| 50 | (e), ((e) & _PAGE_COMBO) ? \ | 50 | (e), (pte_val(e) & _PAGE_COMBO) ? \ |
| 51 | (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) | 51 | (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) |
| 52 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ | 52 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ |
| 53 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) | 53 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) |
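The switch from (e) to pte_val(e) keeps this macro working when pte_t is not a bare integer: with STRICT_MM_TYPECHECKS the page-table types are wrapped in single-member structs, and masking a pte_t directly then fails to build. A simplified sketch of the two configurations (the real powerpc definitions carry more detail):

        #ifdef STRICT_MM_TYPECHECKS
        typedef struct { unsigned long pte; } pte_t;    /* "(e) & _PAGE_COMBO" will not compile */
        #define pte_val(x)      ((x).pte)
        #else
        typedef unsigned long pte_t;                    /* bare integer, direct masking happens to work */
        #define pte_val(x)      (x)
        #endif

pte_val() is therefore the accessor that is correct in both builds, which is what the replacement line uses.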
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index a8bc2bb4adc9..34fd70488d83 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h | |||
| @@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex); | |||
| 264 | extern void rtas_initialize(void); | 264 | extern void rtas_initialize(void); |
| 265 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); | 265 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); |
| 266 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); | 266 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); |
| 267 | extern int rtas_online_cpus_mask(cpumask_var_t cpus); | ||
| 268 | extern int rtas_offline_cpus_mask(cpumask_var_t cpus); | ||
| 267 | extern int rtas_ibm_suspend_me(struct rtas_args *); | 269 | extern int rtas_ibm_suspend_me(struct rtas_args *); |
| 268 | 270 | ||
| 269 | struct rtc_time; | 271 | struct rtc_time; |
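The two new helpers are implemented in the arch/powerpc/kernel/rtas.c hunks later in this diff and are used there by rtas_ibm_suspend_me(), which needs every present CPU online around the suspend call. The intended call pattern, condensed from that usage (error handling trimmed to the essentials):

        cpumask_var_t offline_mask;

        if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
                return -ENOMEM;

        /* CPUs that are present but not currently online */
        cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);

        if (rtas_online_cpus_mask(offline_mask))        /* bring them online */
                goto out;

        /* ... do the work that needs all present CPUs online ... */

        rtas_offline_cpus_mask(offline_mask);           /* put them back offline */
out:
        free_cpumask_var(offline_mask);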
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 8ceea14d6fe4..ba7b1973866e 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
| @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) | |||
| 97 | #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ | 97 | #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ |
| 98 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 98 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
| 99 | #define TIF_SINGLESTEP 8 /* singlestepping active */ | 99 | #define TIF_SINGLESTEP 8 /* singlestepping active */ |
| 100 | #define TIF_MEMDIE 9 /* is terminating due to OOM killer */ | 100 | #define TIF_NOHZ 9 /* in adaptive nohz mode */ |
| 101 | #define TIF_SECCOMP 10 /* secure computing */ | 101 | #define TIF_SECCOMP 10 /* secure computing */ |
| 102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ | 102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ |
| 103 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 103 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
| @@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void) | |||
| 106 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ | 106 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ |
| 107 | #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation | 107 | #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation |
| 108 | for stack store? */ | 108 | for stack store? */ |
| 109 | #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ | ||
| 109 | 110 | ||
| 110 | /* as above, but as bit values */ | 111 | /* as above, but as bit values */ |
| 111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 112 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
| @@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void) | |||
| 124 | #define _TIF_UPROBE (1<<TIF_UPROBE) | 125 | #define _TIF_UPROBE (1<<TIF_UPROBE) |
| 125 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | 126 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
| 126 | #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) | 127 | #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) |
| 128 | #define _TIF_NOHZ (1<<TIF_NOHZ) | ||
| 127 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 129 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
| 128 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | 130 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ |
| 131 | _TIF_NOHZ) | ||
| 129 | 132 | ||
| 130 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | 133 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
| 131 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) | 134 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 5a7510e9d09d..dc590919f8eb 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
| @@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void); | |||
| 52 | extern void __init udbg_init_cpm(void); | 52 | extern void __init udbg_init_cpm(void); |
| 53 | extern void __init udbg_init_usbgecko(void); | 53 | extern void __init udbg_init_usbgecko(void); |
| 54 | extern void __init udbg_init_wsp(void); | 54 | extern void __init udbg_init_wsp(void); |
| 55 | extern void __init udbg_init_memcons(void); | ||
| 55 | extern void __init udbg_init_ehv_bc(void); | 56 | extern void __init udbg_init_ehv_bc(void); |
| 56 | extern void __init udbg_init_ps3gelic(void); | 57 | extern void __init udbg_init_ps3gelic(void); |
| 57 | extern void __init udbg_init_debug_opal_raw(void); | 58 | extern void __init udbg_init_debug_opal_raw(void); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e514de57a125..d22e73e4618b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
| @@ -439,8 +439,6 @@ ret_from_fork: | |||
| 439 | ret_from_kernel_thread: | 439 | ret_from_kernel_thread: |
| 440 | REST_NVGPRS(r1) | 440 | REST_NVGPRS(r1) |
| 441 | bl schedule_tail | 441 | bl schedule_tail |
| 442 | li r3,0 | ||
| 443 | stw r3,0(r1) | ||
| 444 | mtlr r14 | 442 | mtlr r14 |
| 445 | mr r3,r15 | 443 | mr r3,r15 |
| 446 | PPC440EP_ERR42 | 444 | PPC440EP_ERR42 |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 915fbb4fc2fe..51cfb8fc301f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <asm/irqflags.h> | 33 | #include <asm/irqflags.h> |
| 34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
| 35 | #include <asm/hw_irq.h> | 35 | #include <asm/hw_irq.h> |
| 36 | #include <asm/context_tracking.h> | ||
| 36 | 37 | ||
| 37 | /* | 38 | /* |
| 38 | * System calls. | 39 | * System calls. |
| @@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork) | |||
| 376 | _GLOBAL(ret_from_kernel_thread) | 377 | _GLOBAL(ret_from_kernel_thread) |
| 377 | bl .schedule_tail | 378 | bl .schedule_tail |
| 378 | REST_NVGPRS(r1) | 379 | REST_NVGPRS(r1) |
| 379 | li r3,0 | ||
| 380 | std r3,0(r1) | ||
| 381 | ld r14, 0(r14) | 380 | ld r14, 0(r14) |
| 382 | mtlr r14 | 381 | mtlr r14 |
| 383 | mr r3,r15 | 382 | mr r3,r15 |
| @@ -634,7 +633,7 @@ _GLOBAL(ret_from_except_lite) | |||
| 634 | andi. r0,r4,_TIF_NEED_RESCHED | 633 | andi. r0,r4,_TIF_NEED_RESCHED |
| 635 | beq 1f | 634 | beq 1f |
| 636 | bl .restore_interrupts | 635 | bl .restore_interrupts |
| 637 | bl .schedule | 636 | SCHEDULE_USER |
| 638 | b .ret_from_except_lite | 637 | b .ret_from_except_lite |
| 639 | 638 | ||
| 640 | 1: bl .save_nvgprs | 639 | 1: bl .save_nvgprs |
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 42a756eec9ff..645170a07ada 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
| @@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
| 489 | */ | 489 | */ |
| 490 | 490 | ||
| 491 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | 491 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ |
| 492 | andis. r15,r14,DBSR_IC@h | 492 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
| 493 | beq+ 1f | 493 | beq+ 1f |
| 494 | 494 | ||
| 495 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | 495 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
| @@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
| 500 | bge+ cr1,1f | 500 | bge+ cr1,1f |
| 501 | 501 | ||
| 502 | /* here it looks like we got an inappropriate debug exception. */ | 502 | /* here it looks like we got an inappropriate debug exception. */ |
| 503 | lis r14,DBSR_IC@h /* clear the IC event */ | 503 | lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ |
| 504 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ | 504 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ |
| 505 | mtspr SPRN_DBSR,r14 | 505 | mtspr SPRN_DBSR,r14 |
| 506 | mtspr SPRN_CSRR1,r11 | 506 | mtspr SPRN_CSRR1,r11 |
| @@ -555,7 +555,7 @@ kernel_dbg_exc: | |||
| 555 | */ | 555 | */ |
| 556 | 556 | ||
| 557 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | 557 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ |
| 558 | andis. r15,r14,DBSR_IC@h | 558 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
| 559 | beq+ 1f | 559 | beq+ 1f |
| 560 | 560 | ||
| 561 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | 561 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
| @@ -566,7 +566,7 @@ kernel_dbg_exc: | |||
| 566 | bge+ cr1,1f | 566 | bge+ cr1,1f |
| 567 | 567 | ||
| 568 | /* here it looks like we got an inappropriate debug exception. */ | 568 | /* here it looks like we got an inappropriate debug exception. */ |
| 569 | lis r14,DBSR_IC@h /* clear the IC event */ | 569 | lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ |
| 570 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ | 570 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ |
| 571 | mtspr SPRN_DBSR,r14 | 571 | mtspr SPRN_DBSR,r14 |
| 572 | mtspr SPRN_DSRR1,r11 | 572 | mtspr SPRN_DSRR1,r11 |
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 466a2908bb63..611acdf30096 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
| 18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
| 20 | #include <linux/hardirq.h> | ||
| 20 | 21 | ||
| 21 | #include <asm/page.h> | 22 | #include <asm/page.h> |
| 22 | #include <asm/current.h> | 23 | #include <asm/current.h> |
| @@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image) | |||
| 335 | pr_debug("kexec: Starting switchover sequence.\n"); | 336 | pr_debug("kexec: Starting switchover sequence.\n"); |
| 336 | 337 | ||
| 337 | /* switch to a statically allocated stack. Based on irq stack code. | 338 | /* switch to a statically allocated stack. Based on irq stack code. |
| 339 | * We set up preempt_count to avoid using VMX in memcpy. | ||
| 338 | * XXX: the task struct will likely be invalid once we do the copy! | 340 | * XXX: the task struct will likely be invalid once we do the copy! |
| 339 | */ | 341 | */ |
| 340 | kexec_stack.thread_info.task = current_thread_info()->task; | 342 | kexec_stack.thread_info.task = current_thread_info()->task; |
| 341 | kexec_stack.thread_info.flags = 0; | 343 | kexec_stack.thread_info.flags = 0; |
| 344 | kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET; | ||
| 345 | kexec_stack.thread_info.cpu = current_thread_info()->cpu; | ||
| 342 | 346 | ||
| 343 | /* We need a static PACA, too; copy this CPU's PACA over and switch to | 347 | /* We need a static PACA, too; copy this CPU's PACA over and switch to |
| 344 | * it. Also poison per_cpu_offset to catch anyone using non-static | 348 | * it. Also poison per_cpu_offset to catch anyone using non-static |
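The HARDIRQ_OFFSET trick works because in_interrupt() is derived from preempt_count(): pre-loading the kexec stack's preempt_count makes in_interrupt() report true, and, as the new comment says, that is what keeps the copy code away from VMX. For reference, the generic definitions are essentially the following (paraphrased from the linux/hardirq.h of this period, so treat the exact form as approximate):

        #define irq_count()     (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK))
        #define in_interrupt()  (irq_count())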
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 19e096bd0e73..e469f30e6eeb 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
| @@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2) | |||
| 657 | li r3,2 | 657 | li r3,2 |
| 658 | blr | 658 | blr |
| 659 | 659 | ||
| 660 | _GLOBAL(__bswapdi2) | ||
| 661 | rotlwi r9,r4,8 | ||
| 662 | rotlwi r10,r3,8 | ||
| 663 | rlwimi r9,r4,24,0,7 | ||
| 664 | rlwimi r10,r3,24,0,7 | ||
| 665 | rlwimi r9,r4,24,16,23 | ||
| 666 | rlwimi r10,r3,24,16,23 | ||
| 667 | mr r3,r9 | ||
| 668 | mr r4,r10 | ||
| 669 | blr | ||
| 670 | |||
| 660 | _GLOBAL(abs) | 671 | _GLOBAL(abs) |
| 661 | srawi r4,r3,31 | 672 | srawi r4,r3,31 |
| 662 | xor r3,r3,r4 | 673 | xor r3,r3,r4 |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 5cfa8008693b..6820e45f557b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
| @@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache) | |||
| 234 | isync | 234 | isync |
| 235 | blr | 235 | blr |
| 236 | 236 | ||
| 237 | _GLOBAL(__bswapdi2) | ||
| 238 | srdi r8,r3,32 | ||
| 239 | rlwinm r7,r3,8,0xffffffff | ||
| 240 | rlwimi r7,r3,24,0,7 | ||
| 241 | rlwinm r9,r8,8,0xffffffff | ||
| 242 | rlwimi r7,r3,24,16,23 | ||
| 243 | rlwimi r9,r8,24,0,7 | ||
| 244 | rlwimi r9,r8,24,16,23 | ||
| 245 | sldi r7,r7,32 | ||
| 246 | or r3,r7,r9 | ||
| 247 | blr | ||
| 237 | 248 | ||
| 238 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | 249 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) |
| 239 | /* | 250 | /* |
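Both __bswapdi2 implementations (the 32-bit one above operating on the r3/r4 word pair, and the 64-bit one here working in a single register) provide the libgcc 64-bit byte-swap helper that the ppc_ksyms.c hunk below exports. Their result is equivalent to this portable C reference (given as a model of the computation, not code from the patch):

        unsigned long long bswapdi2_ref(unsigned long long x)
        {
                return ((x & 0x00000000000000ffULL) << 56) |
                       ((x & 0x000000000000ff00ULL) << 40) |
                       ((x & 0x0000000000ff0000ULL) << 24) |
                       ((x & 0x00000000ff000000ULL) <<  8) |
                       ((x & 0x000000ff00000000ULL) >>  8) |
                       ((x & 0x0000ff0000000000ULL) >> 24) |
                       ((x & 0x00ff000000000000ULL) >> 40) |
                       ((x & 0xff00000000000000ULL) >> 56);
        }

In the 32-bit version the final mr r3,r9 / mr r4,r10 pair also exchanges the two halves across the register pair, since the high word of the result is the byte-swapped low word of the input and vice versa.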
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index f5c5c90799a7..6053f037ef0a 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
| @@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | |||
| 359 | enum pci_mmap_state mmap_state, | 359 | enum pci_mmap_state mmap_state, |
| 360 | int write_combine) | 360 | int write_combine) |
| 361 | { | 361 | { |
| 362 | unsigned long prot = pgprot_val(protection); | ||
| 363 | 362 | ||
| 364 | /* Write combine is always 0 on non-memory space mappings. On | 363 | /* Write combine is always 0 on non-memory space mappings. On |
| 365 | * memory space, if the user didn't pass 1, we check for a | 364 | * memory space, if the user didn't pass 1, we check for a |
| @@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | |||
| 376 | 375 | ||
| 377 | /* XXX would be nice to have a way to ask for write-through */ | 376 | /* XXX would be nice to have a way to ask for write-through */ |
| 378 | if (write_combine) | 377 | if (write_combine) |
| 379 | return pgprot_noncached_wc(prot); | 378 | return pgprot_noncached_wc(protection); |
| 380 | else | 379 | else |
| 381 | return pgprot_noncached(prot); | 380 | return pgprot_noncached(protection); |
| 382 | } | 381 | } |
| 383 | 382 | ||
| 384 | /* | 383 | /* |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 78b8766fd79e..c29666586998 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
| @@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3); | |||
| 143 | int __ucmpdi2(unsigned long long, unsigned long long); | 143 | int __ucmpdi2(unsigned long long, unsigned long long); |
| 144 | EXPORT_SYMBOL(__ucmpdi2); | 144 | EXPORT_SYMBOL(__ucmpdi2); |
| 145 | #endif | 145 | #endif |
| 146 | 146 | long long __bswapdi2(long long); | |
| 147 | EXPORT_SYMBOL(__bswapdi2); | ||
| 147 | EXPORT_SYMBOL(memcpy); | 148 | EXPORT_SYMBOL(memcpy); |
| 148 | EXPORT_SYMBOL(memset); | 149 | EXPORT_SYMBOL(memset); |
| 149 | EXPORT_SYMBOL(memmove); | 150 | EXPORT_SYMBOL(memmove); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ceb4e7b62cf4..a902723fdc69 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
| 339 | 339 | ||
| 340 | static void prime_debug_regs(struct thread_struct *thread) | 340 | static void prime_debug_regs(struct thread_struct *thread) |
| 341 | { | 341 | { |
| 342 | /* | ||
| 343 | * We could have inherited MSR_DE from userspace, since | ||
| 344 | * it doesn't get cleared on exception entry. Make sure | ||
| 345 | * MSR_DE is clear before we enable any debug events. | ||
| 346 | */ | ||
| 347 | mtmsr(mfmsr() & ~MSR_DE); | ||
| 348 | |||
| 342 | mtspr(SPRN_IAC1, thread->iac1); | 349 | mtspr(SPRN_IAC1, thread->iac1); |
| 343 | mtspr(SPRN_IAC2, thread->iac2); | 350 | mtspr(SPRN_IAC2, thread->iac2); |
| 344 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 351 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
| @@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
| 971 | * do some house keeping and then return from the fork or clone | 978 | * do some house keeping and then return from the fork or clone |
| 972 | * system call, using the stack frame created above. | 979 | * system call, using the stack frame created above. |
| 973 | */ | 980 | */ |
| 981 | ((unsigned long *)sp)[0] = 0; | ||
| 974 | sp -= sizeof(struct pt_regs); | 982 | sp -= sizeof(struct pt_regs); |
| 975 | kregs = (struct pt_regs *) sp; | 983 | kregs = (struct pt_regs *) sp; |
| 976 | sp -= STACK_FRAME_OVERHEAD; | 984 | sp -= STACK_FRAME_OVERHEAD; |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 3b14d320e69f..98c2fc198712 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <trace/syscall.h> | 32 | #include <trace/syscall.h> |
| 33 | #include <linux/hw_breakpoint.h> | 33 | #include <linux/hw_breakpoint.h> |
| 34 | #include <linux/perf_event.h> | 34 | #include <linux/perf_event.h> |
| 35 | #include <linux/context_tracking.h> | ||
| 35 | 36 | ||
| 36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
| 37 | #include <asm/page.h> | 38 | #include <asm/page.h> |
| @@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
| 1788 | { | 1789 | { |
| 1789 | long ret = 0; | 1790 | long ret = 0; |
| 1790 | 1791 | ||
| 1792 | user_exit(); | ||
| 1793 | |||
| 1791 | secure_computing_strict(regs->gpr[0]); | 1794 | secure_computing_strict(regs->gpr[0]); |
| 1792 | 1795 | ||
| 1793 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 1796 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
| @@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
| 1832 | step = test_thread_flag(TIF_SINGLESTEP); | 1835 | step = test_thread_flag(TIF_SINGLESTEP); |
| 1833 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 1836 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
| 1834 | tracehook_report_syscall_exit(regs, step); | 1837 | tracehook_report_syscall_exit(regs, step); |
| 1838 | |||
| 1839 | user_enter(); | ||
| 1835 | } | 1840 | } |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1fd6e7b2f390..52add6f3e201 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 20 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
| 21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
| 22 | #include <linux/cpu.h> | ||
| 22 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
| 23 | #include <linux/completion.h> | 24 | #include <linux/completion.h> |
| 24 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
| @@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info) | |||
| 807 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); | 808 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); |
| 808 | } | 809 | } |
| 809 | 810 | ||
| 811 | enum rtas_cpu_state { | ||
| 812 | DOWN, | ||
| 813 | UP, | ||
| 814 | }; | ||
| 815 | |||
| 816 | #ifndef CONFIG_SMP | ||
| 817 | static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, | ||
| 818 | cpumask_var_t cpus) | ||
| 819 | { | ||
| 820 | if (!cpumask_empty(cpus)) { | ||
| 821 | cpumask_clear(cpus); | ||
| 822 | return -EINVAL; | ||
| 823 | } else | ||
| 824 | return 0; | ||
| 825 | } | ||
| 826 | #else | ||
| 827 | /* On return cpumask will be altered to indicate CPUs changed. | ||
| 828 | * CPUs with states changed will be set in the mask, | ||
| 829 | * CPUs with status unchanged will be unset in the mask. */ | ||
| 830 | static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, | ||
| 831 | cpumask_var_t cpus) | ||
| 832 | { | ||
| 833 | int cpu; | ||
| 834 | int cpuret = 0; | ||
| 835 | int ret = 0; | ||
| 836 | |||
| 837 | if (cpumask_empty(cpus)) | ||
| 838 | return 0; | ||
| 839 | |||
| 840 | for_each_cpu(cpu, cpus) { | ||
| 841 | switch (state) { | ||
| 842 | case DOWN: | ||
| 843 | cpuret = cpu_down(cpu); | ||
| 844 | break; | ||
| 845 | case UP: | ||
| 846 | cpuret = cpu_up(cpu); | ||
| 847 | break; | ||
| 848 | } | ||
| 849 | if (cpuret) { | ||
| 850 | pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", | ||
| 851 | __func__, | ||
| 852 | ((state == UP) ? "up" : "down"), | ||
| 853 | cpu, cpuret); | ||
| 854 | if (!ret) | ||
| 855 | ret = cpuret; | ||
| 856 | if (state == UP) { | ||
| 857 | /* clear bits for unchanged cpus, return */ | ||
| 858 | cpumask_shift_right(cpus, cpus, cpu); | ||
| 859 | cpumask_shift_left(cpus, cpus, cpu); | ||
| 860 | break; | ||
| 861 | } else { | ||
| 862 | /* clear bit for unchanged cpu, continue */ | ||
| 863 | cpumask_clear_cpu(cpu, cpus); | ||
| 864 | } | ||
| 865 | } | ||
| 866 | } | ||
| 867 | |||
| 868 | return ret; | ||
| 869 | } | ||
| 870 | #endif | ||
| 871 | |||
| 872 | int rtas_online_cpus_mask(cpumask_var_t cpus) | ||
| 873 | { | ||
| 874 | int ret; | ||
| 875 | |||
| 876 | ret = rtas_cpu_state_change_mask(UP, cpus); | ||
| 877 | |||
| 878 | if (ret) { | ||
| 879 | cpumask_var_t tmp_mask; | ||
| 880 | |||
| 881 | if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) | ||
| 882 | return ret; | ||
| 883 | |||
| 884 | /* Use tmp_mask to preserve cpus mask from first failure */ | ||
| 885 | cpumask_copy(tmp_mask, cpus); | ||
| 886 | rtas_offline_cpus_mask(tmp_mask); | ||
| 887 | free_cpumask_var(tmp_mask); | ||
| 888 | } | ||
| 889 | |||
| 890 | return ret; | ||
| 891 | } | ||
| 892 | EXPORT_SYMBOL(rtas_online_cpus_mask); | ||
| 893 | |||
| 894 | int rtas_offline_cpus_mask(cpumask_var_t cpus) | ||
| 895 | { | ||
| 896 | return rtas_cpu_state_change_mask(DOWN, cpus); | ||
| 897 | } | ||
| 898 | EXPORT_SYMBOL(rtas_offline_cpus_mask); | ||
| 899 | |||
| 810 | int rtas_ibm_suspend_me(struct rtas_args *args) | 900 | int rtas_ibm_suspend_me(struct rtas_args *args) |
| 811 | { | 901 | { |
| 812 | long state; | 902 | long state; |
| @@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
| 814 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 904 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
| 815 | struct rtas_suspend_me_data data; | 905 | struct rtas_suspend_me_data data; |
| 816 | DECLARE_COMPLETION_ONSTACK(done); | 906 | DECLARE_COMPLETION_ONSTACK(done); |
| 907 | cpumask_var_t offline_mask; | ||
| 908 | int cpuret; | ||
| 817 | 909 | ||
| 818 | if (!rtas_service_present("ibm,suspend-me")) | 910 | if (!rtas_service_present("ibm,suspend-me")) |
| 819 | return -ENOSYS; | 911 | return -ENOSYS; |
| @@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
| 837 | return 0; | 929 | return 0; |
| 838 | } | 930 | } |
| 839 | 931 | ||
| 932 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) | ||
| 933 | return -ENOMEM; | ||
| 934 | |||
| 840 | atomic_set(&data.working, 0); | 935 | atomic_set(&data.working, 0); |
| 841 | atomic_set(&data.done, 0); | 936 | atomic_set(&data.done, 0); |
| 842 | atomic_set(&data.error, 0); | 937 | atomic_set(&data.error, 0); |
| 843 | data.token = rtas_token("ibm,suspend-me"); | 938 | data.token = rtas_token("ibm,suspend-me"); |
| 844 | data.complete = &done; | 939 | data.complete = &done; |
| 940 | |||
| 941 | /* All present CPUs must be online */ | ||
| 942 | cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); | ||
| 943 | cpuret = rtas_online_cpus_mask(offline_mask); | ||
| 944 | if (cpuret) { | ||
| 945 | pr_err("%s: Could not bring present CPUs online.\n", __func__); | ||
| 946 | atomic_set(&data.error, cpuret); | ||
| 947 | goto out; | ||
| 948 | } | ||
| 949 | |||
| 845 | stop_topology_update(); | 950 | stop_topology_update(); |
| 846 | 951 | ||
| 847 | /* Call function on all CPUs. One of us will make the | 952 | /* Call function on all CPUs. One of us will make the |
| @@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
| 857 | 962 | ||
| 858 | start_topology_update(); | 963 | start_topology_update(); |
| 859 | 964 | ||
| 965 | /* Take down CPUs not online prior to suspend */ | ||
| 966 | cpuret = rtas_offline_cpus_mask(offline_mask); | ||
| 967 | if (cpuret) | ||
| 968 | pr_warn("%s: Could not restore CPUs to offline state.\n", | ||
| 969 | __func__); | ||
| 970 | |||
| 971 | out: | ||
| 972 | free_cpumask_var(offline_mask); | ||
| 860 | return atomic_read(&data.error); | 973 | return atomic_read(&data.error); |
| 861 | } | 974 | } |
| 862 | #else /* CONFIG_PPC_PSERIES */ | 975 | #else /* CONFIG_PPC_PSERIES */ |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 5b3022470126..2f3cdb01506d 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
| @@ -89,6 +89,7 @@ | |||
| 89 | 89 | ||
| 90 | /* Array sizes */ | 90 | /* Array sizes */ |
| 91 | #define VALIDATE_BUF_SIZE 4096 | 91 | #define VALIDATE_BUF_SIZE 4096 |
| 92 | #define VALIDATE_MSG_LEN 256 | ||
| 92 | #define RTAS_MSG_MAXLEN 64 | 93 | #define RTAS_MSG_MAXLEN 64 |
| 93 | 94 | ||
| 94 | /* Quirk - RTAS requires 4k list length and block size */ | 95 | /* Quirk - RTAS requires 4k list length and block size */ |
| @@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf) | |||
| 466 | } | 467 | } |
| 467 | 468 | ||
| 468 | static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | 469 | static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, |
| 469 | char *msg) | 470 | char *msg, int msglen) |
| 470 | { | 471 | { |
| 471 | int n; | 472 | int n; |
| 472 | 473 | ||
| @@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | |||
| 474 | n = sprintf(msg, "%d\n", args_buf->update_results); | 475 | n = sprintf(msg, "%d\n", args_buf->update_results); |
| 475 | if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || | 476 | if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || |
| 476 | (args_buf->update_results == VALIDATE_TMP_UPDATE)) | 477 | (args_buf->update_results == VALIDATE_TMP_UPDATE)) |
| 477 | n += sprintf(msg + n, "%s\n", args_buf->buf); | 478 | n += snprintf(msg + n, msglen - n, "%s\n", |
| 479 | args_buf->buf); | ||
| 478 | } else { | 480 | } else { |
| 479 | n = sprintf(msg, "%d\n", args_buf->status); | 481 | n = sprintf(msg, "%d\n", args_buf->status); |
| 480 | } | 482 | } |
| @@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, | |||
| 486 | { | 488 | { |
| 487 | struct rtas_validate_flash_t *const args_buf = | 489 | struct rtas_validate_flash_t *const args_buf = |
| 488 | &rtas_validate_flash_data; | 490 | &rtas_validate_flash_data; |
| 489 | char msg[RTAS_MSG_MAXLEN]; | 491 | char msg[VALIDATE_MSG_LEN]; |
| 490 | int msglen; | 492 | int msglen; |
| 491 | 493 | ||
| 492 | mutex_lock(&rtas_validate_flash_mutex); | 494 | mutex_lock(&rtas_validate_flash_mutex); |
| 493 | msglen = get_validate_flash_msg(args_buf, msg); | 495 | msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN); |
| 494 | mutex_unlock(&rtas_validate_flash_mutex); | 496 | mutex_unlock(&rtas_validate_flash_mutex); |
| 495 | 497 | ||
| 496 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); | 498 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index cf12eae02de5..577a8aa69c6e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
| 14 | #include <linux/uprobes.h> | 14 | #include <linux/uprobes.h> |
| 15 | #include <linux/key.h> | 15 | #include <linux/key.h> |
| 16 | #include <linux/context_tracking.h> | ||
| 16 | #include <asm/hw_breakpoint.h> | 17 | #include <asm/hw_breakpoint.h> |
| 17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
| 18 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
| @@ -24,7 +25,7 @@ | |||
| 24 | * through debug.exception-trace sysctl. | 25 | * through debug.exception-trace sysctl. |
| 25 | */ | 26 | */ |
| 26 | 27 | ||
| 27 | int show_unhandled_signals = 0; | 28 | int show_unhandled_signals = 1; |
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * Allocate space for the signal frame | 31 | * Allocate space for the signal frame |
| @@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs) | |||
| 159 | 160 | ||
| 160 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 161 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
| 161 | { | 162 | { |
| 163 | user_exit(); | ||
| 164 | |||
| 162 | if (thread_info_flags & _TIF_UPROBE) | 165 | if (thread_info_flags & _TIF_UPROBE) |
| 163 | uprobe_notify_resume(regs); | 166 | uprobe_notify_resume(regs); |
| 164 | 167 | ||
| @@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | |||
| 169 | clear_thread_flag(TIF_NOTIFY_RESUME); | 172 | clear_thread_flag(TIF_NOTIFY_RESUME); |
| 170 | tracehook_notify_resume(regs); | 173 | tracehook_notify_resume(regs); |
| 171 | } | 174 | } |
| 175 | |||
| 176 | user_enter(); | ||
| 172 | } | 177 | } |
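
The signal.c change pulls in <linux/context_tracking.h> and brackets the return-to-user work in do_notify_resume() with user_exit()/user_enter(), so the context-tracking code behind adaptive tickless (RCU user extended quiescent states) knows the CPU has dropped back into the kernel and is about to resume user mode; it also turns show_unhandled_signals on by default. A hedged sketch of the bracketing pattern, with a placeholder body:

#include <linux/context_tracking.h>
#include <linux/ptrace.h>

/* Stand-in for the real notify/resume work; illustration only. */
extern void do_pending_work(struct pt_regs *regs, unsigned long flags);

void example_notify_resume(struct pt_regs *regs, unsigned long flags)
{
        user_exit();                    /* we are executing in the kernel now */

        do_pending_work(regs, flags);

        user_enter();                   /* about to return to user mode */
}
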
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 83efa2f7d926..a7a648f6b750 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
| 36 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
| 37 | #include <linux/ratelimit.h> | 37 | #include <linux/ratelimit.h> |
| 38 | #include <linux/context_tracking.h> | ||
| 38 | 39 | ||
| 39 | #include <asm/emulated_ops.h> | 40 | #include <asm/emulated_ops.h> |
| 40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
| @@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs) | |||
| 667 | 668 | ||
| 668 | void machine_check_exception(struct pt_regs *regs) | 669 | void machine_check_exception(struct pt_regs *regs) |
| 669 | { | 670 | { |
| 671 | enum ctx_state prev_state = exception_enter(); | ||
| 670 | int recover = 0; | 672 | int recover = 0; |
| 671 | 673 | ||
| 672 | __get_cpu_var(irq_stat).mce_exceptions++; | 674 | __get_cpu_var(irq_stat).mce_exceptions++; |
| @@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs) | |||
| 683 | recover = cur_cpu_spec->machine_check(regs); | 685 | recover = cur_cpu_spec->machine_check(regs); |
| 684 | 686 | ||
| 685 | if (recover > 0) | 687 | if (recover > 0) |
| 686 | return; | 688 | goto bail; |
| 687 | 689 | ||
| 688 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | 690 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) |
| 689 | /* the qspan pci read routines can cause machine checks -- Cort | 691 | /* the qspan pci read routines can cause machine checks -- Cort |
| @@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs) | |||
| 693 | * -- BenH | 695 | * -- BenH |
| 694 | */ | 696 | */ |
| 695 | bad_page_fault(regs, regs->dar, SIGBUS); | 697 | bad_page_fault(regs, regs->dar, SIGBUS); |
| 696 | return; | 698 | goto bail; |
| 697 | #endif | 699 | #endif |
| 698 | 700 | ||
| 699 | if (debugger_fault_handler(regs)) | 701 | if (debugger_fault_handler(regs)) |
| 700 | return; | 702 | goto bail; |
| 701 | 703 | ||
| 702 | if (check_io_access(regs)) | 704 | if (check_io_access(regs)) |
| 703 | return; | 705 | goto bail; |
| 704 | 706 | ||
| 705 | die("Machine check", regs, SIGBUS); | 707 | die("Machine check", regs, SIGBUS); |
| 706 | 708 | ||
| 707 | /* Must die if the interrupt is not recoverable */ | 709 | /* Must die if the interrupt is not recoverable */ |
| 708 | if (!(regs->msr & MSR_RI)) | 710 | if (!(regs->msr & MSR_RI)) |
| 709 | panic("Unrecoverable Machine check"); | 711 | panic("Unrecoverable Machine check"); |
| 712 | |||
| 713 | bail: | ||
| 714 | exception_exit(prev_state); | ||
| 710 | } | 715 | } |
| 711 | 716 | ||
| 712 | void SMIException(struct pt_regs *regs) | 717 | void SMIException(struct pt_regs *regs) |
| @@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs) | |||
| 716 | 721 | ||
| 717 | void unknown_exception(struct pt_regs *regs) | 722 | void unknown_exception(struct pt_regs *regs) |
| 718 | { | 723 | { |
| 724 | enum ctx_state prev_state = exception_enter(); | ||
| 725 | |||
| 719 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", | 726 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", |
| 720 | regs->nip, regs->msr, regs->trap); | 727 | regs->nip, regs->msr, regs->trap); |
| 721 | 728 | ||
| 722 | _exception(SIGTRAP, regs, 0, 0); | 729 | _exception(SIGTRAP, regs, 0, 0); |
| 730 | |||
| 731 | exception_exit(prev_state); | ||
| 723 | } | 732 | } |
| 724 | 733 | ||
| 725 | void instruction_breakpoint_exception(struct pt_regs *regs) | 734 | void instruction_breakpoint_exception(struct pt_regs *regs) |
| 726 | { | 735 | { |
| 736 | enum ctx_state prev_state = exception_enter(); | ||
| 737 | |||
| 727 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, | 738 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, |
| 728 | 5, SIGTRAP) == NOTIFY_STOP) | 739 | 5, SIGTRAP) == NOTIFY_STOP) |
| 729 | return; | 740 | goto bail; |
| 730 | if (debugger_iabr_match(regs)) | 741 | if (debugger_iabr_match(regs)) |
| 731 | return; | 742 | goto bail; |
| 732 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 743 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
| 744 | |||
| 745 | bail: | ||
| 746 | exception_exit(prev_state); | ||
| 733 | } | 747 | } |
| 734 | 748 | ||
| 735 | void RunModeException(struct pt_regs *regs) | 749 | void RunModeException(struct pt_regs *regs) |
| @@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs) | |||
| 739 | 753 | ||
| 740 | void __kprobes single_step_exception(struct pt_regs *regs) | 754 | void __kprobes single_step_exception(struct pt_regs *regs) |
| 741 | { | 755 | { |
| 756 | enum ctx_state prev_state = exception_enter(); | ||
| 757 | |||
| 742 | clear_single_step(regs); | 758 | clear_single_step(regs); |
| 743 | 759 | ||
| 744 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | 760 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, |
| 745 | 5, SIGTRAP) == NOTIFY_STOP) | 761 | 5, SIGTRAP) == NOTIFY_STOP) |
| 746 | return; | 762 | goto bail; |
| 747 | if (debugger_sstep(regs)) | 763 | if (debugger_sstep(regs)) |
| 748 | return; | 764 | goto bail; |
| 749 | 765 | ||
| 750 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | 766 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
| 767 | |||
| 768 | bail: | ||
| 769 | exception_exit(prev_state); | ||
| 751 | } | 770 | } |
| 752 | 771 | ||
| 753 | /* | 772 | /* |
| @@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr) | |||
| 1005 | 1024 | ||
| 1006 | void __kprobes program_check_exception(struct pt_regs *regs) | 1025 | void __kprobes program_check_exception(struct pt_regs *regs) |
| 1007 | { | 1026 | { |
| 1027 | enum ctx_state prev_state = exception_enter(); | ||
| 1008 | unsigned int reason = get_reason(regs); | 1028 | unsigned int reason = get_reason(regs); |
| 1009 | extern int do_mathemu(struct pt_regs *regs); | 1029 | extern int do_mathemu(struct pt_regs *regs); |
| 1010 | 1030 | ||
| @@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1014 | if (reason & REASON_FP) { | 1034 | if (reason & REASON_FP) { |
| 1015 | /* IEEE FP exception */ | 1035 | /* IEEE FP exception */ |
| 1016 | parse_fpe(regs); | 1036 | parse_fpe(regs); |
| 1017 | return; | 1037 | goto bail; |
| 1018 | } | 1038 | } |
| 1019 | if (reason & REASON_TRAP) { | 1039 | if (reason & REASON_TRAP) { |
| 1020 | /* Debugger is first in line to stop recursive faults in | 1040 | /* Debugger is first in line to stop recursive faults in |
| 1021 | * rcu_lock, notify_die, or atomic_notifier_call_chain */ | 1041 | * rcu_lock, notify_die, or atomic_notifier_call_chain */ |
| 1022 | if (debugger_bpt(regs)) | 1042 | if (debugger_bpt(regs)) |
| 1023 | return; | 1043 | goto bail; |
| 1024 | 1044 | ||
| 1025 | /* trap exception */ | 1045 | /* trap exception */ |
| 1026 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) | 1046 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) |
| 1027 | == NOTIFY_STOP) | 1047 | == NOTIFY_STOP) |
| 1028 | return; | 1048 | goto bail; |
| 1029 | 1049 | ||
| 1030 | if (!(regs->msr & MSR_PR) && /* not user-mode */ | 1050 | if (!(regs->msr & MSR_PR) && /* not user-mode */ |
| 1031 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { | 1051 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { |
| 1032 | regs->nip += 4; | 1052 | regs->nip += 4; |
| 1033 | return; | 1053 | goto bail; |
| 1034 | } | 1054 | } |
| 1035 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 1055 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
| 1036 | return; | 1056 | goto bail; |
| 1037 | } | 1057 | } |
| 1038 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1058 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 1039 | if (reason & REASON_TM) { | 1059 | if (reason & REASON_TM) { |
| @@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1049 | if (!user_mode(regs) && | 1069 | if (!user_mode(regs) && |
| 1050 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { | 1070 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { |
| 1051 | regs->nip += 4; | 1071 | regs->nip += 4; |
| 1052 | return; | 1072 | goto bail; |
| 1053 | } | 1073 | } |
| 1054 | /* If usermode caused this, it's done something illegal and | 1074 | /* If usermode caused this, it's done something illegal and |
| 1055 | * gets a SIGILL slap on the wrist. We call it an illegal | 1075 | * gets a SIGILL slap on the wrist. We call it an illegal |
| @@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1059 | */ | 1079 | */ |
| 1060 | if (user_mode(regs)) { | 1080 | if (user_mode(regs)) { |
| 1061 | _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); | 1081 | _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); |
| 1062 | return; | 1082 | goto bail; |
| 1063 | } else { | 1083 | } else { |
| 1064 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " | 1084 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " |
| 1065 | "at %lx (msr 0x%x)\n", regs->nip, reason); | 1085 | "at %lx (msr 0x%x)\n", regs->nip, reason); |
| @@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1083 | switch (do_mathemu(regs)) { | 1103 | switch (do_mathemu(regs)) { |
| 1084 | case 0: | 1104 | case 0: |
| 1085 | emulate_single_step(regs); | 1105 | emulate_single_step(regs); |
| 1086 | return; | 1106 | goto bail; |
| 1087 | case 1: { | 1107 | case 1: { |
| 1088 | int code = 0; | 1108 | int code = 0; |
| 1089 | code = __parse_fpscr(current->thread.fpscr.val); | 1109 | code = __parse_fpscr(current->thread.fpscr.val); |
| 1090 | _exception(SIGFPE, regs, code, regs->nip); | 1110 | _exception(SIGFPE, regs, code, regs->nip); |
| 1091 | return; | 1111 | goto bail; |
| 1092 | } | 1112 | } |
| 1093 | case -EFAULT: | 1113 | case -EFAULT: |
| 1094 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1114 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 1095 | return; | 1115 | goto bail; |
| 1096 | } | 1116 | } |
| 1097 | /* fall through on any other errors */ | 1117 | /* fall through on any other errors */ |
| 1098 | #endif /* CONFIG_MATH_EMULATION */ | 1118 | #endif /* CONFIG_MATH_EMULATION */ |
| @@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1103 | case 0: | 1123 | case 0: |
| 1104 | regs->nip += 4; | 1124 | regs->nip += 4; |
| 1105 | emulate_single_step(regs); | 1125 | emulate_single_step(regs); |
| 1106 | return; | 1126 | goto bail; |
| 1107 | case -EFAULT: | 1127 | case -EFAULT: |
| 1108 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1128 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 1109 | return; | 1129 | goto bail; |
| 1110 | } | 1130 | } |
| 1111 | } | 1131 | } |
| 1112 | 1132 | ||
| @@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
| 1114 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | 1134 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); |
| 1115 | else | 1135 | else |
| 1116 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1136 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1137 | |||
| 1138 | bail: | ||
| 1139 | exception_exit(prev_state); | ||
| 1117 | } | 1140 | } |
| 1118 | 1141 | ||
| 1119 | void alignment_exception(struct pt_regs *regs) | 1142 | void alignment_exception(struct pt_regs *regs) |
| 1120 | { | 1143 | { |
| 1144 | enum ctx_state prev_state = exception_enter(); | ||
| 1121 | int sig, code, fixed = 0; | 1145 | int sig, code, fixed = 0; |
| 1122 | 1146 | ||
| 1123 | /* We restore the interrupt state now */ | 1147 | /* We restore the interrupt state now */ |
| @@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs) | |||
| 1131 | if (fixed == 1) { | 1155 | if (fixed == 1) { |
| 1132 | regs->nip += 4; /* skip over emulated instruction */ | 1156 | regs->nip += 4; /* skip over emulated instruction */ |
| 1133 | emulate_single_step(regs); | 1157 | emulate_single_step(regs); |
| 1134 | return; | 1158 | goto bail; |
| 1135 | } | 1159 | } |
| 1136 | 1160 | ||
| 1137 | /* Operand address was bad */ | 1161 | /* Operand address was bad */ |
| @@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs) | |||
| 1146 | _exception(sig, regs, code, regs->dar); | 1170 | _exception(sig, regs, code, regs->dar); |
| 1147 | else | 1171 | else |
| 1148 | bad_page_fault(regs, regs->dar, sig); | 1172 | bad_page_fault(regs, regs->dar, sig); |
| 1173 | |||
| 1174 | bail: | ||
| 1175 | exception_exit(prev_state); | ||
| 1149 | } | 1176 | } |
| 1150 | 1177 | ||
| 1151 | void StackOverflow(struct pt_regs *regs) | 1178 | void StackOverflow(struct pt_regs *regs) |
| @@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs) | |||
| 1174 | 1201 | ||
| 1175 | void kernel_fp_unavailable_exception(struct pt_regs *regs) | 1202 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
| 1176 | { | 1203 | { |
| 1204 | enum ctx_state prev_state = exception_enter(); | ||
| 1205 | |||
| 1177 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " | 1206 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " |
| 1178 | "%lx at %lx\n", regs->trap, regs->nip); | 1207 | "%lx at %lx\n", regs->trap, regs->nip); |
| 1179 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); | 1208 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); |
| 1209 | |||
| 1210 | exception_exit(prev_state); | ||
| 1180 | } | 1211 | } |
| 1181 | 1212 | ||
| 1182 | void altivec_unavailable_exception(struct pt_regs *regs) | 1213 | void altivec_unavailable_exception(struct pt_regs *regs) |
| 1183 | { | 1214 | { |
| 1215 | enum ctx_state prev_state = exception_enter(); | ||
| 1216 | |||
| 1184 | if (user_mode(regs)) { | 1217 | if (user_mode(regs)) { |
| 1185 | /* A user program has executed an altivec instruction, | 1218 | /* A user program has executed an altivec instruction, |
| 1186 | but this kernel doesn't support altivec. */ | 1219 | but this kernel doesn't support altivec. */ |
| 1187 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1220 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1188 | return; | 1221 | goto bail; |
| 1189 | } | 1222 | } |
| 1190 | 1223 | ||
| 1191 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " | 1224 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " |
| 1192 | "%lx at %lx\n", regs->trap, regs->nip); | 1225 | "%lx at %lx\n", regs->trap, regs->nip); |
| 1193 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); | 1226 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); |
| 1227 | |||
| 1228 | bail: | ||
| 1229 | exception_exit(prev_state); | ||
| 1194 | } | 1230 | } |
| 1195 | 1231 | ||
| 1196 | void vsx_unavailable_exception(struct pt_regs *regs) | 1232 | void vsx_unavailable_exception(struct pt_regs *regs) |
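
Every traps.c handler touched above is converted the same way: capture the previous context-tracking state with exception_enter() on entry, turn each early return into goto bail, and restore the state with exception_exit() on the single exit path so no path can leave the tracking state wrong. A minimal sketch of that shape; the body is a placeholder, not one of the real handlers:

#include <linux/context_tracking.h>
#include <linux/ptrace.h>

/* Hypothetical early-out check and main work, for illustration only. */
extern int handled_early(struct pt_regs *regs);
extern void do_real_work(struct pt_regs *regs);

void example_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (handled_early(regs))
                goto bail;

        do_real_work(regs);

bail:
        exception_exit(prev_state);
}
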
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 13b867093499..9d3fdcd66290 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
| @@ -64,6 +64,9 @@ void __init udbg_early_init(void) | |||
| 64 | udbg_init_usbgecko(); | 64 | udbg_init_usbgecko(); |
| 65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) | 65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) |
| 66 | udbg_init_wsp(); | 66 | udbg_init_wsp(); |
| 67 | #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) | ||
| 68 | /* In memory console */ | ||
| 69 | udbg_init_memcons(); | ||
| 67 | #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) | 70 | #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) |
| 68 | udbg_init_ehv_bc(); | 71 | udbg_init_ehv_bc(); |
| 69 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) | 72 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 229951ffc351..8726779e1409 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
| 33 | #include <linux/magic.h> | 33 | #include <linux/magic.h> |
| 34 | #include <linux/ratelimit.h> | 34 | #include <linux/ratelimit.h> |
| 35 | #include <linux/context_tracking.h> | ||
| 35 | 36 | ||
| 36 | #include <asm/firmware.h> | 37 | #include <asm/firmware.h> |
| 37 | #include <asm/page.h> | 38 | #include <asm/page.h> |
| @@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) | |||
| 196 | int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | 197 | int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, |
| 197 | unsigned long error_code) | 198 | unsigned long error_code) |
| 198 | { | 199 | { |
| 200 | enum ctx_state prev_state = exception_enter(); | ||
| 199 | struct vm_area_struct * vma; | 201 | struct vm_area_struct * vma; |
| 200 | struct mm_struct *mm = current->mm; | 202 | struct mm_struct *mm = current->mm; |
| 201 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 203 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
| @@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
| 204 | int trap = TRAP(regs); | 206 | int trap = TRAP(regs); |
| 205 | int is_exec = trap == 0x400; | 207 | int is_exec = trap == 0x400; |
| 206 | int fault; | 208 | int fault; |
| 209 | int rc = 0; | ||
| 207 | 210 | ||
| 208 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | 211 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) |
| 209 | /* | 212 | /* |
| @@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
| 230 | * look at it | 233 | * look at it |
| 231 | */ | 234 | */ |
| 232 | if (error_code & ICSWX_DSI_UCT) { | 235 | if (error_code & ICSWX_DSI_UCT) { |
| 233 | int rc = acop_handle_fault(regs, address, error_code); | 236 | rc = acop_handle_fault(regs, address, error_code); |
| 234 | if (rc) | 237 | if (rc) |
| 235 | return rc; | 238 | goto bail; |
| 236 | } | 239 | } |
| 237 | #endif /* CONFIG_PPC_ICSWX */ | 240 | #endif /* CONFIG_PPC_ICSWX */ |
| 238 | 241 | ||
| 239 | if (notify_page_fault(regs)) | 242 | if (notify_page_fault(regs)) |
| 240 | return 0; | 243 | goto bail; |
| 241 | 244 | ||
| 242 | if (unlikely(debugger_fault_handler(regs))) | 245 | if (unlikely(debugger_fault_handler(regs))) |
| 243 | return 0; | 246 | goto bail; |
| 244 | 247 | ||
| 245 | /* On a kernel SLB miss we can only check for a valid exception entry */ | 248 | /* On a kernel SLB miss we can only check for a valid exception entry */ |
| 246 | if (!user_mode(regs) && (address >= TASK_SIZE)) | 249 | if (!user_mode(regs) && (address >= TASK_SIZE)) { |
| 247 | return SIGSEGV; | 250 | rc = SIGSEGV; |
| 251 | goto bail; | ||
| 252 | } | ||
| 248 | 253 | ||
| 249 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ | 254 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ |
| 250 | defined(CONFIG_PPC_BOOK3S_64)) | 255 | defined(CONFIG_PPC_BOOK3S_64)) |
| 251 | if (error_code & DSISR_DABRMATCH) { | 256 | if (error_code & DSISR_DABRMATCH) { |
| 252 | /* breakpoint match */ | 257 | /* breakpoint match */ |
| 253 | do_break(regs, address, error_code); | 258 | do_break(regs, address, error_code); |
| 254 | return 0; | 259 | goto bail; |
| 255 | } | 260 | } |
| 256 | #endif | 261 | #endif |
| 257 | 262 | ||
| @@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
| 260 | local_irq_enable(); | 265 | local_irq_enable(); |
| 261 | 266 | ||
| 262 | if (in_atomic() || mm == NULL) { | 267 | if (in_atomic() || mm == NULL) { |
| 263 | if (!user_mode(regs)) | 268 | if (!user_mode(regs)) { |
| 264 | return SIGSEGV; | 269 | rc = SIGSEGV; |
| 270 | goto bail; | ||
| 271 | } | ||
| 265 | /* in_atomic() in user mode is really bad, | 272 | /* in_atomic() in user mode is really bad, |
| 266 | as is current->mm == NULL. */ | 273 | as is current->mm == NULL. */ |
| 267 | printk(KERN_EMERG "Page fault in user mode with " | 274 | printk(KERN_EMERG "Page fault in user mode with " |
| @@ -417,9 +424,11 @@ good_area: | |||
| 417 | */ | 424 | */ |
| 418 | fault = handle_mm_fault(mm, vma, address, flags); | 425 | fault = handle_mm_fault(mm, vma, address, flags); |
| 419 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { | 426 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { |
| 420 | int rc = mm_fault_error(regs, address, fault); | 427 | rc = mm_fault_error(regs, address, fault); |
| 421 | if (rc >= MM_FAULT_RETURN) | 428 | if (rc >= MM_FAULT_RETURN) |
| 422 | return rc; | 429 | goto bail; |
| 430 | else | ||
| 431 | rc = 0; | ||
| 423 | } | 432 | } |
| 424 | 433 | ||
| 425 | /* | 434 | /* |
| @@ -454,7 +463,7 @@ good_area: | |||
| 454 | } | 463 | } |
| 455 | 464 | ||
| 456 | up_read(&mm->mmap_sem); | 465 | up_read(&mm->mmap_sem); |
| 457 | return 0; | 466 | goto bail; |
| 458 | 467 | ||
| 459 | bad_area: | 468 | bad_area: |
| 460 | up_read(&mm->mmap_sem); | 469 | up_read(&mm->mmap_sem); |
| @@ -463,7 +472,7 @@ bad_area_nosemaphore: | |||
| 463 | /* User mode accesses cause a SIGSEGV */ | 472 | /* User mode accesses cause a SIGSEGV */ |
| 464 | if (user_mode(regs)) { | 473 | if (user_mode(regs)) { |
| 465 | _exception(SIGSEGV, regs, code, address); | 474 | _exception(SIGSEGV, regs, code, address); |
| 466 | return 0; | 475 | goto bail; |
| 467 | } | 476 | } |
| 468 | 477 | ||
| 469 | if (is_exec && (error_code & DSISR_PROTFAULT)) | 478 | if (is_exec && (error_code & DSISR_PROTFAULT)) |
| @@ -471,7 +480,11 @@ bad_area_nosemaphore: | |||
| 471 | " page (%lx) - exploit attempt? (uid: %d)\n", | 480 | " page (%lx) - exploit attempt? (uid: %d)\n", |
| 472 | address, from_kuid(&init_user_ns, current_uid())); | 481 | address, from_kuid(&init_user_ns, current_uid())); |
| 473 | 482 | ||
| 474 | return SIGSEGV; | 483 | rc = SIGSEGV; |
| 484 | |||
| 485 | bail: | ||
| 486 | exception_exit(prev_state); | ||
| 487 | return rc; | ||
| 475 | 488 | ||
| 476 | } | 489 | } |
| 477 | 490 | ||
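
do_page_fault() returns a value, so the same conversion in fault.c cannot simply jump to a cleanup label: every exit now funnels through a local rc, and the bail: label restores the context-tracking state before returning it. A short sketch of that value-returning variant, again with an illustrative check:

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/signal.h>

/* Hypothetical validity check, for illustration only. */
extern int address_is_sane(struct pt_regs *regs, unsigned long address);

int example_fault(struct pt_regs *regs, unsigned long address)
{
        enum ctx_state prev_state = exception_enter();
        int rc = 0;

        if (!address_is_sane(regs, address)) {
                rc = SIGSEGV;
                goto bail;
        }

        /* ... normal fault handling would go here ... */

bail:
        exception_exit(prev_state);
        return rc;
}
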
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 88ac0eeaadde..e303a6d74e3a 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
| 34 | #include <linux/signal.h> | 34 | #include <linux/signal.h> |
| 35 | #include <linux/memblock.h> | 35 | #include <linux/memblock.h> |
| 36 | #include <linux/context_tracking.h> | ||
| 36 | 37 | ||
| 37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
| 38 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
| @@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access, | |||
| 954 | */ | 955 | */ |
| 955 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | 956 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) |
| 956 | { | 957 | { |
| 958 | enum ctx_state prev_state = exception_enter(); | ||
| 957 | pgd_t *pgdir; | 959 | pgd_t *pgdir; |
| 958 | unsigned long vsid; | 960 | unsigned long vsid; |
| 959 | struct mm_struct *mm; | 961 | struct mm_struct *mm; |
| @@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 973 | mm = current->mm; | 975 | mm = current->mm; |
| 974 | if (! mm) { | 976 | if (! mm) { |
| 975 | DBG_LOW(" user region with no mm !\n"); | 977 | DBG_LOW(" user region with no mm !\n"); |
| 976 | return 1; | 978 | rc = 1; |
| 979 | goto bail; | ||
| 977 | } | 980 | } |
| 978 | psize = get_slice_psize(mm, ea); | 981 | psize = get_slice_psize(mm, ea); |
| 979 | ssize = user_segment_size(ea); | 982 | ssize = user_segment_size(ea); |
| @@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 992 | /* Not a valid range | 995 | /* Not a valid range |
| 993 | * Send the problem up to do_page_fault | 996 | * Send the problem up to do_page_fault |
| 994 | */ | 997 | */ |
| 995 | return 1; | 998 | rc = 1; |
| 999 | goto bail; | ||
| 996 | } | 1000 | } |
| 997 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); | 1001 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); |
| 998 | 1002 | ||
| 999 | /* Bad address. */ | 1003 | /* Bad address. */ |
| 1000 | if (!vsid) { | 1004 | if (!vsid) { |
| 1001 | DBG_LOW("Bad address!\n"); | 1005 | DBG_LOW("Bad address!\n"); |
| 1002 | return 1; | 1006 | rc = 1; |
| 1007 | goto bail; | ||
| 1003 | } | 1008 | } |
| 1004 | /* Get pgdir */ | 1009 | /* Get pgdir */ |
| 1005 | pgdir = mm->pgd; | 1010 | pgdir = mm->pgd; |
| 1006 | if (pgdir == NULL) | 1011 | if (pgdir == NULL) { |
| 1007 | return 1; | 1012 | rc = 1; |
| 1013 | goto bail; | ||
| 1014 | } | ||
| 1008 | 1015 | ||
| 1009 | /* Check CPU locality */ | 1016 | /* Check CPU locality */ |
| 1010 | tmp = cpumask_of(smp_processor_id()); | 1017 | tmp = cpumask_of(smp_processor_id()); |
| @@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 1027 | ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); | 1034 | ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); |
| 1028 | if (ptep == NULL || !pte_present(*ptep)) { | 1035 | if (ptep == NULL || !pte_present(*ptep)) { |
| 1029 | DBG_LOW(" no PTE !\n"); | 1036 | DBG_LOW(" no PTE !\n"); |
| 1030 | return 1; | 1037 | rc = 1; |
| 1038 | goto bail; | ||
| 1031 | } | 1039 | } |
| 1032 | 1040 | ||
| 1033 | /* Add _PAGE_PRESENT to the required access perm */ | 1041 | /* Add _PAGE_PRESENT to the required access perm */ |
| @@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 1038 | */ | 1046 | */ |
| 1039 | if (access & ~pte_val(*ptep)) { | 1047 | if (access & ~pte_val(*ptep)) { |
| 1040 | DBG_LOW(" no access !\n"); | 1048 | DBG_LOW(" no access !\n"); |
| 1041 | return 1; | 1049 | rc = 1; |
| 1050 | goto bail; | ||
| 1042 | } | 1051 | } |
| 1043 | 1052 | ||
| 1044 | #ifdef CONFIG_HUGETLB_PAGE | 1053 | #ifdef CONFIG_HUGETLB_PAGE |
| 1045 | if (hugeshift) | 1054 | if (hugeshift) { |
| 1046 | return __hash_page_huge(ea, access, vsid, ptep, trap, local, | 1055 | rc = __hash_page_huge(ea, access, vsid, ptep, trap, local, |
| 1047 | ssize, hugeshift, psize); | 1056 | ssize, hugeshift, psize); |
| 1057 | goto bail; | ||
| 1058 | } | ||
| 1048 | #endif /* CONFIG_HUGETLB_PAGE */ | 1059 | #endif /* CONFIG_HUGETLB_PAGE */ |
| 1049 | 1060 | ||
| 1050 | #ifndef CONFIG_PPC_64K_PAGES | 1061 | #ifndef CONFIG_PPC_64K_PAGES |
| @@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 1124 | pte_val(*(ptep + PTRS_PER_PTE))); | 1135 | pte_val(*(ptep + PTRS_PER_PTE))); |
| 1125 | #endif | 1136 | #endif |
| 1126 | DBG_LOW(" -> rc=%d\n", rc); | 1137 | DBG_LOW(" -> rc=%d\n", rc); |
| 1138 | |||
| 1139 | bail: | ||
| 1140 | exception_exit(prev_state); | ||
| 1127 | return rc; | 1141 | return rc; |
| 1128 | } | 1142 | } |
| 1129 | EXPORT_SYMBOL_GPL(hash_page); | 1143 | EXPORT_SYMBOL_GPL(hash_page); |
| @@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local) | |||
| 1259 | */ | 1273 | */ |
| 1260 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) | 1274 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) |
| 1261 | { | 1275 | { |
| 1276 | enum ctx_state prev_state = exception_enter(); | ||
| 1277 | |||
| 1262 | if (user_mode(regs)) { | 1278 | if (user_mode(regs)) { |
| 1263 | #ifdef CONFIG_PPC_SUBPAGE_PROT | 1279 | #ifdef CONFIG_PPC_SUBPAGE_PROT |
| 1264 | if (rc == -2) | 1280 | if (rc == -2) |
| @@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) | |||
| 1268 | _exception(SIGBUS, regs, BUS_ADRERR, address); | 1284 | _exception(SIGBUS, regs, BUS_ADRERR, address); |
| 1269 | } else | 1285 | } else |
| 1270 | bad_page_fault(regs, address, SIGBUS); | 1286 | bad_page_fault(regs, address, SIGBUS); |
| 1287 | |||
| 1288 | exception_exit(prev_state); | ||
| 1271 | } | 1289 | } |
| 1272 | 1290 | ||
| 1273 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn, | 1291 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn, |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index c2787bf779ca..a90b9c458990 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
| @@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start, | |||
| 215 | unsigned long phys) | 215 | unsigned long phys) |
| 216 | { | 216 | { |
| 217 | int mapped = htab_bolt_mapping(start, start + page_size, phys, | 217 | int mapped = htab_bolt_mapping(start, start + page_size, phys, |
| 218 | PAGE_KERNEL, mmu_vmemmap_psize, | 218 | pgprot_val(PAGE_KERNEL), |
| 219 | mmu_vmemmap_psize, | ||
| 219 | mmu_kernel_ssize); | 220 | mmu_kernel_ssize); |
| 220 | BUG_ON(mapped < 0); | 221 | BUG_ON(mapped < 0); |
| 221 | } | 222 | } |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index c627843c5b2e..426180b84978 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
| @@ -13,11 +13,13 @@ | |||
| 13 | #include <linux/perf_event.h> | 13 | #include <linux/perf_event.h> |
| 14 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
| 15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
| 16 | #include <linux/uaccess.h> | ||
| 16 | #include <asm/reg.h> | 17 | #include <asm/reg.h> |
| 17 | #include <asm/pmc.h> | 18 | #include <asm/pmc.h> |
| 18 | #include <asm/machdep.h> | 19 | #include <asm/machdep.h> |
| 19 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
| 20 | #include <asm/ptrace.h> | 21 | #include <asm/ptrace.h> |
| 22 | #include <asm/code-patching.h> | ||
| 21 | 23 | ||
| 22 | #define BHRB_MAX_ENTRIES 32 | 24 | #define BHRB_MAX_ENTRIES 32 |
| 23 | #define BHRB_TARGET 0x0000000000000002 | 25 | #define BHRB_TARGET 0x0000000000000002 |
| @@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs) | |||
| 100 | return 1; | 102 | return 1; |
| 101 | } | 103 | } |
| 102 | 104 | ||
| 105 | static inline void power_pmu_bhrb_enable(struct perf_event *event) {} | ||
| 106 | static inline void power_pmu_bhrb_disable(struct perf_event *event) {} | ||
| 107 | void power_pmu_flush_branch_stack(void) {} | ||
| 108 | static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} | ||
| 103 | #endif /* CONFIG_PPC32 */ | 109 | #endif /* CONFIG_PPC32 */ |
| 104 | 110 | ||
| 105 | static bool regs_use_siar(struct pt_regs *regs) | 111 | static bool regs_use_siar(struct pt_regs *regs) |
| @@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs) | |||
| 308 | return 1; | 314 | return 1; |
| 309 | } | 315 | } |
| 310 | 316 | ||
| 317 | |||
| 318 | /* Reset all possible BHRB entries */ | ||
| 319 | static void power_pmu_bhrb_reset(void) | ||
| 320 | { | ||
| 321 | asm volatile(PPC_CLRBHRB); | ||
| 322 | } | ||
| 323 | |||
| 324 | static void power_pmu_bhrb_enable(struct perf_event *event) | ||
| 325 | { | ||
| 326 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 327 | |||
| 328 | if (!ppmu->bhrb_nr) | ||
| 329 | return; | ||
| 330 | |||
| 331 | /* Clear BHRB if we changed task context to avoid data leaks */ | ||
| 332 | if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { | ||
| 333 | power_pmu_bhrb_reset(); | ||
| 334 | cpuhw->bhrb_context = event->ctx; | ||
| 335 | } | ||
| 336 | cpuhw->bhrb_users++; | ||
| 337 | } | ||
| 338 | |||
| 339 | static void power_pmu_bhrb_disable(struct perf_event *event) | ||
| 340 | { | ||
| 341 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 342 | |||
| 343 | if (!ppmu->bhrb_nr) | ||
| 344 | return; | ||
| 345 | |||
| 346 | cpuhw->bhrb_users--; | ||
| 347 | WARN_ON_ONCE(cpuhw->bhrb_users < 0); | ||
| 348 | |||
| 349 | if (!cpuhw->disabled && !cpuhw->bhrb_users) { | ||
| 350 | /* BHRB cannot be turned off when other | ||
| 351 | * events are active on the PMU. | ||
| 352 | */ | ||
| 353 | |||
| 354 | /* avoid stale pointer */ | ||
| 355 | cpuhw->bhrb_context = NULL; | ||
| 356 | } | ||
| 357 | } | ||
| 358 | |||
| 359 | /* Called from ctxsw to prevent one process's branch entries from | ||
| 360 | * mingling with the other process's entries during context switch. | ||
| 361 | */ | ||
| 362 | void power_pmu_flush_branch_stack(void) | ||
| 363 | { | ||
| 364 | if (ppmu->bhrb_nr) | ||
| 365 | power_pmu_bhrb_reset(); | ||
| 366 | } | ||
| 367 | /* Calculate the to address for a branch */ | ||
| 368 | static __u64 power_pmu_bhrb_to(u64 addr) | ||
| 369 | { | ||
| 370 | unsigned int instr; | ||
| 371 | int ret; | ||
| 372 | __u64 target; | ||
| 373 | |||
| 374 | if (is_kernel_addr(addr)) | ||
| 375 | return branch_target((unsigned int *)addr); | ||
| 376 | |||
| 377 | /* Userspace: need copy instruction here then translate it */ | ||
| 378 | pagefault_disable(); | ||
| 379 | ret = __get_user_inatomic(instr, (unsigned int __user *)addr); | ||
| 380 | if (ret) { | ||
| 381 | pagefault_enable(); | ||
| 382 | return 0; | ||
| 383 | } | ||
| 384 | pagefault_enable(); | ||
| 385 | |||
| 386 | target = branch_target(&instr); | ||
| 387 | if ((!target) || (instr & BRANCH_ABSOLUTE)) | ||
| 388 | return target; | ||
| 389 | |||
| 390 | /* Translate relative branch target from kernel to user address */ | ||
| 391 | return target - (unsigned long)&instr + addr; | ||
| 392 | } | ||
| 393 | |||
| 394 | /* Processing BHRB entries */ | ||
| 395 | void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) | ||
| 396 | { | ||
| 397 | u64 val; | ||
| 398 | u64 addr; | ||
| 399 | int r_index, u_index, pred; | ||
| 400 | |||
| 401 | r_index = 0; | ||
| 402 | u_index = 0; | ||
| 403 | while (r_index < ppmu->bhrb_nr) { | ||
| 404 | /* Assembly read function */ | ||
| 405 | val = read_bhrb(r_index++); | ||
| 406 | if (!val) | ||
| 407 | /* Terminal marker: End of valid BHRB entries */ | ||
| 408 | break; | ||
| 409 | else { | ||
| 410 | addr = val & BHRB_EA; | ||
| 411 | pred = val & BHRB_PREDICTION; | ||
| 412 | |||
| 413 | if (!addr) | ||
| 414 | /* invalid entry */ | ||
| 415 | continue; | ||
| 416 | |||
| 417 | /* Branches are read most recent first (ie. mfbhrb 0 is | ||
| 418 | * the most recent branch). | ||
| 419 | * There are two types of valid entries: | ||
| 420 | * 1) a target entry which is the to address of a | ||
| 421 | * computed goto like a blr,bctr,btar. The next | ||
| 422 | * entry read from the bhrb will be branch | ||
| 423 | * corresponding to this target (ie. the actual | ||
| 424 | * blr/bctr/btar instruction). | ||
| 425 | * 2) a from address which is an actual branch. If a | ||
| 426 | * target entry precedes this, then this is the | ||
| 427 | * matching branch for that target. If this is not | ||
| 428 | * following a target entry, then this is a branch | ||
| 429 | * where the target is given as an immediate field | ||
| 430 | * in the instruction (ie. an i or b form branch). | ||
| 431 | * In this case we need to read the instruction from | ||
| 432 | * memory to determine the target/to address. | ||
| 433 | */ | ||
| 434 | |||
| 435 | if (val & BHRB_TARGET) { | ||
| 436 | /* Target branches use two entries | ||
| 437 | * (ie. computed gotos/XL form) | ||
| 438 | */ | ||
| 439 | cpuhw->bhrb_entries[u_index].to = addr; | ||
| 440 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
| 441 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
| 442 | |||
| 443 | /* Get from address in next entry */ | ||
| 444 | val = read_bhrb(r_index++); | ||
| 445 | addr = val & BHRB_EA; | ||
| 446 | if (val & BHRB_TARGET) { | ||
| 447 | /* Shouldn't have two targets in a | ||
| 448 | row.. Reset index and try again */ | ||
| 449 | r_index--; | ||
| 450 | addr = 0; | ||
| 451 | } | ||
| 452 | cpuhw->bhrb_entries[u_index].from = addr; | ||
| 453 | } else { | ||
| 454 | /* Branches to immediate field | ||
| 455 | (ie I or B form) */ | ||
| 456 | cpuhw->bhrb_entries[u_index].from = addr; | ||
| 457 | cpuhw->bhrb_entries[u_index].to = | ||
| 458 | power_pmu_bhrb_to(addr); | ||
| 459 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
| 460 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
| 461 | } | ||
| 462 | u_index++; | ||
| 463 | |||
| 464 | } | ||
| 465 | } | ||
| 466 | cpuhw->bhrb_stack.nr = u_index; | ||
| 467 | return; | ||
| 468 | } | ||
| 469 | |||
| 311 | #endif /* CONFIG_PPC64 */ | 470 | #endif /* CONFIG_PPC64 */ |
| 312 | 471 | ||
| 313 | static void perf_event_interrupt(struct pt_regs *regs); | 472 | static void perf_event_interrupt(struct pt_regs *regs); |
| @@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count, | |||
| 904 | return n; | 1063 | return n; |
| 905 | } | 1064 | } |
| 906 | 1065 | ||
| 907 | /* Reset all possible BHRB entries */ | ||
| 908 | static void power_pmu_bhrb_reset(void) | ||
| 909 | { | ||
| 910 | asm volatile(PPC_CLRBHRB); | ||
| 911 | } | ||
| 912 | |||
| 913 | void power_pmu_bhrb_enable(struct perf_event *event) | ||
| 914 | { | ||
| 915 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 916 | |||
| 917 | if (!ppmu->bhrb_nr) | ||
| 918 | return; | ||
| 919 | |||
| 920 | /* Clear BHRB if we changed task context to avoid data leaks */ | ||
| 921 | if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { | ||
| 922 | power_pmu_bhrb_reset(); | ||
| 923 | cpuhw->bhrb_context = event->ctx; | ||
| 924 | } | ||
| 925 | cpuhw->bhrb_users++; | ||
| 926 | } | ||
| 927 | |||
| 928 | void power_pmu_bhrb_disable(struct perf_event *event) | ||
| 929 | { | ||
| 930 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 931 | |||
| 932 | if (!ppmu->bhrb_nr) | ||
| 933 | return; | ||
| 934 | |||
| 935 | cpuhw->bhrb_users--; | ||
| 936 | WARN_ON_ONCE(cpuhw->bhrb_users < 0); | ||
| 937 | |||
| 938 | if (!cpuhw->disabled && !cpuhw->bhrb_users) { | ||
| 939 | /* BHRB cannot be turned off when other | ||
| 940 | * events are active on the PMU. | ||
| 941 | */ | ||
| 942 | |||
| 943 | /* avoid stale pointer */ | ||
| 944 | cpuhw->bhrb_context = NULL; | ||
| 945 | } | ||
| 946 | } | ||
| 947 | |||
| 948 | /* | 1066 | /* |
| 949 | * Add a event to the PMU. | 1067 | * Add a event to the PMU. |
| 950 | * If all events are not already frozen, then we disable and | 1068 | * If all events are not already frozen, then we disable and |
| @@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu) | |||
| 1180 | return 0; | 1298 | return 0; |
| 1181 | } | 1299 | } |
| 1182 | 1300 | ||
| 1183 | /* Called from ctxsw to prevent one process's branch entries to | ||
| 1184 | * mingle with the other process's entries during context switch. | ||
| 1185 | */ | ||
| 1186 | void power_pmu_flush_branch_stack(void) | ||
| 1187 | { | ||
| 1188 | if (ppmu->bhrb_nr) | ||
| 1189 | power_pmu_bhrb_reset(); | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | /* | 1301 | /* |
| 1193 | * Return 1 if we might be able to put event on a limited PMC, | 1302 | * Return 1 if we might be able to put event on a limited PMC, |
| 1194 | * or 0 if not. | 1303 | * or 0 if not. |
| @@ -1458,77 +1567,6 @@ struct pmu power_pmu = { | |||
| 1458 | .flush_branch_stack = power_pmu_flush_branch_stack, | 1567 | .flush_branch_stack = power_pmu_flush_branch_stack, |
| 1459 | }; | 1568 | }; |
| 1460 | 1569 | ||
| 1461 | /* Processing BHRB entries */ | ||
| 1462 | void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) | ||
| 1463 | { | ||
| 1464 | u64 val; | ||
| 1465 | u64 addr; | ||
| 1466 | int r_index, u_index, target, pred; | ||
| 1467 | |||
| 1468 | r_index = 0; | ||
| 1469 | u_index = 0; | ||
| 1470 | while (r_index < ppmu->bhrb_nr) { | ||
| 1471 | /* Assembly read function */ | ||
| 1472 | val = read_bhrb(r_index); | ||
| 1473 | |||
| 1474 | /* Terminal marker: End of valid BHRB entries */ | ||
| 1475 | if (val == 0) { | ||
| 1476 | break; | ||
| 1477 | } else { | ||
| 1478 | /* BHRB field break up */ | ||
| 1479 | addr = val & BHRB_EA; | ||
| 1480 | pred = val & BHRB_PREDICTION; | ||
| 1481 | target = val & BHRB_TARGET; | ||
| 1482 | |||
| 1483 | /* Probable Missed entry: Not applicable for POWER8 */ | ||
| 1484 | if ((addr == 0) && (target == 0) && (pred == 1)) { | ||
| 1485 | r_index++; | ||
| 1486 | continue; | ||
| 1487 | } | ||
| 1488 | |||
| 1489 | /* Real Missed entry: Power8 based missed entry */ | ||
| 1490 | if ((addr == 0) && (target == 1) && (pred == 1)) { | ||
| 1491 | r_index++; | ||
| 1492 | continue; | ||
| 1493 | } | ||
| 1494 | |||
| 1495 | /* Reserved condition: Not a valid entry */ | ||
| 1496 | if ((addr == 0) && (target == 1) && (pred == 0)) { | ||
| 1497 | r_index++; | ||
| 1498 | continue; | ||
| 1499 | } | ||
| 1500 | |||
| 1501 | /* Is a target address */ | ||
| 1502 | if (val & BHRB_TARGET) { | ||
| 1503 | /* First address cannot be a target address */ | ||
| 1504 | if (r_index == 0) { | ||
| 1505 | r_index++; | ||
| 1506 | continue; | ||
| 1507 | } | ||
| 1508 | |||
| 1509 | /* Update target address for the previous entry */ | ||
| 1510 | cpuhw->bhrb_entries[u_index - 1].to = addr; | ||
| 1511 | cpuhw->bhrb_entries[u_index - 1].mispred = pred; | ||
| 1512 | cpuhw->bhrb_entries[u_index - 1].predicted = ~pred; | ||
| 1513 | |||
| 1514 | /* Dont increment u_index */ | ||
| 1515 | r_index++; | ||
| 1516 | } else { | ||
| 1517 | /* Update address, flags for current entry */ | ||
| 1518 | cpuhw->bhrb_entries[u_index].from = addr; | ||
| 1519 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
| 1520 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
| 1521 | |||
| 1522 | /* Successfully popullated one entry */ | ||
| 1523 | u_index++; | ||
| 1524 | r_index++; | ||
| 1525 | } | ||
| 1526 | } | ||
| 1527 | } | ||
| 1528 | cpuhw->bhrb_stack.nr = u_index; | ||
| 1529 | return; | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | /* | 1570 | /* |
| 1533 | * A counter has overflowed; update its count and record | 1571 | * A counter has overflowed; update its count and record |
| 1534 | * things if requested. Note that interrupts are hard-disabled | 1572 | * things if requested. Note that interrupts are hard-disabled |
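
The BHRB helpers are rewritten and moved inside the CONFIG_PPC64 block (with empty stubs for 32-bit). The key change is in power_pmu_bhrb_read(): entries are read most-recent-first, an entry with BHRB_TARGET set carries only the "to" address of a computed branch and is paired with the following entry for the "from" address, while a plain entry is the "from" address of an immediate-field branch whose target power_pmu_bhrb_to() recovers by reading the instruction itself. A simplified sketch of the pairing loop; the reader function, masks and entry type are stand-ins, not the kernel's:

#include <linux/types.h>

#define EX_TARGET  0x2ULL               /* illustrative stand-in for BHRB_TARGET */
#define EX_EA      (~0x3ULL)            /* illustrative stand-in for BHRB_EA */

struct ex_branch { u64 from, to; };

extern u64 ex_read_bhrb(int index);     /* stand-in for the read_bhrb() helper */

int ex_decode_bhrb(struct ex_branch *out, int nr)
{
        int r = 0, u = 0;

        while (r < nr) {
                u64 val = ex_read_bhrb(r++);

                if (!val)
                        break;                  /* terminal marker: no more entries */
                if (!(val & EX_EA))
                        continue;               /* invalid entry, skip it */

                if (val & EX_TARGET) {
                        /* Target entry: "to" address, paired with the next
                         * entry, which holds the matching "from" address. */
                        out[u].to = val & EX_EA;
                        out[u].from = ex_read_bhrb(r++) & EX_EA;
                } else {
                        /* Immediate-form branch: only "from" is recorded;
                         * the real code reads the instruction to compute
                         * the target (power_pmu_bhrb_to()). */
                        out[u].from = val & EX_EA;
                        out[u].to = 0;
                }
                u++;
        }
        return u;                               /* number of entries produced */
}
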
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index a881232a3cce..b62aab3e22ec 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
| @@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON | |||
| 128 | 128 | ||
| 129 | config RTAS_PROC | 129 | config RTAS_PROC |
| 130 | bool "Proc interface to RTAS" | 130 | bool "Proc interface to RTAS" |
| 131 | depends on PPC_RTAS | 131 | depends on PPC_RTAS && PROC_FS |
| 132 | default y | 132 | default y |
| 133 | 133 | ||
| 134 | config RTAS_FLASH | 134 | config RTAS_FLASH |
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index ade4463226c6..628c564ceadb 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 16 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
| 17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
| 18 | #include <linux/slab.h> | ||
| 18 | #include <asm/opal.h> | 19 | #include <asm/opal.h> |
| 19 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
| 20 | 21 | ||
| @@ -28,6 +29,8 @@ struct opal { | |||
| 28 | static struct device_node *opal_node; | 29 | static struct device_node *opal_node; |
| 29 | static DEFINE_SPINLOCK(opal_write_lock); | 30 | static DEFINE_SPINLOCK(opal_write_lock); |
| 30 | extern u64 opal_mc_secondary_handler[]; | 31 | extern u64 opal_mc_secondary_handler[]; |
| 32 | static unsigned int *opal_irqs; | ||
| 33 | static unsigned int opal_irq_count; | ||
| 31 | 34 | ||
| 32 | int __init early_init_dt_scan_opal(unsigned long node, | 35 | int __init early_init_dt_scan_opal(unsigned long node, |
| 33 | const char *uname, int depth, void *data) | 36 | const char *uname, int depth, void *data) |
| @@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node, | |||
| 53 | opal.entry, entryp, entrysz); | 56 | opal.entry, entryp, entrysz); |
| 54 | 57 | ||
| 55 | powerpc_firmware_features |= FW_FEATURE_OPAL; | 58 | powerpc_firmware_features |= FW_FEATURE_OPAL; |
| 56 | if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | 59 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { |
| 60 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | ||
| 61 | powerpc_firmware_features |= FW_FEATURE_OPALv3; | ||
| 62 | printk("OPAL V3 detected !\n"); | ||
| 63 | } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | ||
| 57 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | 64 | powerpc_firmware_features |= FW_FEATURE_OPALv2; |
| 58 | printk("OPAL V2 detected !\n"); | 65 | printk("OPAL V2 detected !\n"); |
| 59 | } else { | 66 | } else { |
| @@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) | |||
| 144 | rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { | 151 | rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { |
| 145 | len = total_len; | 152 | len = total_len; |
| 146 | rc = opal_console_write(vtermno, &len, data); | 153 | rc = opal_console_write(vtermno, &len, data); |
| 154 | |||
| 155 | /* Closed or other error drop */ | ||
| 156 | if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && | ||
| 157 | rc != OPAL_BUSY_EVENT) { | ||
| 158 | written = total_len; | ||
| 159 | break; | ||
| 160 | } | ||
| 147 | if (rc == OPAL_SUCCESS) { | 161 | if (rc == OPAL_SUCCESS) { |
| 148 | total_len -= len; | 162 | total_len -= len; |
| 149 | data += len; | 163 | data += len; |
| @@ -316,6 +330,8 @@ static int __init opal_init(void) | |||
| 316 | irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); | 330 | irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); |
| 317 | pr_debug("opal: Found %d interrupts reserved for OPAL\n", | 331 | pr_debug("opal: Found %d interrupts reserved for OPAL\n", |
| 318 | irqs ? (irqlen / 4) : 0); | 332 | irqs ? (irqlen / 4) : 0); |
| 333 | opal_irq_count = irqlen / 4; | ||
| 334 | opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); | ||
| 319 | for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { | 335 | for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { |
| 320 | unsigned int hwirq = be32_to_cpup(irqs); | 336 | unsigned int hwirq = be32_to_cpup(irqs); |
| 321 | unsigned int irq = irq_create_mapping(NULL, hwirq); | 337 | unsigned int irq = irq_create_mapping(NULL, hwirq); |
| @@ -327,7 +343,19 @@ static int __init opal_init(void) | |||
| 327 | if (rc) | 343 | if (rc) |
| 328 | pr_warning("opal: Error %d requesting irq %d" | 344 | pr_warning("opal: Error %d requesting irq %d" |
| 329 | " (0x%x)\n", rc, irq, hwirq); | 345 | " (0x%x)\n", rc, irq, hwirq); |
| 346 | opal_irqs[i] = irq; | ||
| 330 | } | 347 | } |
| 331 | return 0; | 348 | return 0; |
| 332 | } | 349 | } |
| 333 | subsys_initcall(opal_init); | 350 | subsys_initcall(opal_init); |
| 351 | |||
| 352 | void opal_shutdown(void) | ||
| 353 | { | ||
| 354 | unsigned int i; | ||
| 355 | |||
| 356 | for (i = 0; i < opal_irq_count; i++) { | ||
| 357 | if (opal_irqs[i]) | ||
| 358 | free_irq(opal_irqs[i], 0); | ||
| 359 | opal_irqs[i] = 0; | ||
| 360 | } | ||
| 361 | } | ||
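
opal_init() now records every Linux interrupt it maps and requests in a kzalloc'd array, and the new opal_shutdown() walks that array and frees each entry so no OPAL interrupt can fire while a kexec'd kernel is taking over. A minimal sketch of the request-and-remember pattern; the handler and names are illustrative:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>

static unsigned int *ex_irqs;           /* mirrors opal_irqs / opal_irq_count */
static unsigned int ex_irq_count;

extern irqreturn_t ex_handler(int irq, void *data);    /* hypothetical handler */

int ex_request_all(const unsigned int *hwirqs, unsigned int count)
{
        unsigned int i;

        ex_irq_count = count;
        ex_irqs = kzalloc(count * sizeof(unsigned int), GFP_KERNEL);
        if (!ex_irqs)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                unsigned int irq = irq_create_mapping(NULL, hwirqs[i]);

                if (irq && !request_irq(irq, ex_handler, 0, "example", NULL))
                        ex_irqs[i] = irq;       /* remember only what we got */
        }
        return 0;
}

void ex_free_all(void)
{
        unsigned int i;

        for (i = 0; i < ex_irq_count; i++)
                if (ex_irqs[i])
                        free_irq(ex_irqs[i], NULL);
}
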
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1da578b7c1bf..3937aaae5bc4 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
| @@ -1048,6 +1048,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, | |||
| 1048 | return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; | 1048 | return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; |
| 1049 | } | 1049 | } |
| 1050 | 1050 | ||
| 1051 | static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) | ||
| 1052 | { | ||
| 1053 | opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, | ||
| 1054 | OPAL_ASSERT_RESET); | ||
| 1055 | } | ||
| 1056 | |||
| 1051 | void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) | 1057 | void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) |
| 1052 | { | 1058 | { |
| 1053 | struct pci_controller *hose; | 1059 | struct pci_controller *hose; |
| @@ -1178,6 +1184,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) | |||
| 1178 | /* Setup TCEs */ | 1184 | /* Setup TCEs */ |
| 1179 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; | 1185 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; |
| 1180 | 1186 | ||
| 1187 | /* Setup shutdown function for kexec */ | ||
| 1188 | phb->shutdown = pnv_pci_ioda_shutdown; | ||
| 1189 | |||
| 1181 | /* Setup MSI support */ | 1190 | /* Setup MSI support */ |
| 1182 | pnv_pci_init_ioda_msis(phb); | 1191 | pnv_pci_init_ioda_msis(phb); |
| 1183 | 1192 | ||
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 55dfca844ddf..163bd7422f1c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
| @@ -450,6 +450,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) | |||
| 450 | pnv_pci_dma_fallback_setup(hose, pdev); | 450 | pnv_pci_dma_fallback_setup(hose, pdev); |
| 451 | } | 451 | } |
| 452 | 452 | ||
| 453 | void pnv_pci_shutdown(void) | ||
| 454 | { | ||
| 455 | struct pci_controller *hose; | ||
| 456 | |||
| 457 | list_for_each_entry(hose, &hose_list, list_node) { | ||
| 458 | struct pnv_phb *phb = hose->private_data; | ||
| 459 | |||
| 460 | if (phb && phb->shutdown) | ||
| 461 | phb->shutdown(phb); | ||
| 462 | } | ||
| 463 | } | ||
| 464 | |||
| 453 | /* Fixup wrong class code in p7ioc and p8 root complex */ | 465 | /* Fixup wrong class code in p7ioc and p8 root complex */ |
| 454 | static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) | 466 | static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) |
| 455 | { | 467 | { |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 48dc4bb856a1..25d76c4df50b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
| @@ -86,6 +86,7 @@ struct pnv_phb { | |||
| 86 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); | 86 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); |
| 87 | void (*fixup_phb)(struct pci_controller *hose); | 87 | void (*fixup_phb)(struct pci_controller *hose); |
| 88 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); | 88 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); |
| 89 | void (*shutdown)(struct pnv_phb *phb); | ||
| 89 | 90 | ||
| 90 | union { | 91 | union { |
| 91 | struct { | 92 | struct { |
| @@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np); | |||
| 158 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); | 159 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); |
| 159 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, | 160 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, |
| 160 | u64 *startp, u64 *endp); | 161 | u64 *startp, u64 *endp); |
| 162 | |||
| 161 | #endif /* __POWERNV_PCI_H */ | 163 | #endif /* __POWERNV_PCI_H */ |
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 8a9df7f9667e..a1c6f83fc391 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h | |||
| @@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { } | |||
| 9 | 9 | ||
| 10 | #ifdef CONFIG_PCI | 10 | #ifdef CONFIG_PCI |
| 11 | extern void pnv_pci_init(void); | 11 | extern void pnv_pci_init(void); |
| 12 | extern void pnv_pci_shutdown(void); | ||
| 12 | #else | 13 | #else |
| 13 | static inline void pnv_pci_init(void) { } | 14 | static inline void pnv_pci_init(void) { } |
| 15 | static inline void pnv_pci_shutdown(void) { } | ||
| 14 | #endif | 16 | #endif |
| 15 | 17 | ||
| 16 | #endif /* _POWERNV_H */ | 18 | #endif /* _POWERNV_H */ |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index db1ad1c8f68f..d4459bfc92f7 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
| @@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m) | |||
| 78 | if (root) | 78 | if (root) |
| 79 | model = of_get_property(root, "model", NULL); | 79 | model = of_get_property(root, "model", NULL); |
| 80 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); | 80 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); |
| 81 | if (firmware_has_feature(FW_FEATURE_OPALv2)) | 81 | if (firmware_has_feature(FW_FEATURE_OPALv3)) |
| 82 | seq_printf(m, "firmware\t: OPAL v3\n"); | ||
| 83 | else if (firmware_has_feature(FW_FEATURE_OPALv2)) | ||
| 82 | seq_printf(m, "firmware\t: OPAL v2\n"); | 84 | seq_printf(m, "firmware\t: OPAL v2\n"); |
| 83 | else if (firmware_has_feature(FW_FEATURE_OPAL)) | 85 | else if (firmware_has_feature(FW_FEATURE_OPAL)) |
| 84 | seq_printf(m, "firmware\t: OPAL v1\n"); | 86 | seq_printf(m, "firmware\t: OPAL v1\n"); |
| @@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex) | |||
| 126 | { | 128 | { |
| 127 | } | 129 | } |
| 128 | 130 | ||
| 131 | static void pnv_shutdown(void) | ||
| 132 | { | ||
| 133 | /* Let the PCI code clear up IODA tables */ | ||
| 134 | pnv_pci_shutdown(); | ||
| 135 | |||
| 136 | /* And unregister all OPAL interrupts so they don't fire | ||
| 137 | * up while we kexec | ||
| 138 | */ | ||
| 139 | opal_shutdown(); | ||
| 140 | } | ||
| 141 | |||
| 129 | #ifdef CONFIG_KEXEC | 142 | #ifdef CONFIG_KEXEC |
| 130 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | 143 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) |
| 131 | { | 144 | { |
| @@ -187,6 +200,7 @@ define_machine(powernv) { | |||
| 187 | .init_IRQ = pnv_init_IRQ, | 200 | .init_IRQ = pnv_init_IRQ, |
| 188 | .show_cpuinfo = pnv_show_cpuinfo, | 201 | .show_cpuinfo = pnv_show_cpuinfo, |
| 189 | .progress = pnv_progress, | 202 | .progress = pnv_progress, |
| 203 | .machine_shutdown = pnv_shutdown, | ||
| 190 | .power_save = power7_idle, | 204 | .power_save = power7_idle, |
| 191 | .calibrate_decr = generic_calibrate_decr, | 205 | .calibrate_decr = generic_calibrate_decr, |
| 192 | #ifdef CONFIG_KEXEC | 206 | #ifdef CONFIG_KEXEC |
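
Taken together, the powernv hunks add a machine_shutdown path for kexec: pnv_shutdown() first calls pnv_pci_shutdown(), which walks the host-bridge list and invokes the new per-PHB ->shutdown callback (pnv_pci_ioda_shutdown() resets the IODA tables via OPAL), and then opal_shutdown() releases the OPAL interrupts. A sketch of the per-controller callback dispatch, with simplified structures standing in for hose_list and struct pnv_phb:

#include <linux/list.h>

struct ex_phb {
        struct list_head list_node;
        void (*shutdown)(struct ex_phb *phb);   /* optional per-PHB hook */
};

static LIST_HEAD(ex_phb_list);

/* Called from the machine_shutdown hook before kexec. */
void ex_shutdown_all_phbs(void)
{
        struct ex_phb *phb;

        list_for_each_entry(phb, &ex_phb_list, list_node)
                if (phb->shutdown)
                        phb->shutdown(phb);
}
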
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 6a3ecca5b725..88c9459c3e07 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
| @@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr) | |||
| 71 | 71 | ||
| 72 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 72 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
| 73 | 73 | ||
| 74 | /* On OPAL v2 the CPU are still spinning inside OPAL itself, | 74 | /* |
| 75 | * get them back now | 75 | * If we already started or OPALv2 is not supported, we just |
| 76 | * kick the CPU via the PACA | ||
| 76 | */ | 77 | */ |
| 77 | if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { | 78 | if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) |
| 78 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); | 79 | goto kick; |
| 79 | rc = opal_start_cpu(pcpu, start_here); | 80 | |
| 81 | /* | ||
| 82 | * At this point, the CPU can either be spinning on the way in | ||
| 83 | * from kexec or be inside OPAL waiting to be started for the | ||
| 84 | * first time. OPAL v3 allows us to query OPAL to know if it | ||
| 85 | * has the CPUs, so we do that | ||
| 86 | */ | ||
| 87 | if (firmware_has_feature(FW_FEATURE_OPALv3)) { | ||
| 88 | uint8_t status; | ||
| 89 | |||
| 90 | rc = opal_query_cpu_status(pcpu, &status); | ||
| 80 | if (rc != OPAL_SUCCESS) { | 91 | if (rc != OPAL_SUCCESS) { |
| 81 | pr_warn("OPAL Error %ld starting CPU %d\n", | 92 | pr_warn("OPAL Error %ld querying CPU %d state\n", |
| 82 | rc, nr); | 93 | rc, nr); |
| 83 | return -ENODEV; | 94 | return -ENODEV; |
| 84 | } | 95 | } |
| 96 | |||
| 97 | /* | ||
| 98 | * Already started, just kick it, probably coming from | ||
| 99 | * kexec and spinning | ||
| 100 | */ | ||
| 101 | if (status == OPAL_THREAD_STARTED) | ||
| 102 | goto kick; | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Available/inactive, let's kick it | ||
| 106 | */ | ||
| 107 | if (status == OPAL_THREAD_INACTIVE) { | ||
| 108 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", | ||
| 109 | nr, pcpu); | ||
| 110 | rc = opal_start_cpu(pcpu, start_here); | ||
| 111 | if (rc != OPAL_SUCCESS) { | ||
| 112 | pr_warn("OPAL Error %ld starting CPU %d\n", | ||
| 113 | rc, nr); | ||
| 114 | return -ENODEV; | ||
| 115 | } | ||
| 116 | } else { | ||
| 117 | /* | ||
| 118 | * An unavailable CPU (or any other unknown status) | ||
| 119 | * shouldn't be started. It also should not be in | ||
| 120 | * the possible map, but currently that can | ||
| 121 | * happen. | ||
| 122 | */ | ||
| 123 | pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" | ||
| 124 | " (status %d)...\n", nr, pcpu, status); | ||
| 125 | return -ENODEV; | ||
| 126 | } | ||
| 127 | } else { | ||
| 128 | /* | ||
| 129 | * On OPAL v2, we just kick it and hope for the best, | ||
| 130 | * we must not test the error from opal_start_cpu() or | ||
| 131 | * we would fail to get CPUs from kexec. | ||
| 132 | */ | ||
| 133 | opal_start_cpu(pcpu, start_here); | ||
| 85 | } | 134 | } |
| 135 | kick: | ||
| 86 | return smp_generic_kick_cpu(nr); | 136 | return smp_generic_kick_cpu(nr); |
| 87 | } | 137 | } |
| 88 | 138 | ||
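For readability, the OPAL v3 branch above can be restated as a status switch. This is only a condensed restatement of the logic in the hunk, using the same OPAL_THREAD_* status values it already relies on, not a replacement for it:

	switch (status) {
	case OPAL_THREAD_STARTED:
		/* Spinning after kexec: nothing to do, just kick it below */
		break;
	case OPAL_THREAD_INACTIVE:
		/* Still held inside OPAL: ask OPAL to release it */
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS)
			return -ENODEV;
		break;
	default:
		/* Unavailable or unknown status: refuse to start it */
		return -ENODEV;
	}
	return smp_generic_kick_cpu(nr);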
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 9a0941bc4d31..023b288f895b 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
| @@ -18,6 +18,7 @@ config PPC_PSERIES | |||
| 18 | select PPC_PCI_CHOICE if EXPERT | 18 | select PPC_PCI_CHOICE if EXPERT |
| 19 | select ZLIB_DEFLATE | 19 | select ZLIB_DEFLATE |
| 20 | select PPC_DOORBELL | 20 | select PPC_DOORBELL |
| 21 | select HAVE_CONTEXT_TRACKING | ||
| 21 | default y | 22 | default y |
| 22 | 23 | ||
| 23 | config PPC_SPLPAR | 24 | config PPC_SPLPAR |
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 47226e04126d..5f997e79d570 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/cpu.h> | ||
| 19 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
| 20 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
| 21 | #include <linux/stat.h> | 22 | #include <linux/stat.h> |
| @@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev, | |||
| 126 | struct device_attribute *attr, | 127 | struct device_attribute *attr, |
| 127 | const char *buf, size_t count) | 128 | const char *buf, size_t count) |
| 128 | { | 129 | { |
| 130 | cpumask_var_t offline_mask; | ||
| 129 | int rc; | 131 | int rc; |
| 130 | 132 | ||
| 131 | if (!capable(CAP_SYS_ADMIN)) | 133 | if (!capable(CAP_SYS_ADMIN)) |
| 132 | return -EPERM; | 134 | return -EPERM; |
| 133 | 135 | ||
| 136 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) | ||
| 137 | return -ENOMEM; | ||
| 138 | |||
| 134 | stream_id = simple_strtoul(buf, NULL, 16); | 139 | stream_id = simple_strtoul(buf, NULL, 16); |
| 135 | 140 | ||
| 136 | do { | 141 | do { |
| @@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev, | |||
| 140 | } while (rc == -EAGAIN); | 145 | } while (rc == -EAGAIN); |
| 141 | 146 | ||
| 142 | if (!rc) { | 147 | if (!rc) { |
| 148 | /* All present CPUs must be online */ | ||
| 149 | cpumask_andnot(offline_mask, cpu_present_mask, | ||
| 150 | cpu_online_mask); | ||
| 151 | rc = rtas_online_cpus_mask(offline_mask); | ||
| 152 | if (rc) { | ||
| 153 | pr_err("%s: Could not bring present CPUs online.\n", | ||
| 154 | __func__); | ||
| 155 | goto out; | ||
| 156 | } | ||
| 157 | |||
| 143 | stop_topology_update(); | 158 | stop_topology_update(); |
| 144 | rc = pm_suspend(PM_SUSPEND_MEM); | 159 | rc = pm_suspend(PM_SUSPEND_MEM); |
| 145 | start_topology_update(); | 160 | start_topology_update(); |
| 161 | |||
| 162 | /* Take down CPUs not online prior to suspend */ | ||
| 163 | if (!rtas_offline_cpus_mask(offline_mask)) | ||
| 164 | pr_warn("%s: Could not restore CPUs to offline " | ||
| 165 | "state.\n", __func__); | ||
| 146 | } | 166 | } |
| 147 | 167 | ||
| 148 | stream_id = 0; | 168 | stream_id = 0; |
| 149 | 169 | ||
| 150 | if (!rc) | 170 | if (!rc) |
| 151 | rc = count; | 171 | rc = count; |
| 172 | out: | ||
| 173 | free_cpumask_var(offline_mask); | ||
| 152 | return rc; | 174 | return rc; |
| 153 | } | 175 | } |
| 154 | 176 | ||
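The rtas_online_cpus_mask()/rtas_offline_cpus_mask() helpers used above are introduced elsewhere in this series (in arch/powerpc/kernel/rtas.c) and are not shown in this hunk. A minimal sketch of the pattern such a mask-based helper might follow is below; the name, the mask-clearing behaviour and the error handling are assumptions for illustration, not the actual implementation (cpu_down() also requires CONFIG_HOTPLUG_CPU):

	static int example_cpus_set_state(struct cpumask *cpus, bool online)
	{
		int cpu, ret = 0;

		for_each_cpu(cpu, cpus) {
			int err = online ? cpu_up(cpu) : cpu_down(cpu);

			if (err) {
				/* Leave failed CPUs in the mask so the caller
				 * can see which ones did not transition. */
				ret = err;
				continue;
			}
			cpumask_clear_cpu(cpu, cpus);
		}
		return ret;
	}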
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c index 97fe82ee8633..2d3b1dd9571d 100644 --- a/arch/powerpc/platforms/wsp/ics.c +++ b/arch/powerpc/platforms/wsp/ics.c | |||
| @@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d, | |||
| 361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); | 361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); |
| 362 | wsp_ics_set_xive(ics, hw_irq, xive); | 362 | wsp_ics_set_xive(ics, hw_irq, xive); |
| 363 | 363 | ||
| 364 | return 0; | 364 | return IRQ_SET_MASK_OK; |
| 365 | } | 365 | } |
| 366 | 366 | ||
| 367 | static struct irq_chip wsp_irq_chip = { | 367 | static struct irq_chip wsp_irq_chip = { |
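This hunk, together with the ehv_pic, mpic and ics-opal hunks below, replaces a bare return 0 with the symbolic IRQ_SET_MASK_OK. IRQ_SET_MASK_OK evaluates to 0, so behaviour is unchanged; the symbolic value documents the contract with the generic IRQ core. A minimal sketch of an .irq_set_affinity callback using the symbolic returns (the hardware programming step is a placeholder):

	static int example_set_affinity(struct irq_data *d,
					const struct cpumask *mask, bool force)
	{
		unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;

		/* program_hw_target(d->hwirq, cpu);   hardware-specific step */

		/*
		 * IRQ_SET_MASK_OK asks the core to copy the new mask into
		 * irq_data; IRQ_SET_MASK_OK_NOCOPY means the callback has
		 * already updated it itself.
		 */
		return IRQ_SET_MASK_OK;
	}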
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index b0a518e97599..99464a7bdb3b 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
| @@ -64,6 +64,8 @@ endif | |||
| 64 | 64 | ||
| 65 | obj-$(CONFIG_PPC_SCOM) += scom.o | 65 | obj-$(CONFIG_PPC_SCOM) += scom.o |
| 66 | 66 | ||
| 67 | obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o | ||
| 68 | |||
| 67 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 69 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
| 68 | 70 | ||
| 69 | obj-$(CONFIG_PPC_XICS) += xics/ | 71 | obj-$(CONFIG_PPC_XICS) += xics/ |
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index 6e0e1005227f..9cd0e60716fe 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c | |||
| @@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, | |||
| 81 | ev_int_set_config(src, config, prio, cpuid); | 81 | ev_int_set_config(src, config, prio, cpuid); |
| 82 | spin_unlock_irqrestore(&ehv_pic_lock, flags); | 82 | spin_unlock_irqrestore(&ehv_pic_lock, flags); |
| 83 | 83 | ||
| 84 | return 0; | 84 | return IRQ_SET_MASK_OK; |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | static unsigned int ehv_pic_type_to_vecpri(unsigned int type) | 87 | static unsigned int ehv_pic_type_to_vecpri(unsigned int type) |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index ee21b5e71aec..0a13ecb270c7 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | |||
| 836 | mpic_physmask(mask)); | 836 | mpic_physmask(mask)); |
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | return 0; | 839 | return IRQ_SET_MASK_OK; |
| 840 | } | 840 | } |
| 841 | 841 | ||
| 842 | static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) | 842 | static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) |
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c new file mode 100644 index 000000000000..ce5a7b489e4b --- /dev/null +++ b/arch/powerpc/sysdev/udbg_memcons.c | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | /* | ||
| 2 | * A udbg backend which logs messages and reads input from in memory | ||
| 3 | * buffers. | ||
| 4 | * | ||
| 5 | * The console output can be read from memcons_output which is a | ||
| 6 | * circular buffer whose next write position is stored in memcons.output_pos. | ||
| 7 | * | ||
| 8 | * Input may be passed by writing into the memcons_input buffer when it is | ||
| 9 | * empty. The input buffer is empty when both input_pos == input_start and | ||
| 10 | * *input_start == '\0'. | ||
| 11 | * | ||
| 12 | * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp | ||
| 13 | * Copyright (C) 2013 Alistair Popple, IBM Corp | ||
| 14 | * | ||
| 15 | * This program is free software; you can redistribute it and/or | ||
| 16 | * modify it under the terms of the GNU General Public License | ||
| 17 | * as published by the Free Software Foundation; either version | ||
| 18 | * 2 of the License, or (at your option) any later version. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <asm/barrier.h> | ||
| 24 | #include <asm/page.h> | ||
| 25 | #include <asm/processor.h> | ||
| 26 | #include <asm/udbg.h> | ||
| 27 | |||
| 28 | struct memcons { | ||
| 29 | char *output_start; | ||
| 30 | char *output_pos; | ||
| 31 | char *output_end; | ||
| 32 | char *input_start; | ||
| 33 | char *input_pos; | ||
| 34 | char *input_end; | ||
| 35 | }; | ||
| 36 | |||
| 37 | static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE]; | ||
| 38 | static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE]; | ||
| 39 | |||
| 40 | struct memcons memcons = { | ||
| 41 | .output_start = memcons_output, | ||
| 42 | .output_pos = memcons_output, | ||
| 43 | .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE], | ||
| 44 | .input_start = memcons_input, | ||
| 45 | .input_pos = memcons_input, | ||
| 46 | .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], | ||
| 47 | }; | ||
| 48 | |||
| 49 | void memcons_putc(char c) | ||
| 50 | { | ||
| 51 | char *new_output_pos; | ||
| 52 | |||
| 53 | *memcons.output_pos = c; | ||
| 54 | wmb(); | ||
| 55 | new_output_pos = memcons.output_pos + 1; | ||
| 56 | if (new_output_pos >= memcons.output_end) | ||
| 57 | new_output_pos = memcons.output_start; | ||
| 58 | |||
| 59 | memcons.output_pos = new_output_pos; | ||
| 60 | } | ||
| 61 | |||
| 62 | int memcons_getc_poll(void) | ||
| 63 | { | ||
| 64 | char c; | ||
| 65 | char *new_input_pos; | ||
| 66 | |||
| 67 | if (*memcons.input_pos) { | ||
| 68 | c = *memcons.input_pos; | ||
| 69 | |||
| 70 | new_input_pos = memcons.input_pos + 1; | ||
| 71 | if (new_input_pos >= memcons.input_end) | ||
| 72 | new_input_pos = memcons.input_start; | ||
| 73 | else if (*new_input_pos == '\0') | ||
| 74 | new_input_pos = memcons.input_start; | ||
| 75 | |||
| 76 | *memcons.input_pos = '\0'; | ||
| 77 | wmb(); | ||
| 78 | memcons.input_pos = new_input_pos; | ||
| 79 | return c; | ||
| 80 | } | ||
| 81 | |||
| 82 | return -1; | ||
| 83 | } | ||
| 84 | |||
| 85 | int memcons_getc(void) | ||
| 86 | { | ||
| 87 | int c; | ||
| 88 | |||
| 89 | while (1) { | ||
| 90 | c = memcons_getc_poll(); | ||
| 91 | if (c == -1) | ||
| 92 | cpu_relax(); | ||
| 93 | else | ||
| 94 | break; | ||
| 95 | } | ||
| 96 | |||
| 97 | return c; | ||
| 98 | } | ||
| 99 | |||
| 100 | void udbg_init_memcons(void) | ||
| 101 | { | ||
| 102 | udbg_putc = memcons_putc; | ||
| 103 | udbg_getc = memcons_getc; | ||
| 104 | udbg_getc_poll = memcons_getc_poll; | ||
| 105 | } | ||
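The header comment defines the producer side of the input protocol, but no in-kernel producer is included; input is normally injected by an external debugger writing into the buffer directly. A hypothetical helper honouring that contract, shown only to illustrate the "buffer is empty" condition the comment describes:

	static bool memcons_try_inject(struct memcons *mc, const char *s, size_t len)
	{
		/* Only write once the consumer has drained everything:
		 * input_pos == input_start and *input_start == '\0'. */
		if (mc->input_pos != mc->input_start || *mc->input_start != '\0')
			return false;

		/* Reserve one byte for the terminating NUL. */
		if (len >= (size_t)(mc->input_end - mc->input_start))
			return false;

		memcpy(mc->input_start, s, len);
		mc->input_start[len] = '\0';
		return true;
	}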
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index f7e8609df0d5..39d72212655e 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c | |||
| @@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d, | |||
| 148 | __func__, d->irq, hw_irq, server, rc); | 148 | __func__, d->irq, hw_irq, server, rc); |
| 149 | return -1; | 149 | return -1; |
| 150 | } | 150 | } |
| 151 | return 0; | 151 | return IRQ_SET_MASK_OK; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static struct irq_chip ics_opal_irq_chip = { | 154 | static struct irq_chip ics_opal_irq_chip = { |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6a154a91c7e7..685692c94f05 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -108,7 +108,6 @@ config X86 | |||
| 108 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) | 108 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) |
| 109 | select GENERIC_TIME_VSYSCALL if X86_64 | 109 | select GENERIC_TIME_VSYSCALL if X86_64 |
| 110 | select KTIME_SCALAR if X86_32 | 110 | select KTIME_SCALAR if X86_32 |
| 111 | select ALWAYS_USE_PERSISTENT_CLOCK | ||
| 112 | select GENERIC_STRNCPY_FROM_USER | 111 | select GENERIC_STRNCPY_FROM_USER |
| 113 | select GENERIC_STRNLEN_USER | 112 | select GENERIC_STRNLEN_USER |
| 114 | select HAVE_CONTEXT_TRACKING if X86_64 | 113 | select HAVE_CONTEXT_TRACKING if X86_64 |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index dab95a85f7f8..55b67614ed94 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; | 34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; |
| 35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; | 35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; |
| 36 | static unsigned int __initdata next_early_pgt = 2; | 36 | static unsigned int __initdata next_early_pgt = 2; |
| 37 | pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); | 37 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); |
| 38 | 38 | ||
| 39 | /* Wipe all early page tables except for the kernel symbol map */ | 39 | /* Wipe all early page tables except for the kernel symbol map */ |
| 40 | static void __init reset_early_page_tables(void) | 40 | static void __init reset_early_page_tables(void) |
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index d893e8ed8ac9..2e9e12871c2b 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c | |||
| @@ -487,6 +487,7 @@ static inline void show_saved_mc(void) | |||
| 487 | #endif | 487 | #endif |
| 488 | 488 | ||
| 489 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) | 489 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) |
| 490 | static DEFINE_MUTEX(x86_cpu_microcode_mutex); | ||
| 490 | /* | 491 | /* |
| 491 | * Save this mc into mc_saved_data. So it will be loaded early when a CPU is | 492 | * Save this mc into mc_saved_data. So it will be loaded early when a CPU is |
| 492 | * hot added or resumes. | 493 | * hot added or resumes. |
| @@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc) | |||
| 507 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in | 508 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in |
| 508 | * hotplug. | 509 | * hotplug. |
| 509 | */ | 510 | */ |
| 510 | cpu_hotplug_driver_lock(); | 511 | mutex_lock(&x86_cpu_microcode_mutex); |
| 511 | 512 | ||
| 512 | mc_saved_count_init = mc_saved_data.mc_saved_count; | 513 | mc_saved_count_init = mc_saved_data.mc_saved_count; |
| 513 | mc_saved_count = mc_saved_data.mc_saved_count; | 514 | mc_saved_count = mc_saved_data.mc_saved_count; |
| @@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc) | |||
| 544 | } | 545 | } |
| 545 | 546 | ||
| 546 | out: | 547 | out: |
| 547 | cpu_hotplug_driver_unlock(); | 548 | mutex_unlock(&x86_cpu_microcode_mutex); |
| 548 | 549 | ||
| 549 | return ret; | 550 | return ret; |
| 550 | } | 551 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 607af0d4d5ef..4e7a37ff03ab 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -312,6 +312,8 @@ void arch_cpu_idle(void) | |||
| 312 | { | 312 | { |
| 313 | if (cpuidle_idle_call()) | 313 | if (cpuidle_idle_call()) |
| 314 | x86_idle(); | 314 | x86_idle(); |
| 315 | else | ||
| 316 | local_irq_enable(); | ||
| 315 | } | 317 | } |
| 316 | 318 | ||
| 317 | /* | 319 | /* |
| @@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu) | |||
| 368 | */ | 370 | */ |
| 369 | static void amd_e400_idle(void) | 371 | static void amd_e400_idle(void) |
| 370 | { | 372 | { |
| 371 | if (need_resched()) | ||
| 372 | return; | ||
| 373 | |||
| 374 | if (!amd_e400_c1e_detected) { | 373 | if (!amd_e400_c1e_detected) { |
| 375 | u32 lo, hi; | 374 | u32 lo, hi; |
| 376 | 375 | ||
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fdc5dca14fb3..eaac1743def7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
| @@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | /* | 361 | /* |
| 362 | * would have hole in the middle or ends, and only ram parts will be mapped. | 362 | * We need to iterate through the E820 memory map and create direct mappings |
| 363 | * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply | ||
| 364 | * create direct mappings for all pfns from [0 to max_low_pfn) and | ||
| 365 | * [4GB to max_pfn) because of possible memory holes in high addresses | ||
| 366 | * that cannot be marked as UC by fixed/variable range MTRRs. | ||
| 367 | * Depending on the alignment of E820 ranges, this may possibly result | ||
| 368 | * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. | ||
| 369 | * | ||
| 370 | * init_mem_mapping() calls init_range_memory_mapping() with big range. | ||
| 371 | * That range would have hole in the middle or ends, and only ram parts | ||
| 372 | * will be mapped in init_range_memory_mapping(). | ||
| 363 | */ | 373 | */ |
| 364 | static unsigned long __init init_range_memory_mapping( | 374 | static unsigned long __init init_range_memory_mapping( |
| 365 | unsigned long r_start, | 375 | unsigned long r_start, |
| @@ -419,6 +429,13 @@ void __init init_mem_mapping(void) | |||
| 419 | max_pfn_mapped = 0; /* will get exact value next */ | 429 | max_pfn_mapped = 0; /* will get exact value next */ |
| 420 | min_pfn_mapped = real_end >> PAGE_SHIFT; | 430 | min_pfn_mapped = real_end >> PAGE_SHIFT; |
| 421 | last_start = start = real_end; | 431 | last_start = start = real_end; |
| 432 | |||
| 433 | /* | ||
| 434 | * We start from the top (end of memory) and go to the bottom. | ||
| 435 | * The memblock_find_in_range() gets us a block of RAM from the | ||
| 436 | * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages | ||
| 437 | * for page table. | ||
| 438 | */ | ||
| 422 | while (last_start > ISA_END_ADDRESS) { | 439 | while (last_start > ISA_END_ADDRESS) { |
| 423 | if (last_start > step_size) { | 440 | if (last_start > step_size) { |
| 424 | start = round_down(last_start - 1, step_size); | 441 | start = round_down(last_start - 1, step_size); |
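A condensed restatement of the top-down walk the new comment describes: the highest unmapped chunk is mapped first so that page-table pages for the next, lower chunk can be allocated from memory that is already mapped. This is a simplification, not the actual init_mem_mapping() body; the real code also clamps start to ISA_END_ADDRESS and grows the window via get_new_step_size():

	last_start = start = real_end;
	step_size = PMD_SIZE;				/* initial window */

	while (last_start > ISA_END_ADDRESS) {
		if (last_start > step_size)
			start = round_down(last_start - 1, step_size);
		else
			start = ISA_END_ADDRESS;

		/* Map [start, last_start); page-table pages come from higher,
		 * already-mapped memory found by memblock_find_in_range(). */
		init_range_memory_mapping(start, last_start);

		last_start = start;
		step_size <<= 5;			/* widen the window each pass */
	}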
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 00d2efd674df..4f4e741d34b2 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
| 31 | #include <linux/dmi.h> | ||
| 32 | #include <linux/delay.h> | ||
| 31 | #ifdef CONFIG_ACPI_PROCFS_POWER | 33 | #ifdef CONFIG_ACPI_PROCFS_POWER |
| 32 | #include <linux/proc_fs.h> | 34 | #include <linux/proc_fs.h> |
| 33 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
| @@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev); | |||
| 74 | #endif | 76 | #endif |
| 75 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); | 77 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); |
| 76 | 78 | ||
| 79 | static int ac_sleep_before_get_state_ms; | ||
| 80 | |||
| 77 | static struct acpi_driver acpi_ac_driver = { | 81 | static struct acpi_driver acpi_ac_driver = { |
| 78 | .name = "ac", | 82 | .name = "ac", |
| 79 | .class = ACPI_AC_CLASS, | 83 | .class = ACPI_AC_CLASS, |
| @@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) | |||
| 252 | case ACPI_AC_NOTIFY_STATUS: | 256 | case ACPI_AC_NOTIFY_STATUS: |
| 253 | case ACPI_NOTIFY_BUS_CHECK: | 257 | case ACPI_NOTIFY_BUS_CHECK: |
| 254 | case ACPI_NOTIFY_DEVICE_CHECK: | 258 | case ACPI_NOTIFY_DEVICE_CHECK: |
| 259 | /* | ||
| 260 | * A buggy BIOS may notify AC first and then sleep for | ||
| 261 | * a specific time before doing actual operations in the | ||
| 262 | * EC event handler (_Qxx). This will cause the AC state | ||
| 263 | * reported by the ACPI event to be incorrect, so wait for a | ||
| 264 | * specific time for the EC event handler to make progress. | ||
| 265 | */ | ||
| 266 | if (ac_sleep_before_get_state_ms > 0) | ||
| 267 | msleep(ac_sleep_before_get_state_ms); | ||
| 268 | |||
| 255 | acpi_ac_get_state(ac); | 269 | acpi_ac_get_state(ac); |
| 256 | acpi_bus_generate_proc_event(device, event, (u32) ac->state); | 270 | acpi_bus_generate_proc_event(device, event, (u32) ac->state); |
| 257 | acpi_bus_generate_netlink_event(device->pnp.device_class, | 271 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
| @@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) | |||
| 264 | return; | 278 | return; |
| 265 | } | 279 | } |
| 266 | 280 | ||
| 281 | static int thinkpad_e530_quirk(const struct dmi_system_id *d) | ||
| 282 | { | ||
| 283 | ac_sleep_before_get_state_ms = 1000; | ||
| 284 | return 0; | ||
| 285 | } | ||
| 286 | |||
| 287 | static struct dmi_system_id ac_dmi_table[] = { | ||
| 288 | { | ||
| 289 | .callback = thinkpad_e530_quirk, | ||
| 290 | .ident = "thinkpad e530", | ||
| 291 | .matches = { | ||
| 292 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 293 | DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), | ||
| 294 | }, | ||
| 295 | }, | ||
| 296 | {}, | ||
| 297 | }; | ||
| 298 | |||
| 267 | static int acpi_ac_add(struct acpi_device *device) | 299 | static int acpi_ac_add(struct acpi_device *device) |
| 268 | { | 300 | { |
| 269 | int result = 0; | 301 | int result = 0; |
| @@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device) | |||
| 312 | kfree(ac); | 344 | kfree(ac); |
| 313 | } | 345 | } |
| 314 | 346 | ||
| 347 | dmi_check_system(ac_dmi_table); | ||
| 315 | return result; | 348 | return result; |
| 316 | } | 349 | } |
| 317 | 350 | ||
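Further machines exhibiting the same EC timing problem would be handled by extending ac_dmi_table. A hypothetical extra entry, reusing the same 1000 ms delay; the vendor and product strings below are placeholders, not part of this patch:

	{
		.callback = thinkpad_e530_quirk,	/* sets the 1000 ms delay */
		.ident = "example notebook",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model 123"),
		},
	},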
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d45b2871d33b..edc00818c803 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) | |||
| 223 | static int ec_poll(struct acpi_ec *ec) | 223 | static int ec_poll(struct acpi_ec *ec) |
| 224 | { | 224 | { |
| 225 | unsigned long flags; | 225 | unsigned long flags; |
| 226 | int repeat = 2; /* number of command restarts */ | 226 | int repeat = 5; /* number of command restarts */ |
| 227 | while (repeat--) { | 227 | while (repeat--) { |
| 228 | unsigned long delay = jiffies + | 228 | unsigned long delay = jiffies + |
| 229 | msecs_to_jiffies(ec_delay); | 229 | msecs_to_jiffies(ec_delay); |
| @@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec) | |||
| 241 | } | 241 | } |
| 242 | advance_transaction(ec, acpi_ec_read_status(ec)); | 242 | advance_transaction(ec, acpi_ec_read_status(ec)); |
| 243 | } while (time_before(jiffies, delay)); | 243 | } while (time_before(jiffies, delay)); |
| 244 | if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) | ||
| 245 | break; | ||
| 246 | pr_debug(PREFIX "controller reset, restart transaction\n"); | 244 | pr_debug(PREFIX "controller reset, restart transaction\n"); |
| 247 | spin_lock_irqsave(&ec->lock, flags); | 245 | spin_lock_irqsave(&ec->lock, flags); |
| 248 | start_transaction(ec); | 246 | start_transaction(ec); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index bec717ffd25f..c266cdc11784 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
| @@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = { | |||
| 95 | }; | 95 | }; |
| 96 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); | 96 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); |
| 97 | 97 | ||
| 98 | static SIMPLE_DEV_PM_OPS(acpi_processor_pm, | ||
| 99 | acpi_processor_suspend, acpi_processor_resume); | ||
| 100 | |||
| 101 | static struct acpi_driver acpi_processor_driver = { | 98 | static struct acpi_driver acpi_processor_driver = { |
| 102 | .name = "processor", | 99 | .name = "processor", |
| 103 | .class = ACPI_PROCESSOR_CLASS, | 100 | .class = ACPI_PROCESSOR_CLASS, |
| @@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = { | |||
| 107 | .remove = acpi_processor_remove, | 104 | .remove = acpi_processor_remove, |
| 108 | .notify = acpi_processor_notify, | 105 | .notify = acpi_processor_notify, |
| 109 | }, | 106 | }, |
| 110 | .drv.pm = &acpi_processor_pm, | ||
| 111 | }; | 107 | }; |
| 112 | 108 | ||
| 113 | #define INSTALL_NOTIFY_HANDLER 1 | 109 | #define INSTALL_NOTIFY_HANDLER 1 |
| @@ -934,6 +930,8 @@ static int __init acpi_processor_init(void) | |||
| 934 | if (result < 0) | 930 | if (result < 0) |
| 935 | return result; | 931 | return result; |
| 936 | 932 | ||
| 933 | acpi_processor_syscore_init(); | ||
| 934 | |||
| 937 | acpi_processor_install_hotplug_notify(); | 935 | acpi_processor_install_hotplug_notify(); |
| 938 | 936 | ||
| 939 | acpi_thermal_cpufreq_init(); | 937 | acpi_thermal_cpufreq_init(); |
| @@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void) | |||
| 956 | 954 | ||
| 957 | acpi_processor_uninstall_hotplug_notify(); | 955 | acpi_processor_uninstall_hotplug_notify(); |
| 958 | 956 | ||
| 957 | acpi_processor_syscore_exit(); | ||
| 958 | |||
| 959 | acpi_bus_unregister_driver(&acpi_processor_driver); | 959 | acpi_bus_unregister_driver(&acpi_processor_driver); |
| 960 | 960 | ||
| 961 | return; | 961 | return; |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f0df2c9434d2..eb133c77aadb 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/sched.h> /* need_resched() */ | 34 | #include <linux/sched.h> /* need_resched() */ |
| 35 | #include <linux/clockchips.h> | 35 | #include <linux/clockchips.h> |
| 36 | #include <linux/cpuidle.h> | 36 | #include <linux/cpuidle.h> |
| 37 | #include <linux/syscore_ops.h> | ||
| 37 | 38 | ||
| 38 | /* | 39 | /* |
| 39 | * Include the apic definitions for x86 to have the APIC timer related defines | 40 | * Include the apic definitions for x86 to have the APIC timer related defines |
| @@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, | |||
| 210 | 211 | ||
| 211 | #endif | 212 | #endif |
| 212 | 213 | ||
| 214 | #ifdef CONFIG_PM_SLEEP | ||
| 213 | static u32 saved_bm_rld; | 215 | static u32 saved_bm_rld; |
| 214 | 216 | ||
| 215 | static void acpi_idle_bm_rld_save(void) | 217 | int acpi_processor_suspend(void) |
| 216 | { | 218 | { |
| 217 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); | 219 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); |
| 220 | return 0; | ||
| 218 | } | 221 | } |
| 219 | static void acpi_idle_bm_rld_restore(void) | 222 | |
| 223 | void acpi_processor_resume(void) | ||
| 220 | { | 224 | { |
| 221 | u32 resumed_bm_rld; | 225 | u32 resumed_bm_rld; |
| 222 | 226 | ||
| 223 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); | 227 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); |
| 228 | if (resumed_bm_rld == saved_bm_rld) | ||
| 229 | return; | ||
| 224 | 230 | ||
| 225 | if (resumed_bm_rld != saved_bm_rld) | 231 | acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); |
| 226 | acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); | ||
| 227 | } | 232 | } |
| 228 | 233 | ||
| 229 | int acpi_processor_suspend(struct device *dev) | 234 | static struct syscore_ops acpi_processor_syscore_ops = { |
| 235 | .suspend = acpi_processor_suspend, | ||
| 236 | .resume = acpi_processor_resume, | ||
| 237 | }; | ||
| 238 | |||
| 239 | void acpi_processor_syscore_init(void) | ||
| 230 | { | 240 | { |
| 231 | acpi_idle_bm_rld_save(); | 241 | register_syscore_ops(&acpi_processor_syscore_ops); |
| 232 | return 0; | ||
| 233 | } | 242 | } |
| 234 | 243 | ||
| 235 | int acpi_processor_resume(struct device *dev) | 244 | void acpi_processor_syscore_exit(void) |
| 236 | { | 245 | { |
| 237 | acpi_idle_bm_rld_restore(); | 246 | unregister_syscore_ops(&acpi_processor_syscore_ops); |
| 238 | return 0; | ||
| 239 | } | 247 | } |
| 248 | #endif /* CONFIG_PM_SLEEP */ | ||
| 240 | 249 | ||
| 241 | #if defined(CONFIG_X86) | 250 | #if defined(CONFIG_X86) |
| 242 | static void tsc_check_state(int state) | 251 | static void tsc_check_state(int state) |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fe158fd4f1df..c1bc608339a6 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) | |||
| 1785 | acpi_set_pnp_ids(handle, &pnp, type); | 1785 | acpi_set_pnp_ids(handle, &pnp, type); |
| 1786 | 1786 | ||
| 1787 | if (!pnp.type.hardware_id) | 1787 | if (!pnp.type.hardware_id) |
| 1788 | return; | 1788 | goto out; |
| 1789 | 1789 | ||
| 1790 | /* | 1790 | /* |
| 1791 | * This relies on the fact that acpi_install_notify_handler() will not | 1791 | * This relies on the fact that acpi_install_notify_handler() will not |
| @@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) | |||
| 1800 | } | 1800 | } |
| 1801 | } | 1801 | } |
| 1802 | 1802 | ||
| 1803 | out: | ||
| 1803 | acpi_free_pnp_ids(&pnp); | 1804 | acpi_free_pnp_ids(&pnp); |
| 1804 | } | 1805 | } |
| 1805 | 1806 | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index c3932d0876e0..5b32e15a65ce 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
| 456 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), | 456 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), |
| 457 | }, | 457 | }, |
| 458 | }, | 458 | }, |
| 459 | { | ||
| 460 | .callback = video_ignore_initial_backlight, | ||
| 461 | .ident = "HP 1000 Notebook PC", | ||
| 462 | .matches = { | ||
| 463 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | ||
| 464 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), | ||
| 465 | }, | ||
| 466 | }, | ||
| 459 | {} | 467 | {} |
| 460 | }; | 468 | }; |
| 461 | 469 | ||
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index c1bfaf43d109..980b88e109fc 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c | |||
| @@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev) | |||
| 933 | } | 933 | } |
| 934 | 934 | ||
| 935 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 935 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 936 | if (!mem_res) { | ||
| 937 | err = -ENXIO; | ||
| 938 | goto err_rel_gpio; | ||
| 939 | } | ||
| 940 | |||
| 941 | ide_base = devm_ioremap_resource(&pdev->dev, mem_res); | 936 | ide_base = devm_ioremap_resource(&pdev->dev, mem_res); |
| 942 | if (IS_ERR(ide_base)) { | 937 | if (IS_ERR(ide_base)) { |
| 943 | err = PTR_ERR(ide_base); | 938 | err = PTR_ERR(ide_base); |
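The explicit !mem_res check can be dropped because devm_ioremap_resource() validates its resource argument itself and returns ERR_PTR(-EINVAL) for a missing or non-memory resource, so the usual idiom reduces to:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* NULL res handled inside */
	if (IS_ERR(base))
		return PTR_ERR(base);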
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 39c32529b833..5da914041305 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c | |||
| @@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | |||
| 61 | int dev_pm_put_subsys_data(struct device *dev) | 61 | int dev_pm_put_subsys_data(struct device *dev) |
| 62 | { | 62 | { |
| 63 | struct pm_subsys_data *psd; | 63 | struct pm_subsys_data *psd; |
| 64 | int ret = 0; | 64 | int ret = 1; |
| 65 | 65 | ||
| 66 | spin_lock_irq(&dev->power.lock); | 66 | spin_lock_irq(&dev->power.lock); |
| 67 | 67 | ||
| 68 | psd = dev_to_psd(dev); | 68 | psd = dev_to_psd(dev); |
| 69 | if (!psd) { | 69 | if (!psd) |
| 70 | ret = -EINVAL; | ||
| 71 | goto out; | 70 | goto out; |
| 72 | } | ||
| 73 | 71 | ||
| 74 | if (--psd->refcount == 0) { | 72 | if (--psd->refcount == 0) { |
| 75 | dev->power.subsys_data = NULL; | 73 | dev->power.subsys_data = NULL; |
| 76 | kfree(psd); | 74 | } else { |
| 77 | ret = 1; | 75 | psd = NULL; |
| 76 | ret = 0; | ||
| 78 | } | 77 | } |
| 79 | 78 | ||
| 80 | out: | 79 | out: |
| 81 | spin_unlock_irq(&dev->power.lock); | 80 | spin_unlock_irq(&dev->power.lock); |
| 81 | kfree(psd); | ||
| 82 | 82 | ||
| 83 | return ret; | 83 | return ret; |
| 84 | } | 84 | } |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ca63104136e0..d6d314027b5d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -55,6 +55,39 @@ | |||
| 55 | #define SECTOR_SHIFT 9 | 55 | #define SECTOR_SHIFT 9 |
| 56 | #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) | 56 | #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) |
| 57 | 57 | ||
| 58 | /* | ||
| 59 | * Increment the given counter and return its updated value. | ||
| 60 | * If the counter is already 0 it will not be incremented. | ||
| 61 | * If the counter is already at its maximum value, it returns | ||
| 62 | * -EINVAL without updating it. | ||
| 63 | */ | ||
| 64 | static int atomic_inc_return_safe(atomic_t *v) | ||
| 65 | { | ||
| 66 | unsigned int counter; | ||
| 67 | |||
| 68 | counter = (unsigned int)__atomic_add_unless(v, 1, 0); | ||
| 69 | if (counter <= (unsigned int)INT_MAX) | ||
| 70 | return (int)counter; | ||
| 71 | |||
| 72 | atomic_dec(v); | ||
| 73 | |||
| 74 | return -EINVAL; | ||
| 75 | } | ||
| 76 | |||
| 77 | /* Decrement the counter. Return the resulting value, or -EINVAL */ | ||
| 78 | static int atomic_dec_return_safe(atomic_t *v) | ||
| 79 | { | ||
| 80 | int counter; | ||
| 81 | |||
| 82 | counter = atomic_dec_return(v); | ||
| 83 | if (counter >= 0) | ||
| 84 | return counter; | ||
| 85 | |||
| 86 | atomic_inc(v); | ||
| 87 | |||
| 88 | return -EINVAL; | ||
| 89 | } | ||
| 90 | |||
| 58 | #define RBD_DRV_NAME "rbd" | 91 | #define RBD_DRV_NAME "rbd" |
| 59 | #define RBD_DRV_NAME_LONG "rbd (rados block device)" | 92 | #define RBD_DRV_NAME_LONG "rbd (rados block device)" |
| 60 | 93 | ||
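These helpers give rbd a saturating reference count: an increment is refused once the count has dropped to zero (or would exceed INT_MAX), and a decrement past zero reports underflow instead of wrapping. They are consumed later in this patch by rbd_dev_parent_get()/rbd_dev_parent_put(); a minimal illustration of the intended caller pattern, with hypothetical names rather than patch code:

	static bool example_get(atomic_t *refs)
	{
		/* > 0 means the reference was taken; 0 or -EINVAL means the
		 * object is already dead or the count has saturated. */
		return atomic_inc_return_safe(refs) > 0;
	}

	static void example_put(atomic_t *refs)
	{
		if (atomic_dec_return_safe(refs) < 0)
			pr_warn("reference underflow\n");
	}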
| @@ -100,21 +133,20 @@ | |||
| 100 | * block device image metadata (in-memory version) | 133 | * block device image metadata (in-memory version) |
| 101 | */ | 134 | */ |
| 102 | struct rbd_image_header { | 135 | struct rbd_image_header { |
| 103 | /* These four fields never change for a given rbd image */ | 136 | /* These six fields never change for a given rbd image */ |
| 104 | char *object_prefix; | 137 | char *object_prefix; |
| 105 | u64 features; | ||
| 106 | __u8 obj_order; | 138 | __u8 obj_order; |
| 107 | __u8 crypt_type; | 139 | __u8 crypt_type; |
| 108 | __u8 comp_type; | 140 | __u8 comp_type; |
| 141 | u64 stripe_unit; | ||
| 142 | u64 stripe_count; | ||
| 143 | u64 features; /* Might be changeable someday? */ | ||
| 109 | 144 | ||
| 110 | /* The remaining fields need to be updated occasionally */ | 145 | /* The remaining fields need to be updated occasionally */ |
| 111 | u64 image_size; | 146 | u64 image_size; |
| 112 | struct ceph_snap_context *snapc; | 147 | struct ceph_snap_context *snapc; |
| 113 | char *snap_names; | 148 | char *snap_names; /* format 1 only */ |
| 114 | u64 *snap_sizes; | 149 | u64 *snap_sizes; /* format 1 only */ |
| 115 | |||
| 116 | u64 stripe_unit; | ||
| 117 | u64 stripe_count; | ||
| 118 | }; | 150 | }; |
| 119 | 151 | ||
| 120 | /* | 152 | /* |
| @@ -225,6 +257,7 @@ struct rbd_obj_request { | |||
| 225 | }; | 257 | }; |
| 226 | }; | 258 | }; |
| 227 | struct page **copyup_pages; | 259 | struct page **copyup_pages; |
| 260 | u32 copyup_page_count; | ||
| 228 | 261 | ||
| 229 | struct ceph_osd_request *osd_req; | 262 | struct ceph_osd_request *osd_req; |
| 230 | 263 | ||
| @@ -257,6 +290,7 @@ struct rbd_img_request { | |||
| 257 | struct rbd_obj_request *obj_request; /* obj req initiator */ | 290 | struct rbd_obj_request *obj_request; /* obj req initiator */ |
| 258 | }; | 291 | }; |
| 259 | struct page **copyup_pages; | 292 | struct page **copyup_pages; |
| 293 | u32 copyup_page_count; | ||
| 260 | spinlock_t completion_lock;/* protects next_completion */ | 294 | spinlock_t completion_lock;/* protects next_completion */ |
| 261 | u32 next_completion; | 295 | u32 next_completion; |
| 262 | rbd_img_callback_t callback; | 296 | rbd_img_callback_t callback; |
| @@ -311,6 +345,7 @@ struct rbd_device { | |||
| 311 | 345 | ||
| 312 | struct rbd_spec *parent_spec; | 346 | struct rbd_spec *parent_spec; |
| 313 | u64 parent_overlap; | 347 | u64 parent_overlap; |
| 348 | atomic_t parent_ref; | ||
| 314 | struct rbd_device *parent; | 349 | struct rbd_device *parent; |
| 315 | 350 | ||
| 316 | /* protects updating the header */ | 351 | /* protects updating the header */ |
| @@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, | |||
| 359 | size_t count); | 394 | size_t count); |
| 360 | static ssize_t rbd_remove(struct bus_type *bus, const char *buf, | 395 | static ssize_t rbd_remove(struct bus_type *bus, const char *buf, |
| 361 | size_t count); | 396 | size_t count); |
| 362 | static int rbd_dev_image_probe(struct rbd_device *rbd_dev); | 397 | static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); |
| 398 | static void rbd_spec_put(struct rbd_spec *spec); | ||
| 363 | 399 | ||
| 364 | static struct bus_attribute rbd_bus_attrs[] = { | 400 | static struct bus_attribute rbd_bus_attrs[] = { |
| 365 | __ATTR(add, S_IWUSR, NULL, rbd_add), | 401 | __ATTR(add, S_IWUSR, NULL, rbd_add), |
| @@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request); | |||
| 426 | static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); | 462 | static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); |
| 427 | 463 | ||
| 428 | static int rbd_dev_refresh(struct rbd_device *rbd_dev); | 464 | static int rbd_dev_refresh(struct rbd_device *rbd_dev); |
| 429 | static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); | 465 | static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); |
| 466 | static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev); | ||
| 430 | static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, | 467 | static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, |
| 431 | u64 snap_id); | 468 | u64 snap_id); |
| 432 | static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, | 469 | static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, |
| @@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) | |||
| 726 | } | 763 | } |
| 727 | 764 | ||
| 728 | /* | 765 | /* |
| 729 | * Create a new header structure, translate header format from the on-disk | 766 | * Fill an rbd image header with information from the given format 1 |
| 730 | * header. | 767 | * on-disk header. |
| 731 | */ | 768 | */ |
| 732 | static int rbd_header_from_disk(struct rbd_image_header *header, | 769 | static int rbd_header_from_disk(struct rbd_device *rbd_dev, |
| 733 | struct rbd_image_header_ondisk *ondisk) | 770 | struct rbd_image_header_ondisk *ondisk) |
| 734 | { | 771 | { |
| 772 | struct rbd_image_header *header = &rbd_dev->header; | ||
| 773 | bool first_time = header->object_prefix == NULL; | ||
| 774 | struct ceph_snap_context *snapc; | ||
| 775 | char *object_prefix = NULL; | ||
| 776 | char *snap_names = NULL; | ||
| 777 | u64 *snap_sizes = NULL; | ||
| 735 | u32 snap_count; | 778 | u32 snap_count; |
| 736 | size_t len; | ||
| 737 | size_t size; | 779 | size_t size; |
| 780 | int ret = -ENOMEM; | ||
| 738 | u32 i; | 781 | u32 i; |
| 739 | 782 | ||
| 740 | memset(header, 0, sizeof (*header)); | 783 | /* Allocate this now to avoid having to handle failure below */ |
| 741 | 784 | ||
| 742 | snap_count = le32_to_cpu(ondisk->snap_count); | 785 | if (first_time) { |
| 786 | size_t len; | ||
| 743 | 787 | ||
| 744 | len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); | 788 | len = strnlen(ondisk->object_prefix, |
| 745 | header->object_prefix = kmalloc(len + 1, GFP_KERNEL); | 789 | sizeof (ondisk->object_prefix)); |
| 746 | if (!header->object_prefix) | 790 | object_prefix = kmalloc(len + 1, GFP_KERNEL); |
| 747 | return -ENOMEM; | 791 | if (!object_prefix) |
| 748 | memcpy(header->object_prefix, ondisk->object_prefix, len); | 792 | return -ENOMEM; |
| 749 | header->object_prefix[len] = '\0'; | 793 | memcpy(object_prefix, ondisk->object_prefix, len); |
| 794 | object_prefix[len] = '\0'; | ||
| 795 | } | ||
| 750 | 796 | ||
| 797 | /* Allocate the snapshot context and fill it in */ | ||
| 798 | |||
| 799 | snap_count = le32_to_cpu(ondisk->snap_count); | ||
| 800 | snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); | ||
| 801 | if (!snapc) | ||
| 802 | goto out_err; | ||
| 803 | snapc->seq = le64_to_cpu(ondisk->snap_seq); | ||
| 751 | if (snap_count) { | 804 | if (snap_count) { |
| 805 | struct rbd_image_snap_ondisk *snaps; | ||
| 752 | u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); | 806 | u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); |
| 753 | 807 | ||
| 754 | /* Save a copy of the snapshot names */ | 808 | /* We'll keep a copy of the snapshot names... */ |
| 755 | 809 | ||
| 756 | if (snap_names_len > (u64) SIZE_MAX) | 810 | if (snap_names_len > (u64)SIZE_MAX) |
| 757 | return -EIO; | 811 | goto out_2big; |
| 758 | header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); | 812 | snap_names = kmalloc(snap_names_len, GFP_KERNEL); |
| 759 | if (!header->snap_names) | 813 | if (!snap_names) |
| 760 | goto out_err; | 814 | goto out_err; |
| 815 | |||
| 816 | /* ...as well as the array of their sizes. */ | ||
| 817 | |||
| 818 | size = snap_count * sizeof (*header->snap_sizes); | ||
| 819 | snap_sizes = kmalloc(size, GFP_KERNEL); | ||
| 820 | if (!snap_sizes) | ||
| 821 | goto out_err; | ||
| 822 | |||
| 761 | /* | 823 | /* |
| 762 | * Note that rbd_dev_v1_header_read() guarantees | 824 | * Copy the names, and fill in each snapshot's id |
| 763 | * the ondisk buffer we're working with has | 825 | * and size. |
| 826 | * | ||
| 827 | * Note that rbd_dev_v1_header_info() guarantees the | ||
| 828 | * ondisk buffer we're working with has | ||
| 764 | * snap_names_len bytes beyond the end of the | 829 | * snap_names_len bytes beyond the end of the |
| 765 | * snapshot id array, this memcpy() is safe. | 830 | * snapshot id array, this memcpy() is safe. |
| 766 | */ | 831 | */ |
| 767 | memcpy(header->snap_names, &ondisk->snaps[snap_count], | 832 | memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); |
| 768 | snap_names_len); | 833 | snaps = ondisk->snaps; |
| 834 | for (i = 0; i < snap_count; i++) { | ||
| 835 | snapc->snaps[i] = le64_to_cpu(snaps[i].id); | ||
| 836 | snap_sizes[i] = le64_to_cpu(snaps[i].image_size); | ||
| 837 | } | ||
| 838 | } | ||
| 769 | 839 | ||
| 770 | /* Record each snapshot's size */ | 840 | /* We won't fail any more, fill in the header */ |
| 771 | 841 | ||
| 772 | size = snap_count * sizeof (*header->snap_sizes); | 842 | down_write(&rbd_dev->header_rwsem); |
| 773 | header->snap_sizes = kmalloc(size, GFP_KERNEL); | 843 | if (first_time) { |
| 774 | if (!header->snap_sizes) | 844 | header->object_prefix = object_prefix; |
| 775 | goto out_err; | 845 | header->obj_order = ondisk->options.order; |
| 776 | for (i = 0; i < snap_count; i++) | 846 | header->crypt_type = ondisk->options.crypt_type; |
| 777 | header->snap_sizes[i] = | 847 | header->comp_type = ondisk->options.comp_type; |
| 778 | le64_to_cpu(ondisk->snaps[i].image_size); | 848 | /* The rest aren't used for format 1 images */ |
| 849 | header->stripe_unit = 0; | ||
| 850 | header->stripe_count = 0; | ||
| 851 | header->features = 0; | ||
| 779 | } else { | 852 | } else { |
| 780 | header->snap_names = NULL; | 853 | ceph_put_snap_context(header->snapc); |
| 781 | header->snap_sizes = NULL; | 854 | kfree(header->snap_names); |
| 855 | kfree(header->snap_sizes); | ||
| 782 | } | 856 | } |
| 783 | 857 | ||
| 784 | header->features = 0; /* No features support in v1 images */ | 858 | /* The remaining fields always get updated (when we refresh) */ |
| 785 | header->obj_order = ondisk->options.order; | ||
| 786 | header->crypt_type = ondisk->options.crypt_type; | ||
| 787 | header->comp_type = ondisk->options.comp_type; | ||
| 788 | |||
| 789 | /* Allocate and fill in the snapshot context */ | ||
| 790 | 859 | ||
| 791 | header->image_size = le64_to_cpu(ondisk->image_size); | 860 | header->image_size = le64_to_cpu(ondisk->image_size); |
| 861 | header->snapc = snapc; | ||
| 862 | header->snap_names = snap_names; | ||
| 863 | header->snap_sizes = snap_sizes; | ||
| 792 | 864 | ||
| 793 | header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); | 865 | /* Make sure mapping size is consistent with header info */ |
| 794 | if (!header->snapc) | ||
| 795 | goto out_err; | ||
| 796 | header->snapc->seq = le64_to_cpu(ondisk->snap_seq); | ||
| 797 | for (i = 0; i < snap_count; i++) | ||
| 798 | header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); | ||
| 799 | 866 | ||
| 800 | return 0; | 867 | if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time) |
| 868 | if (rbd_dev->mapping.size != header->image_size) | ||
| 869 | rbd_dev->mapping.size = header->image_size; | ||
| 870 | |||
| 871 | up_write(&rbd_dev->header_rwsem); | ||
| 801 | 872 | ||
| 873 | return 0; | ||
| 874 | out_2big: | ||
| 875 | ret = -EIO; | ||
| 802 | out_err: | 876 | out_err: |
| 803 | kfree(header->snap_sizes); | 877 | kfree(snap_sizes); |
| 804 | header->snap_sizes = NULL; | 878 | kfree(snap_names); |
| 805 | kfree(header->snap_names); | 879 | ceph_put_snap_context(snapc); |
| 806 | header->snap_names = NULL; | 880 | kfree(object_prefix); |
| 807 | kfree(header->object_prefix); | ||
| 808 | header->object_prefix = NULL; | ||
| 809 | 881 | ||
| 810 | return -ENOMEM; | 882 | return ret; |
| 811 | } | 883 | } |
| 812 | 884 | ||
| 813 | static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) | 885 | static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) |
| @@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, | |||
| 934 | 1006 | ||
| 935 | static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) | 1007 | static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) |
| 936 | { | 1008 | { |
| 937 | const char *snap_name = rbd_dev->spec->snap_name; | 1009 | u64 snap_id = rbd_dev->spec->snap_id; |
| 938 | u64 snap_id; | ||
| 939 | u64 size = 0; | 1010 | u64 size = 0; |
| 940 | u64 features = 0; | 1011 | u64 features = 0; |
| 941 | int ret; | 1012 | int ret; |
| 942 | 1013 | ||
| 943 | if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { | ||
| 944 | snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); | ||
| 945 | if (snap_id == CEPH_NOSNAP) | ||
| 946 | return -ENOENT; | ||
| 947 | } else { | ||
| 948 | snap_id = CEPH_NOSNAP; | ||
| 949 | } | ||
| 950 | |||
| 951 | ret = rbd_snap_size(rbd_dev, snap_id, &size); | 1014 | ret = rbd_snap_size(rbd_dev, snap_id, &size); |
| 952 | if (ret) | 1015 | if (ret) |
| 953 | return ret; | 1016 | return ret; |
| @@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) | |||
| 958 | rbd_dev->mapping.size = size; | 1021 | rbd_dev->mapping.size = size; |
| 959 | rbd_dev->mapping.features = features; | 1022 | rbd_dev->mapping.features = features; |
| 960 | 1023 | ||
| 961 | /* If we are mapping a snapshot it must be marked read-only */ | ||
| 962 | |||
| 963 | if (snap_id != CEPH_NOSNAP) | ||
| 964 | rbd_dev->mapping.read_only = true; | ||
| 965 | |||
| 966 | return 0; | 1024 | return 0; |
| 967 | } | 1025 | } |
| 968 | 1026 | ||
| @@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) | |||
| 970 | { | 1028 | { |
| 971 | rbd_dev->mapping.size = 0; | 1029 | rbd_dev->mapping.size = 0; |
| 972 | rbd_dev->mapping.features = 0; | 1030 | rbd_dev->mapping.features = 0; |
| 973 | rbd_dev->mapping.read_only = true; | ||
| 974 | } | ||
| 975 | |||
| 976 | static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) | ||
| 977 | { | ||
| 978 | rbd_dev->mapping.size = 0; | ||
| 979 | rbd_dev->mapping.features = 0; | ||
| 980 | rbd_dev->mapping.read_only = true; | ||
| 981 | } | 1031 | } |
| 982 | 1032 | ||
| 983 | static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) | 1033 | static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) |
| @@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) | |||
| 1342 | kref_put(&obj_request->kref, rbd_obj_request_destroy); | 1392 | kref_put(&obj_request->kref, rbd_obj_request_destroy); |
| 1343 | } | 1393 | } |
| 1344 | 1394 | ||
| 1345 | static void rbd_img_request_get(struct rbd_img_request *img_request) | 1395 | static bool img_request_child_test(struct rbd_img_request *img_request); |
| 1346 | { | 1396 | static void rbd_parent_request_destroy(struct kref *kref); |
| 1347 | dout("%s: img %p (was %d)\n", __func__, img_request, | ||
| 1348 | atomic_read(&img_request->kref.refcount)); | ||
| 1349 | kref_get(&img_request->kref); | ||
| 1350 | } | ||
| 1351 | |||
| 1352 | static void rbd_img_request_destroy(struct kref *kref); | 1397 | static void rbd_img_request_destroy(struct kref *kref); |
| 1353 | static void rbd_img_request_put(struct rbd_img_request *img_request) | 1398 | static void rbd_img_request_put(struct rbd_img_request *img_request) |
| 1354 | { | 1399 | { |
| 1355 | rbd_assert(img_request != NULL); | 1400 | rbd_assert(img_request != NULL); |
| 1356 | dout("%s: img %p (was %d)\n", __func__, img_request, | 1401 | dout("%s: img %p (was %d)\n", __func__, img_request, |
| 1357 | atomic_read(&img_request->kref.refcount)); | 1402 | atomic_read(&img_request->kref.refcount)); |
| 1358 | kref_put(&img_request->kref, rbd_img_request_destroy); | 1403 | if (img_request_child_test(img_request)) |
| 1404 | kref_put(&img_request->kref, rbd_parent_request_destroy); | ||
| 1405 | else | ||
| 1406 | kref_put(&img_request->kref, rbd_img_request_destroy); | ||
| 1359 | } | 1407 | } |
| 1360 | 1408 | ||
| 1361 | static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, | 1409 | static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, |
| @@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request) | |||
| 1472 | smp_mb(); | 1520 | smp_mb(); |
| 1473 | } | 1521 | } |
| 1474 | 1522 | ||
| 1523 | static void img_request_child_clear(struct rbd_img_request *img_request) | ||
| 1524 | { | ||
| 1525 | clear_bit(IMG_REQ_CHILD, &img_request->flags); | ||
| 1526 | smp_mb(); | ||
| 1527 | } | ||
| 1528 | |||
| 1475 | static bool img_request_child_test(struct rbd_img_request *img_request) | 1529 | static bool img_request_child_test(struct rbd_img_request *img_request) |
| 1476 | { | 1530 | { |
| 1477 | smp_mb(); | 1531 | smp_mb(); |
| @@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request) | |||
| 1484 | smp_mb(); | 1538 | smp_mb(); |
| 1485 | } | 1539 | } |
| 1486 | 1540 | ||
| 1541 | static void img_request_layered_clear(struct rbd_img_request *img_request) | ||
| 1542 | { | ||
| 1543 | clear_bit(IMG_REQ_LAYERED, &img_request->flags); | ||
| 1544 | smp_mb(); | ||
| 1545 | } | ||
| 1546 | |||
| 1487 | static bool img_request_layered_test(struct rbd_img_request *img_request) | 1547 | static bool img_request_layered_test(struct rbd_img_request *img_request) |
| 1488 | { | 1548 | { |
| 1489 | smp_mb(); | 1549 | smp_mb(); |
| @@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref) | |||
| 1827 | kmem_cache_free(rbd_obj_request_cache, obj_request); | 1887 | kmem_cache_free(rbd_obj_request_cache, obj_request); |
| 1828 | } | 1888 | } |
| 1829 | 1889 | ||
| 1890 | /* It's OK to call this for a device with no parent */ | ||
| 1891 | |||
| 1892 | static void rbd_spec_put(struct rbd_spec *spec); | ||
| 1893 | static void rbd_dev_unparent(struct rbd_device *rbd_dev) | ||
| 1894 | { | ||
| 1895 | rbd_dev_remove_parent(rbd_dev); | ||
| 1896 | rbd_spec_put(rbd_dev->parent_spec); | ||
| 1897 | rbd_dev->parent_spec = NULL; | ||
| 1898 | rbd_dev->parent_overlap = 0; | ||
| 1899 | } | ||
| 1900 | |||
| 1901 | /* | ||
| 1902 | * Parent image reference counting is used to determine when an | ||
| 1903 | * image's parent fields can be safely torn down--after there are no | ||
| 1904 | * more in-flight requests to the parent image. When the last | ||
| 1905 | * reference is dropped, cleaning them up is safe. | ||
| 1906 | */ | ||
| 1907 | static void rbd_dev_parent_put(struct rbd_device *rbd_dev) | ||
| 1908 | { | ||
| 1909 | int counter; | ||
| 1910 | |||
| 1911 | if (!rbd_dev->parent_spec) | ||
| 1912 | return; | ||
| 1913 | |||
| 1914 | counter = atomic_dec_return_safe(&rbd_dev->parent_ref); | ||
| 1915 | if (counter > 0) | ||
| 1916 | return; | ||
| 1917 | |||
| 1918 | /* Last reference; clean up parent data structures */ | ||
| 1919 | |||
| 1920 | if (!counter) | ||
| 1921 | rbd_dev_unparent(rbd_dev); | ||
| 1922 | else | ||
| 1923 | rbd_warn(rbd_dev, "parent reference underflow\n"); | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | /* | ||
| 1927 | * If an image has a non-zero parent overlap, get a reference to its | ||
| 1928 | * parent. | ||
| 1929 | * | ||
| 1930 | * We must get the reference before checking for the overlap to | ||
| 1931 | * coordinate properly with zeroing the parent overlap in | ||
| 1932 | * rbd_dev_v2_parent_info() when an image gets flattened. We | ||
| 1933 | * drop it again if there is no overlap. | ||
| 1934 | * | ||
| 1935 | * Returns true if the rbd device has a parent with a non-zero | ||
| 1936 | * overlap and a reference for it was successfully taken, or | ||
| 1937 | * false otherwise. | ||
| 1938 | */ | ||
| 1939 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) | ||
| 1940 | { | ||
| 1941 | int counter; | ||
| 1942 | |||
| 1943 | if (!rbd_dev->parent_spec) | ||
| 1944 | return false; | ||
| 1945 | |||
| 1946 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); | ||
| 1947 | if (counter > 0 && rbd_dev->parent_overlap) | ||
| 1948 | return true; | ||
| 1949 | |||
| 1950 | /* Image was flattened, but parent is not yet torn down */ | ||
| 1951 | |||
| 1952 | if (counter < 0) | ||
| 1953 | rbd_warn(rbd_dev, "parent reference overflow\n"); | ||
| 1954 | |||
| 1955 | return false; | ||
| 1956 | } | ||
| 1957 | |||
| 1830 | /* | 1958 | /* |
| 1831 | * Caller is responsible for filling in the list of object requests | 1959 | * Caller is responsible for filling in the list of object requests |
| 1832 | * that comprises the image request, and the Linux request pointer | 1960 | * that comprises the image request, and the Linux request pointer |
| @@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref) | |||
| 1835 | static struct rbd_img_request *rbd_img_request_create( | 1963 | static struct rbd_img_request *rbd_img_request_create( |
| 1836 | struct rbd_device *rbd_dev, | 1964 | struct rbd_device *rbd_dev, |
| 1837 | u64 offset, u64 length, | 1965 | u64 offset, u64 length, |
| 1838 | bool write_request, | 1966 | bool write_request) |
| 1839 | bool child_request) | ||
| 1840 | { | 1967 | { |
| 1841 | struct rbd_img_request *img_request; | 1968 | struct rbd_img_request *img_request; |
| 1842 | 1969 | ||
| @@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create( | |||
| 1861 | } else { | 1988 | } else { |
| 1862 | img_request->snap_id = rbd_dev->spec->snap_id; | 1989 | img_request->snap_id = rbd_dev->spec->snap_id; |
| 1863 | } | 1990 | } |
| 1864 | if (child_request) | 1991 | if (rbd_dev_parent_get(rbd_dev)) |
| 1865 | img_request_child_set(img_request); | ||
| 1866 | if (rbd_dev->parent_spec) | ||
| 1867 | img_request_layered_set(img_request); | 1992 | img_request_layered_set(img_request); |
| 1868 | spin_lock_init(&img_request->completion_lock); | 1993 | spin_lock_init(&img_request->completion_lock); |
| 1869 | img_request->next_completion = 0; | 1994 | img_request->next_completion = 0; |
| @@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create( | |||
| 1873 | INIT_LIST_HEAD(&img_request->obj_requests); | 1998 | INIT_LIST_HEAD(&img_request->obj_requests); |
| 1874 | kref_init(&img_request->kref); | 1999 | kref_init(&img_request->kref); |
| 1875 | 2000 | ||
| 1876 | rbd_img_request_get(img_request); /* Avoid a warning */ | ||
| 1877 | rbd_img_request_put(img_request); /* TEMPORARY */ | ||
| 1878 | |||
| 1879 | dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, | 2001 | dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, |
| 1880 | write_request ? "write" : "read", offset, length, | 2002 | write_request ? "write" : "read", offset, length, |
| 1881 | img_request); | 2003 | img_request); |
| @@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref) | |||
| 1897 | rbd_img_obj_request_del(img_request, obj_request); | 2019 | rbd_img_obj_request_del(img_request, obj_request); |
| 1898 | rbd_assert(img_request->obj_request_count == 0); | 2020 | rbd_assert(img_request->obj_request_count == 0); |
| 1899 | 2021 | ||
| 2022 | if (img_request_layered_test(img_request)) { | ||
| 2023 | img_request_layered_clear(img_request); | ||
| 2024 | rbd_dev_parent_put(img_request->rbd_dev); | ||
| 2025 | } | ||
| 2026 | |||
| 1900 | if (img_request_write_test(img_request)) | 2027 | if (img_request_write_test(img_request)) |
| 1901 | ceph_put_snap_context(img_request->snapc); | 2028 | ceph_put_snap_context(img_request->snapc); |
| 1902 | 2029 | ||
| 1903 | if (img_request_child_test(img_request)) | ||
| 1904 | rbd_obj_request_put(img_request->obj_request); | ||
| 1905 | |||
| 1906 | kmem_cache_free(rbd_img_request_cache, img_request); | 2030 | kmem_cache_free(rbd_img_request_cache, img_request); |
| 1907 | } | 2031 | } |
| 1908 | 2032 | ||
| 2033 | static struct rbd_img_request *rbd_parent_request_create( | ||
| 2034 | struct rbd_obj_request *obj_request, | ||
| 2035 | u64 img_offset, u64 length) | ||
| 2036 | { | ||
| 2037 | struct rbd_img_request *parent_request; | ||
| 2038 | struct rbd_device *rbd_dev; | ||
| 2039 | |||
| 2040 | rbd_assert(obj_request->img_request); | ||
| 2041 | rbd_dev = obj_request->img_request->rbd_dev; | ||
| 2042 | |||
| 2043 | parent_request = rbd_img_request_create(rbd_dev->parent, | ||
| 2044 | img_offset, length, false); | ||
| 2045 | if (!parent_request) | ||
| 2046 | return NULL; | ||
| 2047 | |||
| 2048 | img_request_child_set(parent_request); | ||
| 2049 | rbd_obj_request_get(obj_request); | ||
| 2050 | parent_request->obj_request = obj_request; | ||
| 2051 | |||
| 2052 | return parent_request; | ||
| 2053 | } | ||
| 2054 | |||
| 2055 | static void rbd_parent_request_destroy(struct kref *kref) | ||
| 2056 | { | ||
| 2057 | struct rbd_img_request *parent_request; | ||
| 2058 | struct rbd_obj_request *orig_request; | ||
| 2059 | |||
| 2060 | parent_request = container_of(kref, struct rbd_img_request, kref); | ||
| 2061 | orig_request = parent_request->obj_request; | ||
| 2062 | |||
| 2063 | parent_request->obj_request = NULL; | ||
| 2064 | rbd_obj_request_put(orig_request); | ||
| 2065 | img_request_child_clear(parent_request); | ||
| 2066 | |||
| 2067 | rbd_img_request_destroy(kref); | ||
| 2068 | } | ||
| 2069 | |||
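The two helpers above exist so that the child linkage (the obj_request back-pointer plus the extra reference on it) is always set up and torn down in one place. A toy sketch of that pairing, with a plain integer standing in for the kref, might look like this (illustrative only, not driver code):

#include <stdlib.h>

struct obj_req { int refs; };

struct img_req {
	int child;			/* img_request_child_set()/clear() analogue */
	struct obj_req *obj_request;	/* original object request being serviced  */
};

static struct img_req *parent_request_create(struct obj_req *orig)
{
	struct img_req *parent = calloc(1, sizeof(*parent));

	if (!parent)
		return NULL;
	parent->child = 1;
	orig->refs++;			/* hold the original request while we run */
	parent->obj_request = orig;
	return parent;
}

static void parent_request_destroy(struct img_req *parent)
{
	parent->obj_request->refs--;	/* drop the reference taken at create time */
	parent->obj_request = NULL;
	parent->child = 0;
	free(parent);			/* rbd_img_request_destroy() analogue */
}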
| 1909 | static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) | 2070 | static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) |
| 1910 | { | 2071 | { |
| 1911 | struct rbd_img_request *img_request; | 2072 | struct rbd_img_request *img_request; |
| @@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) | |||
| 2114 | { | 2275 | { |
| 2115 | struct rbd_img_request *img_request; | 2276 | struct rbd_img_request *img_request; |
| 2116 | struct rbd_device *rbd_dev; | 2277 | struct rbd_device *rbd_dev; |
| 2117 | u64 length; | 2278 | struct page **pages; |
| 2118 | u32 page_count; | 2279 | u32 page_count; |
| 2119 | 2280 | ||
| 2120 | rbd_assert(obj_request->type == OBJ_REQUEST_BIO); | 2281 | rbd_assert(obj_request->type == OBJ_REQUEST_BIO); |
| @@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) | |||
| 2124 | 2285 | ||
| 2125 | rbd_dev = img_request->rbd_dev; | 2286 | rbd_dev = img_request->rbd_dev; |
| 2126 | rbd_assert(rbd_dev); | 2287 | rbd_assert(rbd_dev); |
| 2127 | length = (u64)1 << rbd_dev->header.obj_order; | ||
| 2128 | page_count = (u32)calc_pages_for(0, length); | ||
| 2129 | 2288 | ||
| 2130 | rbd_assert(obj_request->copyup_pages); | 2289 | pages = obj_request->copyup_pages; |
| 2131 | ceph_release_page_vector(obj_request->copyup_pages, page_count); | 2290 | rbd_assert(pages != NULL); |
| 2132 | obj_request->copyup_pages = NULL; | 2291 | obj_request->copyup_pages = NULL; |
| 2292 | page_count = obj_request->copyup_page_count; | ||
| 2293 | rbd_assert(page_count); | ||
| 2294 | obj_request->copyup_page_count = 0; | ||
| 2295 | ceph_release_page_vector(pages, page_count); | ||
| 2133 | 2296 | ||
| 2134 | /* | 2297 | /* |
| 2135 | * We want the transfer count to reflect the size of the | 2298 | * We want the transfer count to reflect the size of the |
| @@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) | |||
| 2153 | struct ceph_osd_client *osdc; | 2316 | struct ceph_osd_client *osdc; |
| 2154 | struct rbd_device *rbd_dev; | 2317 | struct rbd_device *rbd_dev; |
| 2155 | struct page **pages; | 2318 | struct page **pages; |
| 2156 | int result; | 2319 | u32 page_count; |
| 2157 | u64 obj_size; | 2320 | int img_result; |
| 2158 | u64 xferred; | 2321 | u64 parent_length; |
| 2322 | u64 offset; | ||
| 2323 | u64 length; | ||
| 2159 | 2324 | ||
| 2160 | rbd_assert(img_request_child_test(img_request)); | 2325 | rbd_assert(img_request_child_test(img_request)); |
| 2161 | 2326 | ||
| @@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) | |||
| 2164 | pages = img_request->copyup_pages; | 2329 | pages = img_request->copyup_pages; |
| 2165 | rbd_assert(pages != NULL); | 2330 | rbd_assert(pages != NULL); |
| 2166 | img_request->copyup_pages = NULL; | 2331 | img_request->copyup_pages = NULL; |
| 2332 | page_count = img_request->copyup_page_count; | ||
| 2333 | rbd_assert(page_count); | ||
| 2334 | img_request->copyup_page_count = 0; | ||
| 2167 | 2335 | ||
| 2168 | orig_request = img_request->obj_request; | 2336 | orig_request = img_request->obj_request; |
| 2169 | rbd_assert(orig_request != NULL); | 2337 | rbd_assert(orig_request != NULL); |
| 2170 | rbd_assert(orig_request->type == OBJ_REQUEST_BIO); | 2338 | rbd_assert(obj_request_type_valid(orig_request->type)); |
| 2171 | result = img_request->result; | 2339 | img_result = img_request->result; |
| 2172 | obj_size = img_request->length; | 2340 | parent_length = img_request->length; |
| 2173 | xferred = img_request->xferred; | 2341 | rbd_assert(parent_length == img_request->xferred); |
| 2342 | rbd_img_request_put(img_request); | ||
| 2174 | 2343 | ||
| 2175 | rbd_dev = img_request->rbd_dev; | 2344 | rbd_assert(orig_request->img_request); |
| 2345 | rbd_dev = orig_request->img_request->rbd_dev; | ||
| 2176 | rbd_assert(rbd_dev); | 2346 | rbd_assert(rbd_dev); |
| 2177 | rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); | ||
| 2178 | 2347 | ||
| 2179 | rbd_img_request_put(img_request); | 2348 | /* |
| 2349 | * If the overlap has become 0 (most likely because the | ||
| 2350 | * image has been flattened) we need to free the pages | ||
| 2351 | * and re-submit the original write request. | ||
| 2352 | */ | ||
| 2353 | if (!rbd_dev->parent_overlap) { | ||
| 2354 | struct ceph_osd_client *osdc; | ||
| 2180 | 2355 | ||
| 2181 | if (result) | 2356 | ceph_release_page_vector(pages, page_count); |
| 2182 | goto out_err; | 2357 | osdc = &rbd_dev->rbd_client->client->osdc; |
| 2358 | img_result = rbd_obj_request_submit(osdc, orig_request); | ||
| 2359 | if (!img_result) | ||
| 2360 | return; | ||
| 2361 | } | ||
| 2183 | 2362 | ||
| 2184 | /* Allocate the new copyup osd request for the original request */ | 2363 | if (img_result) |
| 2364 | goto out_err; | ||
| 2185 | 2365 | ||
| 2186 | result = -ENOMEM; | 2366 | /* |
| 2187 | rbd_assert(!orig_request->osd_req); | 2367 | * The original osd request is of no use to us any more. |
| 2368 | * We need a new one that can hold the two ops in a copyup | ||
| 2369 | * request. Allocate the new copyup osd request for the | ||
| 2370 | * original request, and release the old one. | ||
| 2371 | */ | ||
| 2372 | img_result = -ENOMEM; | ||
| 2188 | osd_req = rbd_osd_req_create_copyup(orig_request); | 2373 | osd_req = rbd_osd_req_create_copyup(orig_request); |
| 2189 | if (!osd_req) | 2374 | if (!osd_req) |
| 2190 | goto out_err; | 2375 | goto out_err; |
| 2376 | rbd_osd_req_destroy(orig_request->osd_req); | ||
| 2191 | orig_request->osd_req = osd_req; | 2377 | orig_request->osd_req = osd_req; |
| 2192 | orig_request->copyup_pages = pages; | 2378 | orig_request->copyup_pages = pages; |
| 2379 | orig_request->copyup_page_count = page_count; | ||
| 2193 | 2380 | ||
| 2194 | /* Initialize the copyup op */ | 2381 | /* Initialize the copyup op */ |
| 2195 | 2382 | ||
| 2196 | osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); | 2383 | osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); |
| 2197 | osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, | 2384 | osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0, |
| 2198 | false, false); | 2385 | false, false); |
| 2199 | 2386 | ||
| 2200 | /* Then the original write request op */ | 2387 | /* Then the original write request op */ |
| 2201 | 2388 | ||
| 2389 | offset = orig_request->offset; | ||
| 2390 | length = orig_request->length; | ||
| 2202 | osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, | 2391 | osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, |
| 2203 | orig_request->offset, | 2392 | offset, length, 0, 0); |
| 2204 | orig_request->length, 0, 0); | 2393 | if (orig_request->type == OBJ_REQUEST_BIO) |
| 2205 | osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, | 2394 | osd_req_op_extent_osd_data_bio(osd_req, 1, |
| 2206 | orig_request->length); | 2395 | orig_request->bio_list, length); |
| 2396 | else | ||
| 2397 | osd_req_op_extent_osd_data_pages(osd_req, 1, | ||
| 2398 | orig_request->pages, length, | ||
| 2399 | offset & ~PAGE_MASK, false, false); | ||
| 2207 | 2400 | ||
| 2208 | rbd_osd_req_format_write(orig_request); | 2401 | rbd_osd_req_format_write(orig_request); |
| 2209 | 2402 | ||
| @@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) | |||
| 2211 | 2404 | ||
| 2212 | orig_request->callback = rbd_img_obj_copyup_callback; | 2405 | orig_request->callback = rbd_img_obj_copyup_callback; |
| 2213 | osdc = &rbd_dev->rbd_client->client->osdc; | 2406 | osdc = &rbd_dev->rbd_client->client->osdc; |
| 2214 | result = rbd_obj_request_submit(osdc, orig_request); | 2407 | img_result = rbd_obj_request_submit(osdc, orig_request); |
| 2215 | if (!result) | 2408 | if (!img_result) |
| 2216 | return; | 2409 | return; |
| 2217 | out_err: | 2410 | out_err: |
| 2218 | /* Record the error code and complete the request */ | 2411 | /* Record the error code and complete the request */ |
| 2219 | 2412 | ||
| 2220 | orig_request->result = result; | 2413 | orig_request->result = img_result; |
| 2221 | orig_request->xferred = 0; | 2414 | orig_request->xferred = 0; |
| 2222 | obj_request_done_set(orig_request); | 2415 | obj_request_done_set(orig_request); |
| 2223 | rbd_obj_request_complete(orig_request); | 2416 | rbd_obj_request_complete(orig_request); |
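The same guard now appears in several completion callbacks: when the parent overlap has dropped to zero because the image was flattened mid-flight, the callback skips the layered work and simply resubmits the original request. A compact sketch of that pattern (illustrative only; the submit callback stands in for rbd_obj_request_submit()):

#include <stdbool.h>
#include <stdint.h>

struct model_dev { uint64_t parent_overlap; };

/* Returns true when the original request was handed back to the OSD client
 * (flattened image) and the caller should stop; false when the normal
 * layered path should continue. */
static bool resubmit_if_flattened(struct model_dev *dev, void *osdc,
				  void *orig_request,
				  int (*submit)(void *osdc, void *req))
{
	if (dev->parent_overlap)
		return false;		/* still layered: keep going */

	return submit(osdc, orig_request) == 0;
}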
| @@ -2249,7 +2442,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) | |||
| 2249 | int result; | 2442 | int result; |
| 2250 | 2443 | ||
| 2251 | rbd_assert(obj_request_img_data_test(obj_request)); | 2444 | rbd_assert(obj_request_img_data_test(obj_request)); |
| 2252 | rbd_assert(obj_request->type == OBJ_REQUEST_BIO); | 2445 | rbd_assert(obj_request_type_valid(obj_request->type)); |
| 2253 | 2446 | ||
| 2254 | img_request = obj_request->img_request; | 2447 | img_request = obj_request->img_request; |
| 2255 | rbd_assert(img_request != NULL); | 2448 | rbd_assert(img_request != NULL); |
| @@ -2257,15 +2450,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) | |||
| 2257 | rbd_assert(rbd_dev->parent != NULL); | 2450 | rbd_assert(rbd_dev->parent != NULL); |
| 2258 | 2451 | ||
| 2259 | /* | 2452 | /* |
| 2260 | * First things first. The original osd request is of no | ||
| 2261 | * use to use any more, we'll need a new one that can hold | ||
| 2262 | * the two ops in a copyup request. We'll get that later, | ||
| 2263 | * but for now we can release the old one. | ||
| 2264 | */ | ||
| 2265 | rbd_osd_req_destroy(obj_request->osd_req); | ||
| 2266 | obj_request->osd_req = NULL; | ||
| 2267 | |||
| 2268 | /* | ||
| 2269 | * Determine the byte range covered by the object in the | 2453 | * Determine the byte range covered by the object in the |
| 2270 | * child image to which the original request was to be sent. | 2454 | * child image to which the original request was to be sent. |
| 2271 | */ | 2455 | */ |
| @@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) | |||
| 2295 | } | 2479 | } |
| 2296 | 2480 | ||
| 2297 | result = -ENOMEM; | 2481 | result = -ENOMEM; |
| 2298 | parent_request = rbd_img_request_create(rbd_dev->parent, | 2482 | parent_request = rbd_parent_request_create(obj_request, |
| 2299 | img_offset, length, | 2483 | img_offset, length); |
| 2300 | false, true); | ||
| 2301 | if (!parent_request) | 2484 | if (!parent_request) |
| 2302 | goto out_err; | 2485 | goto out_err; |
| 2303 | rbd_obj_request_get(obj_request); | ||
| 2304 | parent_request->obj_request = obj_request; | ||
| 2305 | 2486 | ||
| 2306 | result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); | 2487 | result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); |
| 2307 | if (result) | 2488 | if (result) |
| 2308 | goto out_err; | 2489 | goto out_err; |
| 2309 | parent_request->copyup_pages = pages; | 2490 | parent_request->copyup_pages = pages; |
| 2491 | parent_request->copyup_page_count = page_count; | ||
| 2310 | 2492 | ||
| 2311 | parent_request->callback = rbd_img_obj_parent_read_full_callback; | 2493 | parent_request->callback = rbd_img_obj_parent_read_full_callback; |
| 2312 | result = rbd_img_request_submit(parent_request); | 2494 | result = rbd_img_request_submit(parent_request); |
| @@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) | |||
| 2314 | return 0; | 2496 | return 0; |
| 2315 | 2497 | ||
| 2316 | parent_request->copyup_pages = NULL; | 2498 | parent_request->copyup_pages = NULL; |
| 2499 | parent_request->copyup_page_count = 0; | ||
| 2317 | parent_request->obj_request = NULL; | 2500 | parent_request->obj_request = NULL; |
| 2318 | rbd_obj_request_put(obj_request); | 2501 | rbd_obj_request_put(obj_request); |
| 2319 | out_err: | 2502 | out_err: |
| @@ -2331,6 +2514,7 @@ out_err: | |||
| 2331 | static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) | 2514 | static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) |
| 2332 | { | 2515 | { |
| 2333 | struct rbd_obj_request *orig_request; | 2516 | struct rbd_obj_request *orig_request; |
| 2517 | struct rbd_device *rbd_dev; | ||
| 2334 | int result; | 2518 | int result; |
| 2335 | 2519 | ||
| 2336 | rbd_assert(!obj_request_img_data_test(obj_request)); | 2520 | rbd_assert(!obj_request_img_data_test(obj_request)); |
| @@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) | |||
| 2353 | obj_request->xferred, obj_request->length); | 2537 | obj_request->xferred, obj_request->length); |
| 2354 | rbd_obj_request_put(obj_request); | 2538 | rbd_obj_request_put(obj_request); |
| 2355 | 2539 | ||
| 2356 | rbd_assert(orig_request); | 2540 | /* |
| 2357 | rbd_assert(orig_request->img_request); | 2541 | * If the overlap has become 0 (most likely because the |
| 2542 | * image has been flattened) we need to free the pages | ||
| 2543 | * and re-submit the original write request. | ||
| 2544 | */ | ||
| 2545 | rbd_dev = orig_request->img_request->rbd_dev; | ||
| 2546 | if (!rbd_dev->parent_overlap) { | ||
| 2547 | struct ceph_osd_client *osdc; | ||
| 2548 | |||
| 2549 | rbd_obj_request_put(orig_request); | ||
| 2550 | osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 2551 | result = rbd_obj_request_submit(osdc, orig_request); | ||
| 2552 | if (!result) | ||
| 2553 | return; | ||
| 2554 | } | ||
| 2358 | 2555 | ||
| 2359 | /* | 2556 | /* |
| 2360 | * Our only purpose here is to determine whether the object | 2557 | * Our only purpose here is to determine whether the object |
| @@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) | |||
| 2512 | struct rbd_obj_request *obj_request; | 2709 | struct rbd_obj_request *obj_request; |
| 2513 | struct rbd_device *rbd_dev; | 2710 | struct rbd_device *rbd_dev; |
| 2514 | u64 obj_end; | 2711 | u64 obj_end; |
| 2712 | u64 img_xferred; | ||
| 2713 | int img_result; | ||
| 2515 | 2714 | ||
| 2516 | rbd_assert(img_request_child_test(img_request)); | 2715 | rbd_assert(img_request_child_test(img_request)); |
| 2517 | 2716 | ||
| 2717 | /* First get what we need from the image request and release it */ | ||
| 2718 | |||
| 2518 | obj_request = img_request->obj_request; | 2719 | obj_request = img_request->obj_request; |
| 2720 | img_xferred = img_request->xferred; | ||
| 2721 | img_result = img_request->result; | ||
| 2722 | rbd_img_request_put(img_request); | ||
| 2723 | |||
| 2724 | /* | ||
| 2725 | * If the overlap has become 0 (most likely because the | ||
| 2726 | * image has been flattened) we need to re-submit the | ||
| 2727 | * original request. | ||
| 2728 | */ | ||
| 2519 | rbd_assert(obj_request); | 2729 | rbd_assert(obj_request); |
| 2520 | rbd_assert(obj_request->img_request); | 2730 | rbd_assert(obj_request->img_request); |
| 2731 | rbd_dev = obj_request->img_request->rbd_dev; | ||
| 2732 | if (!rbd_dev->parent_overlap) { | ||
| 2733 | struct ceph_osd_client *osdc; | ||
| 2734 | |||
| 2735 | osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 2736 | img_result = rbd_obj_request_submit(osdc, obj_request); | ||
| 2737 | if (!img_result) | ||
| 2738 | return; | ||
| 2739 | } | ||
| 2521 | 2740 | ||
| 2522 | obj_request->result = img_request->result; | 2741 | obj_request->result = img_result; |
| 2523 | if (obj_request->result) | 2742 | if (obj_request->result) |
| 2524 | goto out; | 2743 | goto out; |
| 2525 | 2744 | ||
| @@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) | |||
| 2532 | */ | 2751 | */ |
| 2533 | rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); | 2752 | rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); |
| 2534 | obj_end = obj_request->img_offset + obj_request->length; | 2753 | obj_end = obj_request->img_offset + obj_request->length; |
| 2535 | rbd_dev = obj_request->img_request->rbd_dev; | ||
| 2536 | if (obj_end > rbd_dev->parent_overlap) { | 2754 | if (obj_end > rbd_dev->parent_overlap) { |
| 2537 | u64 xferred = 0; | 2755 | u64 xferred = 0; |
| 2538 | 2756 | ||
| @@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) | |||
| 2540 | xferred = rbd_dev->parent_overlap - | 2758 | xferred = rbd_dev->parent_overlap - |
| 2541 | obj_request->img_offset; | 2759 | obj_request->img_offset; |
| 2542 | 2760 | ||
| 2543 | obj_request->xferred = min(img_request->xferred, xferred); | 2761 | obj_request->xferred = min(img_xferred, xferred); |
| 2544 | } else { | 2762 | } else { |
| 2545 | obj_request->xferred = img_request->xferred; | 2763 | obj_request->xferred = img_xferred; |
| 2546 | } | 2764 | } |
| 2547 | out: | 2765 | out: |
| 2548 | rbd_img_request_put(img_request); | ||
| 2549 | rbd_img_obj_request_read_callback(obj_request); | 2766 | rbd_img_obj_request_read_callback(obj_request); |
| 2550 | rbd_obj_request_complete(obj_request); | 2767 | rbd_obj_request_complete(obj_request); |
| 2551 | } | 2768 | } |
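The read callback above also clamps the reported transfer count to the portion of the object that the parent overlap actually covers. A standalone sketch of that arithmetic (illustrative only):

#include <stdint.h>

static uint64_t clamp_parent_xferred(uint64_t img_offset, uint64_t length,
				     uint64_t parent_overlap,
				     uint64_t img_xferred)
{
	uint64_t obj_end = img_offset + length;
	uint64_t covered;

	if (obj_end <= parent_overlap)
		return img_xferred;	/* fully inside the overlap */

	/* Only the bytes below the overlap can have come from the parent;
	 * the remainder is not backed by the parent at all. */
	covered = img_offset < parent_overlap ? parent_overlap - img_offset : 0;
	return img_xferred < covered ? img_xferred : covered;
}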
| 2552 | 2769 | ||
| 2553 | static void rbd_img_parent_read(struct rbd_obj_request *obj_request) | 2770 | static void rbd_img_parent_read(struct rbd_obj_request *obj_request) |
| 2554 | { | 2771 | { |
| 2555 | struct rbd_device *rbd_dev; | ||
| 2556 | struct rbd_img_request *img_request; | 2772 | struct rbd_img_request *img_request; |
| 2557 | int result; | 2773 | int result; |
| 2558 | 2774 | ||
| 2559 | rbd_assert(obj_request_img_data_test(obj_request)); | 2775 | rbd_assert(obj_request_img_data_test(obj_request)); |
| 2560 | rbd_assert(obj_request->img_request != NULL); | 2776 | rbd_assert(obj_request->img_request != NULL); |
| 2561 | rbd_assert(obj_request->result == (s32) -ENOENT); | 2777 | rbd_assert(obj_request->result == (s32) -ENOENT); |
| 2562 | rbd_assert(obj_request->type == OBJ_REQUEST_BIO); | 2778 | rbd_assert(obj_request_type_valid(obj_request->type)); |
| 2563 | 2779 | ||
| 2564 | rbd_dev = obj_request->img_request->rbd_dev; | ||
| 2565 | rbd_assert(rbd_dev->parent != NULL); | ||
| 2566 | /* rbd_read_finish(obj_request, obj_request->length); */ | 2780 | /* rbd_read_finish(obj_request, obj_request->length); */ |
| 2567 | img_request = rbd_img_request_create(rbd_dev->parent, | 2781 | img_request = rbd_parent_request_create(obj_request, |
| 2568 | obj_request->img_offset, | 2782 | obj_request->img_offset, |
| 2569 | obj_request->length, | 2783 | obj_request->length); |
| 2570 | false, true); | ||
| 2571 | result = -ENOMEM; | 2784 | result = -ENOMEM; |
| 2572 | if (!img_request) | 2785 | if (!img_request) |
| 2573 | goto out_err; | 2786 | goto out_err; |
| 2574 | 2787 | ||
| 2575 | rbd_obj_request_get(obj_request); | 2788 | if (obj_request->type == OBJ_REQUEST_BIO) |
| 2576 | img_request->obj_request = obj_request; | 2789 | result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, |
| 2577 | 2790 | obj_request->bio_list); | |
| 2578 | result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, | 2791 | else |
| 2579 | obj_request->bio_list); | 2792 | result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES, |
| 2793 | obj_request->pages); | ||
| 2580 | if (result) | 2794 | if (result) |
| 2581 | goto out_err; | 2795 | goto out_err; |
| 2582 | 2796 | ||
| @@ -2626,6 +2840,7 @@ out: | |||
| 2626 | static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) | 2840 | static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) |
| 2627 | { | 2841 | { |
| 2628 | struct rbd_device *rbd_dev = (struct rbd_device *)data; | 2842 | struct rbd_device *rbd_dev = (struct rbd_device *)data; |
| 2843 | int ret; | ||
| 2629 | 2844 | ||
| 2630 | if (!rbd_dev) | 2845 | if (!rbd_dev) |
| 2631 | return; | 2846 | return; |
| @@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) | |||
| 2633 | dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, | 2848 | dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, |
| 2634 | rbd_dev->header_name, (unsigned long long)notify_id, | 2849 | rbd_dev->header_name, (unsigned long long)notify_id, |
| 2635 | (unsigned int)opcode); | 2850 | (unsigned int)opcode); |
| 2636 | (void)rbd_dev_refresh(rbd_dev); | 2851 | ret = rbd_dev_refresh(rbd_dev); |
| 2852 | if (ret) | ||
| 2853 | rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret); | ||
| 2637 | 2854 | ||
| 2638 | rbd_obj_notify_ack(rbd_dev, notify_id); | 2855 | rbd_obj_notify_ack(rbd_dev, notify_id); |
| 2639 | } | 2856 | } |
| @@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) | |||
| 2642 | * Request sync osd watch/unwatch. The value of "start" determines | 2859 | * Request sync osd watch/unwatch. The value of "start" determines |
| 2643 | * whether a watch request is being initiated or torn down. | 2860 | * whether a watch request is being initiated or torn down. |
| 2644 | */ | 2861 | */ |
| 2645 | static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) | 2862 | static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) |
| 2646 | { | 2863 | { |
| 2647 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | 2864 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; |
| 2648 | struct rbd_obj_request *obj_request; | 2865 | struct rbd_obj_request *obj_request; |
| @@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) | |||
| 2676 | rbd_dev->watch_request->osd_req); | 2893 | rbd_dev->watch_request->osd_req); |
| 2677 | 2894 | ||
| 2678 | osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, | 2895 | osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, |
| 2679 | rbd_dev->watch_event->cookie, 0, start); | 2896 | rbd_dev->watch_event->cookie, 0, start ? 1 : 0); |
| 2680 | rbd_osd_req_format_write(obj_request); | 2897 | rbd_osd_req_format_write(obj_request); |
| 2681 | 2898 | ||
| 2682 | ret = rbd_obj_request_submit(osdc, obj_request); | 2899 | ret = rbd_obj_request_submit(osdc, obj_request); |
| @@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q) | |||
| 2869 | goto end_request; /* Shouldn't happen */ | 3086 | goto end_request; /* Shouldn't happen */ |
| 2870 | } | 3087 | } |
| 2871 | 3088 | ||
| 3089 | result = -EIO; | ||
| 3090 | if (offset + length > rbd_dev->mapping.size) { | ||
| 3091 | rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n", | ||
| 3092 | offset, length, rbd_dev->mapping.size); | ||
| 3093 | goto end_request; | ||
| 3094 | } | ||
| 3095 | |||
| 2872 | result = -ENOMEM; | 3096 | result = -ENOMEM; |
| 2873 | img_request = rbd_img_request_create(rbd_dev, offset, length, | 3097 | img_request = rbd_img_request_create(rbd_dev, offset, length, |
| 2874 | write_request, false); | 3098 | write_request); |
| 2875 | if (!img_request) | 3099 | if (!img_request) |
| 2876 | goto end_request; | 3100 | goto end_request; |
| 2877 | 3101 | ||
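The request function now rejects I/O that would run past the mapped image size before building an image request. The check itself is a one-liner; as a sketch (illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* A request that extends past the mapped size fails with -EIO. */
static bool beyond_eod(uint64_t offset, uint64_t length, uint64_t mapping_size)
{
	return offset + length > mapping_size;
}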
| @@ -3022,17 +3246,11 @@ out: | |||
| 3022 | } | 3246 | } |
| 3023 | 3247 | ||
| 3024 | /* | 3248 | /* |
| 3025 | * Read the complete header for the given rbd device. | 3249 | * Read the complete header for the given rbd device. On successful |
| 3026 | * | 3250 | * return, the rbd_dev->header field will contain up-to-date |
| 3027 | * Returns a pointer to a dynamically-allocated buffer containing | 3251 | * information about the image. |
| 3028 | * the complete and validated header. Caller can pass the address | ||
| 3029 | * of a variable that will be filled in with the version of the | ||
| 3030 | * header object at the time it was read. | ||
| 3031 | * | ||
| 3032 | * Returns a pointer-coded errno if a failure occurs. | ||
| 3033 | */ | 3252 | */ |
| 3034 | static struct rbd_image_header_ondisk * | 3253 | static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) |
| 3035 | rbd_dev_v1_header_read(struct rbd_device *rbd_dev) | ||
| 3036 | { | 3254 | { |
| 3037 | struct rbd_image_header_ondisk *ondisk = NULL; | 3255 | struct rbd_image_header_ondisk *ondisk = NULL; |
| 3038 | u32 snap_count = 0; | 3256 | u32 snap_count = 0; |
| @@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) | |||
| 3057 | size += names_size; | 3275 | size += names_size; |
| 3058 | ondisk = kmalloc(size, GFP_KERNEL); | 3276 | ondisk = kmalloc(size, GFP_KERNEL); |
| 3059 | if (!ondisk) | 3277 | if (!ondisk) |
| 3060 | return ERR_PTR(-ENOMEM); | 3278 | return -ENOMEM; |
| 3061 | 3279 | ||
| 3062 | ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, | 3280 | ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, |
| 3063 | 0, size, ondisk); | 3281 | 0, size, ondisk); |
| 3064 | if (ret < 0) | 3282 | if (ret < 0) |
| 3065 | goto out_err; | 3283 | goto out; |
| 3066 | if ((size_t)ret < size) { | 3284 | if ((size_t)ret < size) { |
| 3067 | ret = -ENXIO; | 3285 | ret = -ENXIO; |
| 3068 | rbd_warn(rbd_dev, "short header read (want %zd got %d)", | 3286 | rbd_warn(rbd_dev, "short header read (want %zd got %d)", |
| 3069 | size, ret); | 3287 | size, ret); |
| 3070 | goto out_err; | 3288 | goto out; |
| 3071 | } | 3289 | } |
| 3072 | if (!rbd_dev_ondisk_valid(ondisk)) { | 3290 | if (!rbd_dev_ondisk_valid(ondisk)) { |
| 3073 | ret = -ENXIO; | 3291 | ret = -ENXIO; |
| 3074 | rbd_warn(rbd_dev, "invalid header"); | 3292 | rbd_warn(rbd_dev, "invalid header"); |
| 3075 | goto out_err; | 3293 | goto out; |
| 3076 | } | 3294 | } |
| 3077 | 3295 | ||
| 3078 | names_size = le64_to_cpu(ondisk->snap_names_len); | 3296 | names_size = le64_to_cpu(ondisk->snap_names_len); |
| @@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) | |||
| 3080 | snap_count = le32_to_cpu(ondisk->snap_count); | 3298 | snap_count = le32_to_cpu(ondisk->snap_count); |
| 3081 | } while (snap_count != want_count); | 3299 | } while (snap_count != want_count); |
| 3082 | 3300 | ||
| 3083 | return ondisk; | 3301 | ret = rbd_header_from_disk(rbd_dev, ondisk); |
| 3084 | 3302 | out: | |
| 3085 | out_err: | ||
| 3086 | kfree(ondisk); | ||
| 3087 | |||
| 3088 | return ERR_PTR(ret); | ||
| 3089 | } | ||
| 3090 | |||
| 3091 | /* | ||
| 3092 | * reload the ondisk the header | ||
| 3093 | */ | ||
| 3094 | static int rbd_read_header(struct rbd_device *rbd_dev, | ||
| 3095 | struct rbd_image_header *header) | ||
| 3096 | { | ||
| 3097 | struct rbd_image_header_ondisk *ondisk; | ||
| 3098 | int ret; | ||
| 3099 | |||
| 3100 | ondisk = rbd_dev_v1_header_read(rbd_dev); | ||
| 3101 | if (IS_ERR(ondisk)) | ||
| 3102 | return PTR_ERR(ondisk); | ||
| 3103 | ret = rbd_header_from_disk(header, ondisk); | ||
| 3104 | kfree(ondisk); | 3303 | kfree(ondisk); |
| 3105 | 3304 | ||
| 3106 | return ret; | 3305 | return ret; |
| 3107 | } | 3306 | } |
| 3108 | 3307 | ||
| 3109 | static void rbd_update_mapping_size(struct rbd_device *rbd_dev) | ||
| 3110 | { | ||
| 3111 | if (rbd_dev->spec->snap_id != CEPH_NOSNAP) | ||
| 3112 | return; | ||
| 3113 | |||
| 3114 | if (rbd_dev->mapping.size != rbd_dev->header.image_size) { | ||
| 3115 | sector_t size; | ||
| 3116 | |||
| 3117 | rbd_dev->mapping.size = rbd_dev->header.image_size; | ||
| 3118 | size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; | ||
| 3119 | dout("setting size to %llu sectors", (unsigned long long)size); | ||
| 3120 | set_capacity(rbd_dev->disk, size); | ||
| 3121 | } | ||
| 3122 | } | ||
| 3123 | |||
| 3124 | /* | ||
| 3125 | * only read the first part of the ondisk header, without the snaps info | ||
| 3126 | */ | ||
| 3127 | static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) | ||
| 3128 | { | ||
| 3129 | int ret; | ||
| 3130 | struct rbd_image_header h; | ||
| 3131 | |||
| 3132 | ret = rbd_read_header(rbd_dev, &h); | ||
| 3133 | if (ret < 0) | ||
| 3134 | return ret; | ||
| 3135 | |||
| 3136 | down_write(&rbd_dev->header_rwsem); | ||
| 3137 | |||
| 3138 | /* Update image size, and check for resize of mapped image */ | ||
| 3139 | rbd_dev->header.image_size = h.image_size; | ||
| 3140 | rbd_update_mapping_size(rbd_dev); | ||
| 3141 | |||
| 3142 | /* rbd_dev->header.object_prefix shouldn't change */ | ||
| 3143 | kfree(rbd_dev->header.snap_sizes); | ||
| 3144 | kfree(rbd_dev->header.snap_names); | ||
| 3145 | /* osd requests may still refer to snapc */ | ||
| 3146 | ceph_put_snap_context(rbd_dev->header.snapc); | ||
| 3147 | |||
| 3148 | rbd_dev->header.image_size = h.image_size; | ||
| 3149 | rbd_dev->header.snapc = h.snapc; | ||
| 3150 | rbd_dev->header.snap_names = h.snap_names; | ||
| 3151 | rbd_dev->header.snap_sizes = h.snap_sizes; | ||
| 3152 | /* Free the extra copy of the object prefix */ | ||
| 3153 | if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) | ||
| 3154 | rbd_warn(rbd_dev, "object prefix changed (ignoring)"); | ||
| 3155 | kfree(h.object_prefix); | ||
| 3156 | |||
| 3157 | up_write(&rbd_dev->header_rwsem); | ||
| 3158 | |||
| 3159 | return ret; | ||
| 3160 | } | ||
| 3161 | |||
| 3162 | /* | 3308 | /* |
| 3163 | * Clear the rbd device's EXISTS flag if the snapshot it's mapped to | 3309 | * Clear the rbd device's EXISTS flag if the snapshot it's mapped to |
| 3164 | * has disappeared from the (just updated) snapshot context. | 3310 | * has disappeared from the (just updated) snapshot context. |
| @@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) | |||
| 3180 | 3326 | ||
| 3181 | static int rbd_dev_refresh(struct rbd_device *rbd_dev) | 3327 | static int rbd_dev_refresh(struct rbd_device *rbd_dev) |
| 3182 | { | 3328 | { |
| 3183 | u64 image_size; | 3329 | u64 mapping_size; |
| 3184 | int ret; | 3330 | int ret; |
| 3185 | 3331 | ||
| 3186 | rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); | 3332 | rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); |
| 3187 | image_size = rbd_dev->header.image_size; | 3333 | mapping_size = rbd_dev->mapping.size; |
| 3188 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | 3334 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); |
| 3189 | if (rbd_dev->image_format == 1) | 3335 | if (rbd_dev->image_format == 1) |
| 3190 | ret = rbd_dev_v1_refresh(rbd_dev); | 3336 | ret = rbd_dev_v1_header_info(rbd_dev); |
| 3191 | else | 3337 | else |
| 3192 | ret = rbd_dev_v2_refresh(rbd_dev); | 3338 | ret = rbd_dev_v2_header_info(rbd_dev); |
| 3193 | 3339 | ||
| 3194 | /* If it's a mapped snapshot, validate its EXISTS flag */ | 3340 | /* If it's a mapped snapshot, validate its EXISTS flag */ |
| 3195 | 3341 | ||
| 3196 | rbd_exists_validate(rbd_dev); | 3342 | rbd_exists_validate(rbd_dev); |
| 3197 | mutex_unlock(&ctl_mutex); | 3343 | mutex_unlock(&ctl_mutex); |
| 3198 | if (ret) | 3344 | if (mapping_size != rbd_dev->mapping.size) { |
| 3199 | rbd_warn(rbd_dev, "got notification but failed to " | 3345 | sector_t size; |
| 3200 | " update snaps: %d\n", ret); | 3346 | |
| 3201 | if (image_size != rbd_dev->header.image_size) | 3347 | size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; |
| 3348 | dout("setting size to %llu sectors", (unsigned long long)size); | ||
| 3349 | set_capacity(rbd_dev->disk, size); | ||
| 3202 | revalidate_disk(rbd_dev->disk); | 3350 | revalidate_disk(rbd_dev->disk); |
| 3351 | } | ||
| 3203 | 3352 | ||
| 3204 | return ret; | 3353 | return ret; |
| 3205 | } | 3354 | } |
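With the per-format refresh helpers folded together, the refresh path snapshots the mapped size, refreshes the header, and only touches the block-layer capacity when that size actually changed. A rough sketch of the flow (illustrative only; the callbacks stand in for the v1/v2 helpers and set_capacity()):

#include <stdint.h>

#define MODEL_SECTOR_SIZE 512u

struct model_dev { uint64_t mapping_size; };

static int model_refresh(struct model_dev *dev,
			 int (*header_info)(struct model_dev *),
			 void (*set_capacity_sectors)(uint64_t))
{
	uint64_t old_size = dev->mapping_size;
	int ret = header_info(dev);	/* v1 or v2 header refresh */

	if (dev->mapping_size != old_size)
		set_capacity_sectors(dev->mapping_size / MODEL_SECTOR_SIZE);

	return ret;			/* any error is still reported upward */
}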
| @@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev, | |||
| 3403 | int ret; | 3552 | int ret; |
| 3404 | 3553 | ||
| 3405 | ret = rbd_dev_refresh(rbd_dev); | 3554 | ret = rbd_dev_refresh(rbd_dev); |
| 3555 | if (ret) | ||
| 3556 | rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret); | ||
| 3406 | 3557 | ||
| 3407 | return ret < 0 ? ret : size; | 3558 | return ret < 0 ? ret : size; |
| 3408 | } | 3559 | } |
| @@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, | |||
| 3501 | 3652 | ||
| 3502 | spin_lock_init(&rbd_dev->lock); | 3653 | spin_lock_init(&rbd_dev->lock); |
| 3503 | rbd_dev->flags = 0; | 3654 | rbd_dev->flags = 0; |
| 3655 | atomic_set(&rbd_dev->parent_ref, 0); | ||
| 3504 | INIT_LIST_HEAD(&rbd_dev->node); | 3656 | INIT_LIST_HEAD(&rbd_dev->node); |
| 3505 | init_rwsem(&rbd_dev->header_rwsem); | 3657 | init_rwsem(&rbd_dev->header_rwsem); |
| 3506 | 3658 | ||
| @@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 3650 | __le64 snapid; | 3802 | __le64 snapid; |
| 3651 | void *p; | 3803 | void *p; |
| 3652 | void *end; | 3804 | void *end; |
| 3805 | u64 pool_id; | ||
| 3653 | char *image_id; | 3806 | char *image_id; |
| 3654 | u64 overlap; | 3807 | u64 overlap; |
| 3655 | int ret; | 3808 | int ret; |
| @@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 3680 | p = reply_buf; | 3833 | p = reply_buf; |
| 3681 | end = reply_buf + ret; | 3834 | end = reply_buf + ret; |
| 3682 | ret = -ERANGE; | 3835 | ret = -ERANGE; |
| 3683 | ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); | 3836 | ceph_decode_64_safe(&p, end, pool_id, out_err); |
| 3684 | if (parent_spec->pool_id == CEPH_NOPOOL) | 3837 | if (pool_id == CEPH_NOPOOL) { |
| 3838 | /* | ||
| 3839 | * Either the parent never existed, or we have | ||
| 3840 | * record of it but the image got flattened so it no | ||
| 3841 | * longer has a parent. When the parent of a | ||
| 3842 | * layered image disappears we immediately set the | ||
| 3843 | * overlap to 0. The effect of this is that all new | ||
| 3844 | * requests will be treated as if the image had no | ||
| 3845 | * parent. | ||
| 3846 | */ | ||
| 3847 | if (rbd_dev->parent_overlap) { | ||
| 3848 | rbd_dev->parent_overlap = 0; | ||
| 3849 | smp_mb(); | ||
| 3850 | rbd_dev_parent_put(rbd_dev); | ||
| 3851 | pr_info("%s: clone image has been flattened\n", | ||
| 3852 | rbd_dev->disk->disk_name); | ||
| 3853 | } | ||
| 3854 | |||
| 3685 | goto out; /* No parent? No problem. */ | 3855 | goto out; /* No parent? No problem. */ |
| 3856 | } | ||
| 3686 | 3857 | ||
| 3687 | /* The ceph file layout needs to fit pool id in 32 bits */ | 3858 | /* The ceph file layout needs to fit pool id in 32 bits */ |
| 3688 | 3859 | ||
| 3689 | ret = -EIO; | 3860 | ret = -EIO; |
| 3690 | if (parent_spec->pool_id > (u64)U32_MAX) { | 3861 | if (pool_id > (u64)U32_MAX) { |
| 3691 | rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", | 3862 | rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", |
| 3692 | (unsigned long long)parent_spec->pool_id, U32_MAX); | 3863 | (unsigned long long)pool_id, U32_MAX); |
| 3693 | goto out_err; | 3864 | goto out_err; |
| 3694 | } | 3865 | } |
| 3866 | parent_spec->pool_id = pool_id; | ||
| 3695 | 3867 | ||
| 3696 | image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | 3868 | image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); |
| 3697 | if (IS_ERR(image_id)) { | 3869 | if (IS_ERR(image_id)) { |
| @@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 3702 | ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); | 3874 | ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); |
| 3703 | ceph_decode_64_safe(&p, end, overlap, out_err); | 3875 | ceph_decode_64_safe(&p, end, overlap, out_err); |
| 3704 | 3876 | ||
| 3705 | rbd_dev->parent_overlap = overlap; | 3877 | if (overlap) { |
| 3706 | rbd_dev->parent_spec = parent_spec; | 3878 | rbd_spec_put(rbd_dev->parent_spec); |
| 3707 | parent_spec = NULL; /* rbd_dev now owns this */ | 3879 | rbd_dev->parent_spec = parent_spec; |
| 3880 | parent_spec = NULL; /* rbd_dev now owns this */ | ||
| 3881 | rbd_dev->parent_overlap = overlap; | ||
| 3882 | } else { | ||
| 3883 | rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n"); | ||
| 3884 | } | ||
| 3708 | out: | 3885 | out: |
| 3709 | ret = 0; | 3886 | ret = 0; |
| 3710 | out_err: | 3887 | out_err: |
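The parent-info probe now distinguishes three cases: no parent pool at all (it never existed, or the clone was flattened, in which case the overlap is zeroed), a parent with a non-zero overlap (adopted), and a parent whose overlap is zero (ignored with a warning). A sketch of that decision tree (illustrative only; MODEL_NOPOOL stands in for CEPH_NOPOOL):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_NOPOOL UINT64_MAX		/* stand-in for CEPH_NOPOOL */

struct model_dev {
	bool has_parent_spec;
	uint64_t parent_overlap;
};

static void model_parent_info(struct model_dev *dev,
			      uint64_t pool_id, uint64_t overlap)
{
	if (pool_id == MODEL_NOPOOL) {
		/* No parent on record any more: zero the overlap so all new
		 * requests behave as if the image never had a parent. */
		if (dev->parent_overlap) {
			dev->parent_overlap = 0;
			printf("clone image has been flattened\n");
		}
		return;
	}

	if (overlap) {			/* adopt the parent spec and overlap */
		dev->has_parent_spec = true;
		dev->parent_overlap = overlap;
	} else {
		printf("ignoring parent of clone with overlap 0\n");
	}
}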
| @@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) | |||
| 4002 | for (i = 0; i < snap_count; i++) | 4179 | for (i = 0; i < snap_count; i++) |
| 4003 | snapc->snaps[i] = ceph_decode_64(&p); | 4180 | snapc->snaps[i] = ceph_decode_64(&p); |
| 4004 | 4181 | ||
| 4182 | ceph_put_snap_context(rbd_dev->header.snapc); | ||
| 4005 | rbd_dev->header.snapc = snapc; | 4183 | rbd_dev->header.snapc = snapc; |
| 4006 | 4184 | ||
| 4007 | dout(" snap context seq = %llu, snap_count = %u\n", | 4185 | dout(" snap context seq = %llu, snap_count = %u\n", |
| @@ -4053,21 +4231,56 @@ out: | |||
| 4053 | return snap_name; | 4231 | return snap_name; |
| 4054 | } | 4232 | } |
| 4055 | 4233 | ||
| 4056 | static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) | 4234 | static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) |
| 4057 | { | 4235 | { |
| 4236 | bool first_time = rbd_dev->header.object_prefix == NULL; | ||
| 4058 | int ret; | 4237 | int ret; |
| 4059 | 4238 | ||
| 4060 | down_write(&rbd_dev->header_rwsem); | 4239 | down_write(&rbd_dev->header_rwsem); |
| 4061 | 4240 | ||
| 4241 | if (first_time) { | ||
| 4242 | ret = rbd_dev_v2_header_onetime(rbd_dev); | ||
| 4243 | if (ret) | ||
| 4244 | goto out; | ||
| 4245 | } | ||
| 4246 | |||
| 4247 | /* | ||
| 4248 | * If the image supports layering, get the parent info. We | ||
| 4249 | * need to probe the first time regardless. Thereafter we | ||
| 4250 | * only need to if there's a parent, to see if it has | ||
| 4251 | * disappeared due to the mapped image getting flattened. | ||
| 4252 | */ | ||
| 4253 | if (rbd_dev->header.features & RBD_FEATURE_LAYERING && | ||
| 4254 | (first_time || rbd_dev->parent_spec)) { | ||
| 4255 | bool warn; | ||
| 4256 | |||
| 4257 | ret = rbd_dev_v2_parent_info(rbd_dev); | ||
| 4258 | if (ret) | ||
| 4259 | goto out; | ||
| 4260 | |||
| 4261 | /* | ||
| 4262 | * Print a warning if this is the initial probe and | ||
| 4263 | * the image has a parent. Don't print it if the | ||
| 4264 | * image now being probed is itself a parent. We | ||
| 4265 | * can tell at this point because we won't know its | ||
| 4266 | * pool name yet (just its pool id). | ||
| 4267 | */ | ||
| 4268 | warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name; | ||
| 4269 | if (first_time && warn) | ||
| 4270 | rbd_warn(rbd_dev, "WARNING: kernel layering " | ||
| 4271 | "is EXPERIMENTAL!"); | ||
| 4272 | } | ||
| 4273 | |||
| 4062 | ret = rbd_dev_v2_image_size(rbd_dev); | 4274 | ret = rbd_dev_v2_image_size(rbd_dev); |
| 4063 | if (ret) | 4275 | if (ret) |
| 4064 | goto out; | 4276 | goto out; |
| 4065 | rbd_update_mapping_size(rbd_dev); | 4277 | |
| 4278 | if (rbd_dev->spec->snap_id == CEPH_NOSNAP) | ||
| 4279 | if (rbd_dev->mapping.size != rbd_dev->header.image_size) | ||
| 4280 | rbd_dev->mapping.size = rbd_dev->header.image_size; | ||
| 4066 | 4281 | ||
| 4067 | ret = rbd_dev_v2_snap_context(rbd_dev); | 4282 | ret = rbd_dev_v2_snap_context(rbd_dev); |
| 4068 | dout("rbd_dev_v2_snap_context returned %d\n", ret); | 4283 | dout("rbd_dev_v2_snap_context returned %d\n", ret); |
| 4069 | if (ret) | ||
| 4070 | goto out; | ||
| 4071 | out: | 4284 | out: |
| 4072 | up_write(&rbd_dev->header_rwsem); | 4285 | up_write(&rbd_dev->header_rwsem); |
| 4073 | 4286 | ||
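The reworked v2 header routine keys its one-time work off whether the object prefix has been read yet, and re-probes the parent on refresh only while a parent is still on record. A sketch of that gating (illustrative only; the callbacks stand in for the one-time, parent-info and size/snap-context helpers):

#include <stdbool.h>
#include <stddef.h>

struct model_dev {
	char *object_prefix;		/* NULL until the one-time probe ran */
	bool layering;
	bool has_parent_spec;
};

static int model_v2_header_info(struct model_dev *dev,
				int (*onetime)(struct model_dev *),
				int (*parent_info)(struct model_dev *),
				int (*size_and_snapc)(struct model_dev *))
{
	bool first_time = dev->object_prefix == NULL;
	int ret;

	if (first_time) {
		ret = onetime(dev);	/* object prefix, features, striping */
		if (ret)
			return ret;
	}

	/* Probe the parent on the first pass, and on refreshes only while a
	 * parent is still on record (it may since have been flattened). */
	if (dev->layering && (first_time || dev->has_parent_spec)) {
		ret = parent_info(dev);
		if (ret)
			return ret;
	}

	return size_and_snapc(dev);	/* image size + snapshot context */
}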
| @@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) | |||
| 4490 | { | 4703 | { |
| 4491 | struct rbd_image_header *header; | 4704 | struct rbd_image_header *header; |
| 4492 | 4705 | ||
| 4493 | rbd_dev_remove_parent(rbd_dev); | 4706 | /* Drop parent reference unless it's already been done (or none) */ |
| 4494 | rbd_spec_put(rbd_dev->parent_spec); | 4707 | |
| 4495 | rbd_dev->parent_spec = NULL; | 4708 | if (rbd_dev->parent_overlap) |
| 4496 | rbd_dev->parent_overlap = 0; | 4709 | rbd_dev_parent_put(rbd_dev); |
| 4497 | 4710 | ||
| 4498 | /* Free dynamic fields from the header, then zero it out */ | 4711 | /* Free dynamic fields from the header, then zero it out */ |
| 4499 | 4712 | ||
| @@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) | |||
| 4505 | memset(header, 0, sizeof (*header)); | 4718 | memset(header, 0, sizeof (*header)); |
| 4506 | } | 4719 | } |
| 4507 | 4720 | ||
| 4508 | static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) | 4721 | static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) |
| 4509 | { | 4722 | { |
| 4510 | int ret; | 4723 | int ret; |
| 4511 | 4724 | ||
| 4512 | /* Populate rbd image metadata */ | ||
| 4513 | |||
| 4514 | ret = rbd_read_header(rbd_dev, &rbd_dev->header); | ||
| 4515 | if (ret < 0) | ||
| 4516 | goto out_err; | ||
| 4517 | |||
| 4518 | /* Version 1 images have no parent (no layering) */ | ||
| 4519 | |||
| 4520 | rbd_dev->parent_spec = NULL; | ||
| 4521 | rbd_dev->parent_overlap = 0; | ||
| 4522 | |||
| 4523 | dout("discovered version 1 image, header name is %s\n", | ||
| 4524 | rbd_dev->header_name); | ||
| 4525 | |||
| 4526 | return 0; | ||
| 4527 | |||
| 4528 | out_err: | ||
| 4529 | kfree(rbd_dev->header_name); | ||
| 4530 | rbd_dev->header_name = NULL; | ||
| 4531 | kfree(rbd_dev->spec->image_id); | ||
| 4532 | rbd_dev->spec->image_id = NULL; | ||
| 4533 | |||
| 4534 | return ret; | ||
| 4535 | } | ||
| 4536 | |||
| 4537 | static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) | ||
| 4538 | { | ||
| 4539 | int ret; | ||
| 4540 | |||
| 4541 | ret = rbd_dev_v2_image_size(rbd_dev); | ||
| 4542 | if (ret) | ||
| 4543 | goto out_err; | ||
| 4544 | |||
| 4545 | /* Get the object prefix (a.k.a. block_name) for the image */ | ||
| 4546 | |||
| 4547 | ret = rbd_dev_v2_object_prefix(rbd_dev); | 4725 | ret = rbd_dev_v2_object_prefix(rbd_dev); |
| 4548 | if (ret) | 4726 | if (ret) |
| 4549 | goto out_err; | 4727 | goto out_err; |
| 4550 | 4728 | ||
| 4551 | /* Get the and check features for the image */ | 4729 | /* |
| 4552 | | 4730 | * Get and check the features for the image. Currently the |
| 4731 | * features are assumed to never change. | ||
| 4732 | */ | ||
| 4553 | ret = rbd_dev_v2_features(rbd_dev); | 4733 | ret = rbd_dev_v2_features(rbd_dev); |
| 4554 | if (ret) | 4734 | if (ret) |
| 4555 | goto out_err; | 4735 | goto out_err; |
| 4556 | 4736 | ||
| 4557 | /* If the image supports layering, get the parent info */ | ||
| 4558 | |||
| 4559 | if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { | ||
| 4560 | ret = rbd_dev_v2_parent_info(rbd_dev); | ||
| 4561 | if (ret) | ||
| 4562 | goto out_err; | ||
| 4563 | |||
| 4564 | /* | ||
| 4565 | * Don't print a warning for parent images. We can | ||
| 4566 | * tell this point because we won't know its pool | ||
| 4567 | * name yet (just its pool id). | ||
| 4568 | */ | ||
| 4569 | if (rbd_dev->spec->pool_name) | ||
| 4570 | rbd_warn(rbd_dev, "WARNING: kernel layering " | ||
| 4571 | "is EXPERIMENTAL!"); | ||
| 4572 | } | ||
| 4573 | |||
| 4574 | /* If the image supports fancy striping, get its parameters */ | 4737 | /* If the image supports fancy striping, get its parameters */ |
| 4575 | 4738 | ||
| 4576 | if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { | 4739 | if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { |
| @@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) | |||
| 4578 | if (ret < 0) | 4741 | if (ret < 0) |
| 4579 | goto out_err; | 4742 | goto out_err; |
| 4580 | } | 4743 | } |
| 4581 | 4744 | /* No support for crypto and compression types in format 2 images */ |
| 4582 | /* crypto and compression type aren't (yet) supported for v2 images */ | ||
| 4583 | |||
| 4584 | rbd_dev->header.crypt_type = 0; | ||
| 4585 | rbd_dev->header.comp_type = 0; | ||
| 4586 | |||
| 4587 | /* Get the snapshot context, plus the header version */ | ||
| 4588 | |||
| 4589 | ret = rbd_dev_v2_snap_context(rbd_dev); | ||
| 4590 | if (ret) | ||
| 4591 | goto out_err; | ||
| 4592 | |||
| 4593 | dout("discovered version 2 image, header name is %s\n", | ||
| 4594 | rbd_dev->header_name); | ||
| 4595 | 4745 | ||
| 4596 | return 0; | 4746 | return 0; |
| 4597 | out_err: | 4747 | out_err: |
| 4598 | rbd_dev->parent_overlap = 0; | 4748 | rbd_dev->header.features = 0; |
| 4599 | rbd_spec_put(rbd_dev->parent_spec); | ||
| 4600 | rbd_dev->parent_spec = NULL; | ||
| 4601 | kfree(rbd_dev->header_name); | ||
| 4602 | rbd_dev->header_name = NULL; | ||
| 4603 | kfree(rbd_dev->header.object_prefix); | 4749 | kfree(rbd_dev->header.object_prefix); |
| 4604 | rbd_dev->header.object_prefix = NULL; | 4750 | rbd_dev->header.object_prefix = NULL; |
| 4605 | 4751 | ||
| @@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) | |||
| 4628 | if (!parent) | 4774 | if (!parent) |
| 4629 | goto out_err; | 4775 | goto out_err; |
| 4630 | 4776 | ||
| 4631 | ret = rbd_dev_image_probe(parent); | 4777 | ret = rbd_dev_image_probe(parent, false); |
| 4632 | if (ret < 0) | 4778 | if (ret < 0) |
| 4633 | goto out_err; | 4779 | goto out_err; |
| 4634 | rbd_dev->parent = parent; | 4780 | rbd_dev->parent = parent; |
| 4781 | atomic_set(&rbd_dev->parent_ref, 1); | ||
| 4635 | 4782 | ||
| 4636 | return 0; | 4783 | return 0; |
| 4637 | out_err: | 4784 | out_err: |
| 4638 | if (parent) { | 4785 | if (parent) { |
| 4639 | rbd_spec_put(rbd_dev->parent_spec); | 4786 | rbd_dev_unparent(rbd_dev); |
| 4640 | kfree(rbd_dev->header_name); | 4787 | kfree(rbd_dev->header_name); |
| 4641 | rbd_dev_destroy(parent); | 4788 | rbd_dev_destroy(parent); |
| 4642 | } else { | 4789 | } else { |
| @@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
| 4651 | { | 4798 | { |
| 4652 | int ret; | 4799 | int ret; |
| 4653 | 4800 | ||
| 4654 | ret = rbd_dev_mapping_set(rbd_dev); | ||
| 4655 | if (ret) | ||
| 4656 | return ret; | ||
| 4657 | |||
| 4658 | /* generate unique id: find highest unique id, add one */ | 4801 | /* generate unique id: find highest unique id, add one */ |
| 4659 | rbd_dev_id_get(rbd_dev); | 4802 | rbd_dev_id_get(rbd_dev); |
| 4660 | 4803 | ||
| @@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
| 4676 | if (ret) | 4819 | if (ret) |
| 4677 | goto err_out_blkdev; | 4820 | goto err_out_blkdev; |
| 4678 | 4821 | ||
| 4679 | ret = rbd_bus_add_dev(rbd_dev); | 4822 | ret = rbd_dev_mapping_set(rbd_dev); |
| 4680 | if (ret) | 4823 | if (ret) |
| 4681 | goto err_out_disk; | 4824 | goto err_out_disk; |
| 4825 | set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); | ||
| 4826 | |||
| 4827 | ret = rbd_bus_add_dev(rbd_dev); | ||
| 4828 | if (ret) | ||
| 4829 | goto err_out_mapping; | ||
| 4682 | 4830 | ||
| 4683 | /* Everything's ready. Announce the disk to the world. */ | 4831 | /* Everything's ready. Announce the disk to the world. */ |
| 4684 | 4832 | ||
| 4685 | set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); | ||
| 4686 | set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); | 4833 | set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); |
| 4687 | add_disk(rbd_dev->disk); | 4834 | add_disk(rbd_dev->disk); |
| 4688 | 4835 | ||
| @@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
| 4691 | 4838 | ||
| 4692 | return ret; | 4839 | return ret; |
| 4693 | 4840 | ||
| 4841 | err_out_mapping: | ||
| 4842 | rbd_dev_mapping_clear(rbd_dev); | ||
| 4694 | err_out_disk: | 4843 | err_out_disk: |
| 4695 | rbd_free_disk(rbd_dev); | 4844 | rbd_free_disk(rbd_dev); |
| 4696 | err_out_blkdev: | 4845 | err_out_blkdev: |
| @@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) | |||
| 4731 | 4880 | ||
| 4732 | static void rbd_dev_image_release(struct rbd_device *rbd_dev) | 4881 | static void rbd_dev_image_release(struct rbd_device *rbd_dev) |
| 4733 | { | 4882 | { |
| 4734 | int ret; | ||
| 4735 | |||
| 4736 | rbd_dev_unprobe(rbd_dev); | 4883 | rbd_dev_unprobe(rbd_dev); |
| 4737 | ret = rbd_dev_header_watch_sync(rbd_dev, 0); | ||
| 4738 | if (ret) | ||
| 4739 | rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); | ||
| 4740 | kfree(rbd_dev->header_name); | 4884 | kfree(rbd_dev->header_name); |
| 4741 | rbd_dev->header_name = NULL; | 4885 | rbd_dev->header_name = NULL; |
| 4742 | rbd_dev->image_format = 0; | 4886 | rbd_dev->image_format = 0; |
| @@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) | |||
| 4748 | 4892 | ||
| 4749 | /* | 4893 | /* |
| 4750 | * Probe for the existence of the header object for the given rbd | 4894 | * Probe for the existence of the header object for the given rbd |
| 4751 | * device. For format 2 images this includes determining the image | 4895 | * device. If this image is the one being mapped (i.e., not a |
| 4752 | * id. | 4896 | * parent), initiate a watch on its header object before using that |
| 4897 | * object to get detailed information about the rbd image. | ||
| 4753 | */ | 4898 | */ |
| 4754 | static int rbd_dev_image_probe(struct rbd_device *rbd_dev) | 4899 | static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) |
| 4755 | { | 4900 | { |
| 4756 | int ret; | 4901 | int ret; |
| 4757 | int tmp; | 4902 | int tmp; |
| @@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) | |||
| 4771 | if (ret) | 4916 | if (ret) |
| 4772 | goto err_out_format; | 4917 | goto err_out_format; |
| 4773 | 4918 | ||
| 4774 | ret = rbd_dev_header_watch_sync(rbd_dev, 1); | 4919 | if (mapping) { |
| 4775 | if (ret) | 4920 | ret = rbd_dev_header_watch_sync(rbd_dev, true); |
| 4776 | goto out_header_name; | 4921 | if (ret) |
| 4922 | goto out_header_name; | ||
| 4923 | } | ||
| 4777 | 4924 | ||
| 4778 | if (rbd_dev->image_format == 1) | 4925 | if (rbd_dev->image_format == 1) |
| 4779 | ret = rbd_dev_v1_probe(rbd_dev); | 4926 | ret = rbd_dev_v1_header_info(rbd_dev); |
| 4780 | else | 4927 | else |
| 4781 | ret = rbd_dev_v2_probe(rbd_dev); | 4928 | ret = rbd_dev_v2_header_info(rbd_dev); |
| 4782 | if (ret) | 4929 | if (ret) |
| 4783 | goto err_out_watch; | 4930 | goto err_out_watch; |
| 4784 | 4931 | ||
| @@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) | |||
| 4787 | goto err_out_probe; | 4934 | goto err_out_probe; |
| 4788 | 4935 | ||
| 4789 | ret = rbd_dev_probe_parent(rbd_dev); | 4936 | ret = rbd_dev_probe_parent(rbd_dev); |
| 4790 | if (!ret) | 4937 | if (ret) |
| 4791 | return 0; | 4938 | goto err_out_probe; |
| 4939 | |||
| 4940 | dout("discovered format %u image, header name is %s\n", | ||
| 4941 | rbd_dev->image_format, rbd_dev->header_name); | ||
| 4792 | 4942 | ||
| 4943 | return 0; | ||
| 4793 | err_out_probe: | 4944 | err_out_probe: |
| 4794 | rbd_dev_unprobe(rbd_dev); | 4945 | rbd_dev_unprobe(rbd_dev); |
| 4795 | err_out_watch: | 4946 | err_out_watch: |
| 4796 | tmp = rbd_dev_header_watch_sync(rbd_dev, 0); | 4947 | if (mapping) { |
| 4797 | if (tmp) | 4948 | tmp = rbd_dev_header_watch_sync(rbd_dev, false); |
| 4798 | rbd_warn(rbd_dev, "unable to tear down watch request\n"); | 4949 | if (tmp) |
| 4950 | rbd_warn(rbd_dev, "unable to tear down " | ||
| 4951 | "watch request (%d)\n", tmp); | ||
| 4952 | } | ||
| 4799 | out_header_name: | 4953 | out_header_name: |
| 4800 | kfree(rbd_dev->header_name); | 4954 | kfree(rbd_dev->header_name); |
| 4801 | rbd_dev->header_name = NULL; | 4955 | rbd_dev->header_name = NULL; |
| @@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus, | |||
| 4819 | struct rbd_spec *spec = NULL; | 4973 | struct rbd_spec *spec = NULL; |
| 4820 | struct rbd_client *rbdc; | 4974 | struct rbd_client *rbdc; |
| 4821 | struct ceph_osd_client *osdc; | 4975 | struct ceph_osd_client *osdc; |
| 4976 | bool read_only; | ||
| 4822 | int rc = -ENOMEM; | 4977 | int rc = -ENOMEM; |
| 4823 | 4978 | ||
| 4824 | if (!try_module_get(THIS_MODULE)) | 4979 | if (!try_module_get(THIS_MODULE)) |
| @@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus, | |||
| 4828 | rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); | 4983 | rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); |
| 4829 | if (rc < 0) | 4984 | if (rc < 0) |
| 4830 | goto err_out_module; | 4985 | goto err_out_module; |
| 4986 | read_only = rbd_opts->read_only; | ||
| 4987 | kfree(rbd_opts); | ||
| 4988 | rbd_opts = NULL; /* done with this */ | ||
| 4831 | 4989 | ||
| 4832 | rbdc = rbd_get_client(ceph_opts); | 4990 | rbdc = rbd_get_client(ceph_opts); |
| 4833 | if (IS_ERR(rbdc)) { | 4991 | if (IS_ERR(rbdc)) { |
| @@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus, | |||
| 4858 | rbdc = NULL; /* rbd_dev now owns this */ | 5016 | rbdc = NULL; /* rbd_dev now owns this */ |
| 4859 | spec = NULL; /* rbd_dev now owns this */ | 5017 | spec = NULL; /* rbd_dev now owns this */ |
| 4860 | 5018 | ||
| 4861 | rbd_dev->mapping.read_only = rbd_opts->read_only; | 5019 | rc = rbd_dev_image_probe(rbd_dev, true); |
| 4862 | kfree(rbd_opts); | ||
| 4863 | rbd_opts = NULL; /* done with this */ | ||
| 4864 | |||
| 4865 | rc = rbd_dev_image_probe(rbd_dev); | ||
| 4866 | if (rc < 0) | 5020 | if (rc < 0) |
| 4867 | goto err_out_rbd_dev; | 5021 | goto err_out_rbd_dev; |
| 4868 | 5022 | ||
| 5023 | /* If we are mapping a snapshot it must be marked read-only */ | ||
| 5024 | |||
| 5025 | if (rbd_dev->spec->snap_id != CEPH_NOSNAP) | ||
| 5026 | read_only = true; | ||
| 5027 | rbd_dev->mapping.read_only = read_only; | ||
| 5028 | |||
| 4869 | rc = rbd_dev_device_setup(rbd_dev); | 5029 | rc = rbd_dev_device_setup(rbd_dev); |
| 4870 | if (!rc) | 5030 | if (!rc) |
| 4871 | return count; | 5031 | return count; |
| @@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev) | |||
| 4911 | 5071 | ||
| 4912 | rbd_free_disk(rbd_dev); | 5072 | rbd_free_disk(rbd_dev); |
| 4913 | clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); | 5073 | clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); |
| 4914 | rbd_dev_clear_mapping(rbd_dev); | 5074 | rbd_dev_mapping_clear(rbd_dev); |
| 4915 | unregister_blkdev(rbd_dev->major, rbd_dev->name); | 5075 | unregister_blkdev(rbd_dev->major, rbd_dev->name); |
| 4916 | rbd_dev->major = 0; | 5076 | rbd_dev->major = 0; |
| 4917 | rbd_dev_id_put(rbd_dev); | 5077 | rbd_dev_id_put(rbd_dev); |
| @@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus, | |||
| 4978 | spin_unlock_irq(&rbd_dev->lock); | 5138 | spin_unlock_irq(&rbd_dev->lock); |
| 4979 | if (ret < 0) | 5139 | if (ret < 0) |
| 4980 | goto done; | 5140 | goto done; |
| 4981 | ret = count; | ||
| 4982 | rbd_bus_del_dev(rbd_dev); | 5141 | rbd_bus_del_dev(rbd_dev); |
| 5142 | ret = rbd_dev_header_watch_sync(rbd_dev, false); | ||
| 5143 | if (ret) | ||
| 5144 | rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); | ||
| 4983 | rbd_dev_image_release(rbd_dev); | 5145 | rbd_dev_image_release(rbd_dev); |
| 4984 | module_put(THIS_MODULE); | 5146 | module_put(THIS_MODULE); |
| 5147 | ret = count; | ||
| 4985 | done: | 5148 | done: |
| 4986 | mutex_unlock(&ctl_mutex); | 5149 | mutex_unlock(&ctl_mutex); |
| 4987 | 5150 | ||
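The rbd hunks above also change when a mapping is decided to be read-only: the option is captured from rbd_opts right after parsing (and the options struct freed), and after the image probe the flag is forced on whenever a snapshot rather than the image head is mapped, since rbd snapshots are immutable. A minimal sketch of that decision, with the parsed option value (opts_read_only) treated as a placeholder:

	/* sketch only: snapshot mappings must always end up read-only */
	bool read_only = opts_read_only;                 /* from the parsed "read_only"/"ro" option */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)       /* mapping a snapshot, not the head */
		read_only = true;
	rbd_dev->mapping.read_only = read_only;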
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 4ca35e8a5d8c..19a12ac64a9e 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c | |||
| @@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) | |||
| 167 | clk_prepare_enable(mxc_rng->clk); | 167 | clk_prepare_enable(mxc_rng->clk); |
| 168 | 168 | ||
| 169 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 169 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 170 | if (!res) { | ||
| 171 | err = -ENOENT; | ||
| 172 | goto err_region; | ||
| 173 | } | ||
| 174 | |||
| 175 | mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); | 170 | mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); |
| 176 | if (IS_ERR(mxc_rng->mem)) { | 171 | if (IS_ERR(mxc_rng->mem)) { |
| 177 | err = PTR_ERR(mxc_rng->mem); | 172 | err = PTR_ERR(mxc_rng->mem); |
| @@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) | |||
| 189 | return 0; | 184 | return 0; |
| 190 | 185 | ||
| 191 | err_ioremap: | 186 | err_ioremap: |
| 192 | err_region: | ||
| 193 | clk_disable_unprepare(mxc_rng->clk); | 187 | clk_disable_unprepare(mxc_rng->clk); |
| 194 | 188 | ||
| 195 | out: | 189 | out: |
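This hunk, and the similar ones later for omap-rng, kirkwood-cpufreq, tegra20-apb-dma, gpio-mvebu, gpio-tegra and exynos_hdmi, drops the explicit NULL check on the platform_get_resource() result because devm_ioremap_resource() already validates the resource (including a NULL pointer) and returns an ERR_PTR with its own dev_err() diagnostic. The resulting probe idiom, sketched with generic names:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);   /* handles res == NULL itself */
	if (IS_ERR(base))
		return PTR_ERR(base);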
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 749dc16ca2cc..d2903e772270 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
| @@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
| 119 | dev_set_drvdata(&pdev->dev, priv); | 119 | dev_set_drvdata(&pdev->dev, priv); |
| 120 | 120 | ||
| 121 | priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 121 | priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 122 | if (!priv->mem_res) { | ||
| 123 | ret = -ENOENT; | ||
| 124 | goto err_ioremap; | ||
| 125 | } | ||
| 126 | |||
| 127 | priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); | 122 | priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); |
| 128 | if (IS_ERR(priv->base)) { | 123 | if (IS_ERR(priv->base)) { |
| 129 | ret = PTR_ERR(priv->base); | 124 | ret = PTR_ERR(priv->base); |
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index cdd4c09fda96..a22a7a502740 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
| @@ -95,9 +95,9 @@ struct si_sm_data { | |||
| 95 | enum bt_states state; | 95 | enum bt_states state; |
| 96 | unsigned char seq; /* BT sequence number */ | 96 | unsigned char seq; /* BT sequence number */ |
| 97 | struct si_sm_io *io; | 97 | struct si_sm_io *io; |
| 98 | unsigned char write_data[IPMI_MAX_MSG_LENGTH]; | 98 | unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ |
| 99 | int write_count; | 99 | int write_count; |
| 100 | unsigned char read_data[IPMI_MAX_MSG_LENGTH]; | 100 | unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ |
| 101 | int read_count; | 101 | int read_count; |
| 102 | int truncated; | 102 | int truncated; |
| 103 | long timeout; /* microseconds countdown */ | 103 | long timeout; /* microseconds countdown */ |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 9eb360ff8cab..d5a5f020810a 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
| @@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | |||
| 837 | return ipmi_ioctl(filep, cmd, arg); | 837 | return ipmi_ioctl(filep, cmd, arg); |
| 838 | } | 838 | } |
| 839 | } | 839 | } |
| 840 | |||
| 841 | static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | ||
| 842 | unsigned long arg) | ||
| 843 | { | ||
| 844 | int ret; | ||
| 845 | |||
| 846 | mutex_lock(&ipmi_mutex); | ||
| 847 | ret = compat_ipmi_ioctl(filep, cmd, arg); | ||
| 848 | mutex_unlock(&ipmi_mutex); | ||
| 849 | |||
| 850 | return ret; | ||
| 851 | } | ||
| 840 | #endif | 852 | #endif |
| 841 | 853 | ||
| 842 | static const struct file_operations ipmi_fops = { | 854 | static const struct file_operations ipmi_fops = { |
| 843 | .owner = THIS_MODULE, | 855 | .owner = THIS_MODULE, |
| 844 | .unlocked_ioctl = ipmi_unlocked_ioctl, | 856 | .unlocked_ioctl = ipmi_unlocked_ioctl, |
| 845 | #ifdef CONFIG_COMPAT | 857 | #ifdef CONFIG_COMPAT |
| 846 | .compat_ioctl = compat_ipmi_ioctl, | 858 | .compat_ioctl = unlocked_compat_ipmi_ioctl, |
| 847 | #endif | 859 | #endif |
| 848 | .open = ipmi_open, | 860 | .open = ipmi_open, |
| 849 | .release = ipmi_release, | 861 | .release = ipmi_release, |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 4d439d2fcfd6..4445fa164a2d 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
| 2037 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 2037 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
| 2038 | if (!entry) | 2038 | if (!entry) |
| 2039 | return -ENOMEM; | 2039 | return -ENOMEM; |
| 2040 | entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); | 2040 | entry->name = kstrdup(name, GFP_KERNEL); |
| 2041 | if (!entry->name) { | 2041 | if (!entry->name) { |
| 2042 | kfree(entry); | 2042 | kfree(entry); |
| 2043 | return -ENOMEM; | 2043 | return -ENOMEM; |
| 2044 | } | 2044 | } |
| 2045 | strcpy(entry->name, name); | ||
| 2046 | 2045 | ||
| 2047 | file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); | 2046 | file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); |
| 2048 | if (!file) { | 2047 | if (!file) { |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 313538abe63c..af4b23ffc5a6 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
| 663 | /* We got the flags from the SMI, now handle them. */ | 663 | /* We got the flags from the SMI, now handle them. */ |
| 664 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 664 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
| 665 | if (msg[2] != 0) { | 665 | if (msg[2] != 0) { |
| 666 | dev_warn(smi_info->dev, "Could not enable interrupts" | 666 | dev_warn(smi_info->dev, |
| 667 | ", failed get, using polled mode.\n"); | 667 | "Couldn't get irq info: %x.\n", msg[2]); |
| 668 | dev_warn(smi_info->dev, | ||
| 669 | "Maybe ok, but ipmi might run very slowly.\n"); | ||
| 668 | smi_info->si_state = SI_NORMAL; | 670 | smi_info->si_state = SI_NORMAL; |
| 669 | } else { | 671 | } else { |
| 670 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 672 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| @@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
| 685 | 687 | ||
| 686 | /* We got the flags from the SMI, now handle them. */ | 688 | /* We got the flags from the SMI, now handle them. */ |
| 687 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 689 | smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
| 688 | if (msg[2] != 0) | 690 | if (msg[2] != 0) { |
| 689 | dev_warn(smi_info->dev, "Could not enable interrupts" | 691 | dev_warn(smi_info->dev, |
| 690 | ", failed set, using polled mode.\n"); | 692 | "Couldn't set irq info: %x.\n", msg[2]); |
| 691 | else | 693 | dev_warn(smi_info->dev, |
| 694 | "Maybe ok, but ipmi might run very slowly.\n"); | ||
| 695 | } else | ||
| 692 | smi_info->interrupt_disabled = 0; | 696 | smi_info->interrupt_disabled = 0; |
| 693 | smi_info->si_state = SI_NORMAL; | 697 | smi_info->si_state = SI_NORMAL; |
| 694 | break; | 698 | break; |
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 8292a00c3de9..075db0c99edb 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
| @@ -872,6 +872,14 @@ static void __init tegra20_periph_clk_init(void) | |||
| 872 | struct clk *clk; | 872 | struct clk *clk; |
| 873 | int i; | 873 | int i; |
| 874 | 874 | ||
| 875 | /* ac97 */ | ||
| 876 | clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0", | ||
| 877 | TEGRA_PERIPH_ON_APB, | ||
| 878 | clk_base, 0, 3, &periph_l_regs, | ||
| 879 | periph_clk_enb_refcnt); | ||
| 880 | clk_register_clkdev(clk, NULL, "tegra20-ac97"); | ||
| 881 | clks[ac97] = clk; | ||
| 882 | |||
| 875 | /* apbdma */ | 883 | /* apbdma */ |
| 876 | clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base, | 884 | clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base, |
| 877 | 0, 34, &periph_h_regs, | 885 | 0, 34, &periph_h_regs, |
| @@ -1234,9 +1242,6 @@ static __initdata struct tegra_clk_init_table init_table[] = { | |||
| 1234 | {uartc, pll_p, 0, 0}, | 1242 | {uartc, pll_p, 0, 0}, |
| 1235 | {uartd, pll_p, 0, 0}, | 1243 | {uartd, pll_p, 0, 0}, |
| 1236 | {uarte, pll_p, 0, 0}, | 1244 | {uarte, pll_p, 0, 0}, |
| 1237 | {usbd, clk_max, 12000000, 0}, | ||
| 1238 | {usb2, clk_max, 12000000, 0}, | ||
| 1239 | {usb3, clk_max, 12000000, 0}, | ||
| 1240 | {pll_a, clk_max, 56448000, 1}, | 1245 | {pll_a, clk_max, 56448000, 1}, |
| 1241 | {pll_a_out0, clk_max, 11289600, 1}, | 1246 | {pll_a_out0, clk_max, 11289600, 1}, |
| 1242 | {cdev1, clk_max, 0, 1}, | 1247 | {cdev1, clk_max, 0, 1}, |
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index a1488f58f6ca..534fcb825153 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
| @@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS | |||
| 47 | 47 | ||
| 48 | choice | 48 | choice |
| 49 | prompt "Default CPUFreq governor" | 49 | prompt "Default CPUFreq governor" |
| 50 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 | 50 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ |
| 51 | default CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 51 | default CPU_FREQ_DEFAULT_GOV_PERFORMANCE |
| 52 | help | 52 | help |
| 53 | This option sets which CPUFreq governor shall be loaded at | 53 | This option sets which CPUFreq governor shall be loaded at |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index f3af18b9acc5..6e57543fe0b9 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -3,16 +3,17 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config ARM_BIG_LITTLE_CPUFREQ | 5 | config ARM_BIG_LITTLE_CPUFREQ |
| 6 | tristate | 6 | tristate "Generic ARM big LITTLE CPUfreq driver" |
| 7 | depends on ARM_CPU_TOPOLOGY | 7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK |
| 8 | help | ||
| 9 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. | ||
| 8 | 10 | ||
| 9 | config ARM_DT_BL_CPUFREQ | 11 | config ARM_DT_BL_CPUFREQ |
| 10 | tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" | 12 | tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" |
| 11 | select ARM_BIG_LITTLE_CPUFREQ | 13 | depends on ARM_BIG_LITTLE_CPUFREQ && OF |
| 12 | depends on OF && HAVE_CLK | ||
| 13 | help | 14 | help |
| 14 | This enables the Generic CPUfreq driver for ARM big.LITTLE platform. | 15 | This enables probing via DT for Generic CPUfreq driver for ARM |
| 15 | This gets frequency tables from DT. | 16 | big.LITTLE platform. This gets frequency tables from DT. |
| 16 | 17 | ||
| 17 | config ARM_EXYNOS_CPUFREQ | 18 | config ARM_EXYNOS_CPUFREQ |
| 18 | bool "SAMSUNG EXYNOS SoCs" | 19 | bool "SAMSUNG EXYNOS SoCs" |
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index dbdf677d2f36..5d7f53fcd6f5 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c | |||
| @@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS]; | |||
| 40 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; | 40 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; |
| 41 | static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; | 41 | static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; |
| 42 | 42 | ||
| 43 | static int cpu_to_cluster(int cpu) | ||
| 44 | { | ||
| 45 | return topology_physical_package_id(cpu); | ||
| 46 | } | ||
| 47 | |||
| 48 | static unsigned int bL_cpufreq_get(unsigned int cpu) | 43 | static unsigned int bL_cpufreq_get(unsigned int cpu) |
| 49 | { | 44 | { |
| 50 | u32 cur_cluster = cpu_to_cluster(cpu); | 45 | u32 cur_cluster = cpu_to_cluster(cpu); |
| @@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
| 192 | 187 | ||
| 193 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | 188 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); |
| 194 | 189 | ||
| 195 | dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); | 190 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); |
| 196 | return 0; | 191 | return 0; |
| 197 | } | 192 | } |
| 198 | 193 | ||
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h index 70f18fc12d4a..79b2ce17884d 100644 --- a/drivers/cpufreq/arm_big_little.h +++ b/drivers/cpufreq/arm_big_little.h | |||
| @@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops { | |||
| 34 | int (*init_opp_table)(struct device *cpu_dev); | 34 | int (*init_opp_table)(struct device *cpu_dev); |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | static inline int cpu_to_cluster(int cpu) | ||
| 38 | { | ||
| 39 | return topology_physical_package_id(cpu); | ||
| 40 | } | ||
| 41 | |||
| 37 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); | 42 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); |
| 38 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); | 43 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); |
| 39 | 44 | ||
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 44be3115375c..173ed059d95f 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c | |||
| @@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev) | |||
| 66 | 66 | ||
| 67 | parent = of_find_node_by_path("/cpus"); | 67 | parent = of_find_node_by_path("/cpus"); |
| 68 | if (!parent) { | 68 | if (!parent) { |
| 69 | pr_err("failed to find OF /cpus\n"); | 69 | pr_info("Failed to find OF /cpus. Use CPUFREQ_ETERNAL transition latency\n"); |
| 70 | return -ENOENT; | 70 | return CPUFREQ_ETERNAL; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | for_each_child_of_node(parent, np) { | 73 | for_each_child_of_node(parent, np) { |
| @@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev) | |||
| 78 | of_node_put(np); | 78 | of_node_put(np); |
| 79 | of_node_put(parent); | 79 | of_node_put(parent); |
| 80 | 80 | ||
| 81 | return 0; | 81 | return transition_latency; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | return -ENODEV; | 84 | pr_info("clock-latency isn't found, use CPUFREQ_ETERNAL transition latency\n"); |
| 85 | return CPUFREQ_ETERNAL; | ||
| 85 | } | 86 | } |
| 86 | 87 | ||
| 87 | static struct cpufreq_arm_bL_ops dt_bL_ops = { | 88 | static struct cpufreq_arm_bL_ops dt_bL_ops = { |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 3ab8294eab04..a64eb8b70444 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
| @@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
| 189 | 189 | ||
| 190 | if (!np) { | 190 | if (!np) { |
| 191 | pr_err("failed to find cpu0 node\n"); | 191 | pr_err("failed to find cpu0 node\n"); |
| 192 | return -ENOENT; | 192 | ret = -ENOENT; |
| 193 | goto out_put_parent; | ||
| 193 | } | 194 | } |
| 194 | 195 | ||
| 195 | cpu_dev = &pdev->dev; | 196 | cpu_dev = &pdev->dev; |
| 196 | cpu_dev->of_node = np; | 197 | cpu_dev->of_node = np; |
| 197 | 198 | ||
| 199 | cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); | ||
| 200 | if (IS_ERR(cpu_reg)) { | ||
| 201 | /* | ||
| 202 | * If cpu0 regulator supply node is present, but regulator is | ||
| 203 | * not yet registered, we should try defering probe. | ||
| 204 | */ | ||
| 205 | if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { | ||
| 206 | dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); | ||
| 207 | ret = -EPROBE_DEFER; | ||
| 208 | goto out_put_node; | ||
| 209 | } | ||
| 210 | pr_warn("failed to get cpu0 regulator: %ld\n", | ||
| 211 | PTR_ERR(cpu_reg)); | ||
| 212 | cpu_reg = NULL; | ||
| 213 | } | ||
| 214 | |||
| 198 | cpu_clk = devm_clk_get(cpu_dev, NULL); | 215 | cpu_clk = devm_clk_get(cpu_dev, NULL); |
| 199 | if (IS_ERR(cpu_clk)) { | 216 | if (IS_ERR(cpu_clk)) { |
| 200 | ret = PTR_ERR(cpu_clk); | 217 | ret = PTR_ERR(cpu_clk); |
| @@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
| 202 | goto out_put_node; | 219 | goto out_put_node; |
| 203 | } | 220 | } |
| 204 | 221 | ||
| 205 | cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); | ||
| 206 | if (IS_ERR(cpu_reg)) { | ||
| 207 | pr_warn("failed to get cpu0 regulator\n"); | ||
| 208 | cpu_reg = NULL; | ||
| 209 | } | ||
| 210 | |||
| 211 | ret = of_init_opp_table(cpu_dev); | 222 | ret = of_init_opp_table(cpu_dev); |
| 212 | if (ret) { | 223 | if (ret) { |
| 213 | pr_err("failed to init OPP table: %d\n", ret); | 224 | pr_err("failed to init OPP table: %d\n", ret); |
| @@ -264,6 +275,8 @@ out_free_table: | |||
| 264 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 275 | opp_free_cpufreq_table(cpu_dev, &freq_table); |
| 265 | out_put_node: | 276 | out_put_node: |
| 266 | of_node_put(np); | 277 | of_node_put(np); |
| 278 | out_put_parent: | ||
| 279 | of_node_put(parent); | ||
| 267 | return ret; | 280 | return ret; |
| 268 | } | 281 | } |
| 269 | 282 | ||
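The reordered regulator lookup above distinguishes two failure cases: if the cpu0 supply is described in the device tree but its regulator has not been registered yet, devm_regulator_get() returns -EPROBE_DEFER and the probe bails out so the driver core can retry it later; any other error is treated as "no regulator" and the driver continues without voltage scaling. The pattern, sketched with local names as placeholders:

	reg = devm_regulator_get(dev, "cpu0");
	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;            /* supply exists, provider not ready yet */
		reg = NULL;                              /* optional supply: continue without it */
	}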
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1b8a48eaf90f..4b8c7f297d74 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif | |||
| 1075 | __func__, cpu_dev->id, cpu); | 1075 | __func__, cpu_dev->id, cpu); |
| 1076 | } | 1076 | } |
| 1077 | 1077 | ||
| 1078 | if ((cpus == 1) && (cpufreq_driver->target)) | ||
| 1079 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
| 1080 | |||
| 1078 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); | 1081 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); |
| 1079 | cpufreq_cpu_put(data); | 1082 | cpufreq_cpu_put(data); |
| 1080 | 1083 | ||
| 1081 | /* If cpu is last user of policy, free policy */ | 1084 | /* If cpu is last user of policy, free policy */ |
| 1082 | if (cpus == 1) { | 1085 | if (cpus == 1) { |
| 1083 | if (cpufreq_driver->target) | ||
| 1084 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
| 1085 | |||
| 1086 | lock_policy_rwsem_read(cpu); | 1086 | lock_policy_rwsem_read(cpu); |
| 1087 | kobj = &data->kobj; | 1087 | kobj = &data->kobj; |
| 1088 | cmp = &data->kobj_unregister; | 1088 | cmp = &data->kobj_unregister; |
| @@ -1832,15 +1832,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
| 1832 | if (dev) { | 1832 | if (dev) { |
| 1833 | switch (action) { | 1833 | switch (action) { |
| 1834 | case CPU_ONLINE: | 1834 | case CPU_ONLINE: |
| 1835 | case CPU_ONLINE_FROZEN: | ||
| 1836 | cpufreq_add_dev(dev, NULL); | 1835 | cpufreq_add_dev(dev, NULL); |
| 1837 | break; | 1836 | break; |
| 1838 | case CPU_DOWN_PREPARE: | 1837 | case CPU_DOWN_PREPARE: |
| 1839 | case CPU_DOWN_PREPARE_FROZEN: | 1838 | case CPU_UP_CANCELED_FROZEN: |
| 1840 | __cpufreq_remove_dev(dev, NULL); | 1839 | __cpufreq_remove_dev(dev, NULL); |
| 1841 | break; | 1840 | break; |
| 1842 | case CPU_DOWN_FAILED: | 1841 | case CPU_DOWN_FAILED: |
| 1843 | case CPU_DOWN_FAILED_FROZEN: | ||
| 1844 | cpufreq_add_dev(dev, NULL); | 1842 | cpufreq_add_dev(dev, NULL); |
| 1845 | break; | 1843 | break; |
| 1846 | } | 1844 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 443442df113b..5af40ad82d23 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
| @@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
| 255 | if (have_governor_per_policy()) { | 255 | if (have_governor_per_policy()) { |
| 256 | WARN_ON(dbs_data); | 256 | WARN_ON(dbs_data); |
| 257 | } else if (dbs_data) { | 257 | } else if (dbs_data) { |
| 258 | dbs_data->usage_count++; | ||
| 258 | policy->governor_data = dbs_data; | 259 | policy->governor_data = dbs_data; |
| 259 | return 0; | 260 | return 0; |
| 260 | } | 261 | } |
| @@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
| 266 | } | 267 | } |
| 267 | 268 | ||
| 268 | dbs_data->cdata = cdata; | 269 | dbs_data->cdata = cdata; |
| 270 | dbs_data->usage_count = 1; | ||
| 269 | rc = cdata->init(dbs_data); | 271 | rc = cdata->init(dbs_data); |
| 270 | if (rc) { | 272 | if (rc) { |
| 271 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); | 273 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); |
| @@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
| 294 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, | 296 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, |
| 295 | latency * LATENCY_MULTIPLIER)); | 297 | latency * LATENCY_MULTIPLIER)); |
| 296 | 298 | ||
| 297 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 299 | if ((cdata->governor == GOV_CONSERVATIVE) && |
| 300 | (!policy->governor->initialized)) { | ||
| 298 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 301 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
| 299 | 302 | ||
| 300 | cpufreq_register_notifier(cs_ops->notifier_block, | 303 | cpufreq_register_notifier(cs_ops->notifier_block, |
| @@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
| 306 | 309 | ||
| 307 | return 0; | 310 | return 0; |
| 308 | case CPUFREQ_GOV_POLICY_EXIT: | 311 | case CPUFREQ_GOV_POLICY_EXIT: |
| 309 | if ((policy->governor->initialized == 1) || | 312 | if (!--dbs_data->usage_count) { |
| 310 | have_governor_per_policy()) { | ||
| 311 | sysfs_remove_group(get_governor_parent_kobj(policy), | 313 | sysfs_remove_group(get_governor_parent_kobj(policy), |
| 312 | get_sysfs_attr(dbs_data)); | 314 | get_sysfs_attr(dbs_data)); |
| 313 | 315 | ||
| 314 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 316 | if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && |
| 317 | (policy->governor->initialized == 1)) { | ||
| 315 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 318 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
| 316 | 319 | ||
| 317 | cpufreq_unregister_notifier(cs_ops->notifier_block, | 320 | cpufreq_unregister_notifier(cs_ops->notifier_block, |
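When a governor does not keep per-policy tunables (the !have_governor_per_policy() case), a single dbs_data is shared by every policy, so its sysfs group and notifier must only be torn down when the last policy stops using it; the new usage_count field is that reference count. Schematically (free_tunables() stands in for the removal work done in the EXIT branch):

	/* CPUFREQ_GOV_POLICY_INIT */
	if (dbs_data) {                                  /* already created by another policy */
		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}
	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;
	dbs_data->usage_count = 1;                       /* first user */

	/* CPUFREQ_GOV_POLICY_EXIT */
	if (!--dbs_data->usage_count)
		free_tunables(dbs_data);                 /* last user gone */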
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 8ac33538d0bd..e16a96130cb3 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
| @@ -211,6 +211,7 @@ struct common_dbs_data { | |||
| 211 | struct dbs_data { | 211 | struct dbs_data { |
| 212 | struct common_dbs_data *cdata; | 212 | struct common_dbs_data *cdata; |
| 213 | unsigned int min_sampling_rate; | 213 | unsigned int min_sampling_rate; |
| 214 | int usage_count; | ||
| 214 | void *tuners; | 215 | void *tuners; |
| 215 | 216 | ||
| 216 | /* dbs_mutex protects dbs_enable in governor start/stop */ | 217 | /* dbs_mutex protects dbs_enable in governor start/stop */ |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index b0ffef96bf77..4b9bb5def6f1 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
| @@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data) | |||
| 547 | tuners->io_is_busy = should_io_be_busy(); | 547 | tuners->io_is_busy = should_io_be_busy(); |
| 548 | 548 | ||
| 549 | dbs_data->tuners = tuners; | 549 | dbs_data->tuners = tuners; |
| 550 | pr_info("%s: tuners %p\n", __func__, tuners); | ||
| 551 | mutex_init(&dbs_data->mutex); | 550 | mutex_init(&dbs_data->mutex); |
| 552 | return 0; | 551 | return 0; |
| 553 | } | 552 | } |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index bfd6273fd873..fb65decffa28 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
| @@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
| 349 | 349 | ||
| 350 | switch (action) { | 350 | switch (action) { |
| 351 | case CPU_ONLINE: | 351 | case CPU_ONLINE: |
| 352 | case CPU_ONLINE_FROZEN: | ||
| 353 | cpufreq_update_policy(cpu); | 352 | cpufreq_update_policy(cpu); |
| 354 | break; | 353 | break; |
| 355 | case CPU_DOWN_PREPARE: | 354 | case CPU_DOWN_PREPARE: |
| 356 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 357 | cpufreq_stats_free_sysfs(cpu); | 355 | cpufreq_stats_free_sysfs(cpu); |
| 358 | break; | 356 | break; |
| 359 | case CPU_DEAD: | 357 | case CPU_DEAD: |
| 360 | case CPU_DEAD_FROZEN: | 358 | cpufreq_stats_free_table(cpu); |
| 359 | break; | ||
| 360 | case CPU_UP_CANCELED_FROZEN: | ||
| 361 | cpufreq_stats_free_sysfs(cpu); | ||
| 361 | cpufreq_stats_free_table(cpu); | 362 | cpufreq_stats_free_table(cpu); |
| 362 | break; | 363 | break; |
| 363 | } | 364 | } |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cc3a8e6c92be..9c36ace92a39 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y) | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | struct sample { | 50 | struct sample { |
| 51 | ktime_t start_time; | ||
| 52 | ktime_t end_time; | ||
| 53 | int core_pct_busy; | 51 | int core_pct_busy; |
| 54 | int pstate_pct_busy; | ||
| 55 | u64 duration_us; | ||
| 56 | u64 idletime_us; | ||
| 57 | u64 aperf; | 52 | u64 aperf; |
| 58 | u64 mperf; | 53 | u64 mperf; |
| 59 | int freq; | 54 | int freq; |
| @@ -86,13 +81,9 @@ struct cpudata { | |||
| 86 | struct pstate_adjust_policy *pstate_policy; | 81 | struct pstate_adjust_policy *pstate_policy; |
| 87 | struct pstate_data pstate; | 82 | struct pstate_data pstate; |
| 88 | struct _pid pid; | 83 | struct _pid pid; |
| 89 | struct _pid idle_pid; | ||
| 90 | 84 | ||
| 91 | int min_pstate_count; | 85 | int min_pstate_count; |
| 92 | int idle_mode; | ||
| 93 | 86 | ||
| 94 | ktime_t prev_sample; | ||
| 95 | u64 prev_idle_time_us; | ||
| 96 | u64 prev_aperf; | 87 | u64 prev_aperf; |
| 97 | u64 prev_mperf; | 88 | u64 prev_mperf; |
| 98 | int sample_ptr; | 89 | int sample_ptr; |
| @@ -124,6 +115,8 @@ struct perf_limits { | |||
| 124 | int min_perf_pct; | 115 | int min_perf_pct; |
| 125 | int32_t max_perf; | 116 | int32_t max_perf; |
| 126 | int32_t min_perf; | 117 | int32_t min_perf; |
| 118 | int max_policy_pct; | ||
| 119 | int max_sysfs_pct; | ||
| 127 | }; | 120 | }; |
| 128 | 121 | ||
| 129 | static struct perf_limits limits = { | 122 | static struct perf_limits limits = { |
| @@ -132,6 +125,8 @@ static struct perf_limits limits = { | |||
| 132 | .max_perf = int_tofp(1), | 125 | .max_perf = int_tofp(1), |
| 133 | .min_perf_pct = 0, | 126 | .min_perf_pct = 0, |
| 134 | .min_perf = 0, | 127 | .min_perf = 0, |
| 128 | .max_policy_pct = 100, | ||
| 129 | .max_sysfs_pct = 100, | ||
| 135 | }; | 130 | }; |
| 136 | 131 | ||
| 137 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | 132 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, |
| @@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) | |||
| 202 | 0); | 197 | 0); |
| 203 | } | 198 | } |
| 204 | 199 | ||
| 205 | static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) | ||
| 206 | { | ||
| 207 | pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); | ||
| 208 | pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); | ||
| 209 | pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); | ||
| 210 | |||
| 211 | pid_reset(&cpu->idle_pid, | ||
| 212 | 75, | ||
| 213 | 50, | ||
| 214 | cpu->pstate_policy->deadband, | ||
| 215 | 0); | ||
| 216 | } | ||
| 217 | |||
| 218 | static inline void intel_pstate_reset_all_pid(void) | 200 | static inline void intel_pstate_reset_all_pid(void) |
| 219 | { | 201 | { |
| 220 | unsigned int cpu; | 202 | unsigned int cpu; |
| @@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
| 302 | if (ret != 1) | 284 | if (ret != 1) |
| 303 | return -EINVAL; | 285 | return -EINVAL; |
| 304 | 286 | ||
| 305 | limits.max_perf_pct = clamp_t(int, input, 0 , 100); | 287 | limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); |
| 288 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); | ||
| 306 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | 289 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
| 307 | return count; | 290 | return count; |
| 308 | } | 291 | } |
| @@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | |||
| 408 | if (pstate == cpu->pstate.current_pstate) | 391 | if (pstate == cpu->pstate.current_pstate) |
| 409 | return; | 392 | return; |
| 410 | 393 | ||
| 411 | #ifndef MODULE | ||
| 412 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | 394 | trace_cpu_frequency(pstate * 100000, cpu->cpu); |
| 413 | #endif | 395 | |
| 414 | cpu->pstate.current_pstate = pstate; | 396 | cpu->pstate.current_pstate = pstate; |
| 415 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); | 397 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); |
| 416 | 398 | ||
| @@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, | |||
| 450 | struct sample *sample) | 432 | struct sample *sample) |
| 451 | { | 433 | { |
| 452 | u64 core_pct; | 434 | u64 core_pct; |
| 453 | sample->pstate_pct_busy = 100 - div64_u64( | ||
| 454 | sample->idletime_us * 100, | ||
| 455 | sample->duration_us); | ||
| 456 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); | 435 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); |
| 457 | sample->freq = cpu->pstate.max_pstate * core_pct * 1000; | 436 | sample->freq = cpu->pstate.max_pstate * core_pct * 1000; |
| 458 | 437 | ||
| 459 | sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), | 438 | sample->core_pct_busy = core_pct; |
| 460 | 100); | ||
| 461 | } | 439 | } |
| 462 | 440 | ||
| 463 | static inline void intel_pstate_sample(struct cpudata *cpu) | 441 | static inline void intel_pstate_sample(struct cpudata *cpu) |
| 464 | { | 442 | { |
| 465 | ktime_t now; | ||
| 466 | u64 idle_time_us; | ||
| 467 | u64 aperf, mperf; | 443 | u64 aperf, mperf; |
| 468 | 444 | ||
| 469 | now = ktime_get(); | ||
| 470 | idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); | ||
| 471 | |||
| 472 | rdmsrl(MSR_IA32_APERF, aperf); | 445 | rdmsrl(MSR_IA32_APERF, aperf); |
| 473 | rdmsrl(MSR_IA32_MPERF, mperf); | 446 | rdmsrl(MSR_IA32_MPERF, mperf); |
| 474 | /* for the first sample, don't actually record a sample, just | 447 | cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; |
| 475 | * set the baseline */ | 448 | cpu->samples[cpu->sample_ptr].aperf = aperf; |
| 476 | if (cpu->prev_idle_time_us > 0) { | 449 | cpu->samples[cpu->sample_ptr].mperf = mperf; |
| 477 | cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; | 450 | cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; |
| 478 | cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; | 451 | cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; |
| 479 | cpu->samples[cpu->sample_ptr].end_time = now; | 452 | |
| 480 | cpu->samples[cpu->sample_ptr].duration_us = | 453 | intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); |
| 481 | ktime_us_delta(now, cpu->prev_sample); | ||
| 482 | cpu->samples[cpu->sample_ptr].idletime_us = | ||
| 483 | idle_time_us - cpu->prev_idle_time_us; | ||
| 484 | |||
| 485 | cpu->samples[cpu->sample_ptr].aperf = aperf; | ||
| 486 | cpu->samples[cpu->sample_ptr].mperf = mperf; | ||
| 487 | cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; | ||
| 488 | cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; | ||
| 489 | |||
| 490 | intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); | ||
| 491 | } | ||
| 492 | 454 | ||
| 493 | cpu->prev_sample = now; | ||
| 494 | cpu->prev_idle_time_us = idle_time_us; | ||
| 495 | cpu->prev_aperf = aperf; | 455 | cpu->prev_aperf = aperf; |
| 496 | cpu->prev_mperf = mperf; | 456 | cpu->prev_mperf = mperf; |
| 497 | } | 457 | } |
| @@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
| 505 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 465 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
| 506 | } | 466 | } |
| 507 | 467 | ||
| 508 | static inline void intel_pstate_idle_mode(struct cpudata *cpu) | ||
| 509 | { | ||
| 510 | cpu->idle_mode = 1; | ||
| 511 | } | ||
| 512 | |||
| 513 | static inline void intel_pstate_normal_mode(struct cpudata *cpu) | ||
| 514 | { | ||
| 515 | cpu->idle_mode = 0; | ||
| 516 | } | ||
| 517 | |||
| 518 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) | 468 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) |
| 519 | { | 469 | { |
| 520 | int32_t busy_scaled; | 470 | int32_t busy_scaled; |
| @@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | |||
| 547 | intel_pstate_pstate_decrease(cpu, steps); | 497 | intel_pstate_pstate_decrease(cpu, steps); |
| 548 | } | 498 | } |
| 549 | 499 | ||
| 550 | static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) | ||
| 551 | { | ||
| 552 | int busy_scaled; | ||
| 553 | struct _pid *pid; | ||
| 554 | int ctl = 0; | ||
| 555 | int steps; | ||
| 556 | |||
| 557 | pid = &cpu->idle_pid; | ||
| 558 | |||
| 559 | busy_scaled = intel_pstate_get_scaled_busy(cpu); | ||
| 560 | |||
| 561 | ctl = pid_calc(pid, 100 - busy_scaled); | ||
| 562 | |||
| 563 | steps = abs(ctl); | ||
| 564 | if (ctl < 0) | ||
| 565 | intel_pstate_pstate_decrease(cpu, steps); | ||
| 566 | else | ||
| 567 | intel_pstate_pstate_increase(cpu, steps); | ||
| 568 | |||
| 569 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) | ||
| 570 | intel_pstate_normal_mode(cpu); | ||
| 571 | } | ||
| 572 | |||
| 573 | static void intel_pstate_timer_func(unsigned long __data) | 500 | static void intel_pstate_timer_func(unsigned long __data) |
| 574 | { | 501 | { |
| 575 | struct cpudata *cpu = (struct cpudata *) __data; | 502 | struct cpudata *cpu = (struct cpudata *) __data; |
| 576 | 503 | ||
| 577 | intel_pstate_sample(cpu); | 504 | intel_pstate_sample(cpu); |
| 505 | intel_pstate_adjust_busy_pstate(cpu); | ||
| 578 | 506 | ||
| 579 | if (!cpu->idle_mode) | ||
| 580 | intel_pstate_adjust_busy_pstate(cpu); | ||
| 581 | else | ||
| 582 | intel_pstate_adjust_idle_pstate(cpu); | ||
| 583 | |||
| 584 | #if defined(XPERF_FIX) | ||
| 585 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { | 507 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { |
| 586 | cpu->min_pstate_count++; | 508 | cpu->min_pstate_count++; |
| 587 | if (!(cpu->min_pstate_count % 5)) { | 509 | if (!(cpu->min_pstate_count % 5)) { |
| 588 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | 510 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); |
| 589 | intel_pstate_idle_mode(cpu); | ||
| 590 | } | 511 | } |
| 591 | } else | 512 | } else |
| 592 | cpu->min_pstate_count = 0; | 513 | cpu->min_pstate_count = 0; |
| 593 | #endif | 514 | |
| 594 | intel_pstate_set_sample_time(cpu); | 515 | intel_pstate_set_sample_time(cpu); |
| 595 | } | 516 | } |
| 596 | 517 | ||
| @@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
| 631 | (unsigned long)cpu; | 552 | (unsigned long)cpu; |
| 632 | cpu->timer.expires = jiffies + HZ/100; | 553 | cpu->timer.expires = jiffies + HZ/100; |
| 633 | intel_pstate_busy_pid_reset(cpu); | 554 | intel_pstate_busy_pid_reset(cpu); |
| 634 | intel_pstate_idle_pid_reset(cpu); | ||
| 635 | intel_pstate_sample(cpu); | 555 | intel_pstate_sample(cpu); |
| 636 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | 556 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); |
| 637 | 557 | ||
| @@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 675 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); | 595 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); |
| 676 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | 596 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
| 677 | 597 | ||
| 678 | limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; | 598 | limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; |
| 679 | limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); | 599 | limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); |
| 600 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); | ||
| 680 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | 601 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
| 681 | 602 | ||
| 682 | return 0; | 603 | return 0; |
| @@ -788,10 +709,9 @@ static int __init intel_pstate_init(void) | |||
| 788 | 709 | ||
| 789 | pr_info("Intel P-state driver initializing.\n"); | 710 | pr_info("Intel P-state driver initializing.\n"); |
| 790 | 711 | ||
| 791 | all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); | 712 | all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); |
| 792 | if (!all_cpu_data) | 713 | if (!all_cpu_data) |
| 793 | return -ENOMEM; | 714 | return -ENOMEM; |
| 794 | memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); | ||
| 795 | 715 | ||
| 796 | rc = cpufreq_register_driver(&intel_pstate_driver); | 716 | rc = cpufreq_register_driver(&intel_pstate_driver); |
| 797 | if (rc) | 717 | if (rc) |
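Two things change in intel_pstate here. First, the wall-clock and idle-time bookkeeping is dropped: each sample now records only the APERF/MPERF deltas since the previous sample, and their ratio is used directly as the busy figure (APERF advances with actually executed cycles, MPERF at the fixed reference frequency, so the ratio tracks how hard the core ran relative to its reference clock). Second, the sysfs max_perf_pct is kept separate from the policy-derived limit and the effective limit is the smaller of the two, so a policy update no longer silently overwrites a value set through sysfs. A sketch of the per-sample calculation, assuming the 100 MHz p-state granularity that the trace_cpu_frequency(pstate * 100000, ...) call implies:

	u64 d_aperf = aperf - prev_aperf;                /* cycles actually executed */
	u64 d_mperf = mperf - prev_mperf;                /* cycles at the reference frequency */
	u64 core_pct = div64_u64(d_aperf * 100, d_mperf);

	sample->core_pct_busy = core_pct;
	sample->freq = max_pstate * core_pct * 1000;     /* kHz, given 100 MHz per p-state step */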
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index d36ea8dc96eb..b2644af985ec 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
| @@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
| 171 | priv.dev = &pdev->dev; | 171 | priv.dev = &pdev->dev; |
| 172 | 172 | ||
| 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 174 | if (!res) { | ||
| 175 | dev_err(&pdev->dev, "Cannot get memory resource\n"); | ||
| 176 | return -ENODEV; | ||
| 177 | } | ||
| 178 | priv.base = devm_ioremap_resource(&pdev->dev, res); | 174 | priv.base = devm_ioremap_resource(&pdev->dev, res); |
| 179 | if (IS_ERR(priv.base)) | 175 | if (IS_ERR(priv.base)) |
| 180 | return PTR_ERR(priv.base); | 176 | return PTR_ERR(priv.base); |
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ce193409ebd3..33f59ecd256e 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
| @@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
| 1273 | platform_set_drvdata(pdev, tdma); | 1273 | platform_set_drvdata(pdev, tdma); |
| 1274 | 1274 | ||
| 1275 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1275 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1276 | if (!res) { | ||
| 1277 | dev_err(&pdev->dev, "No mem resource for DMA\n"); | ||
| 1278 | return -EINVAL; | ||
| 1279 | } | ||
| 1280 | |||
| 1281 | tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); | 1276 | tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); |
| 1282 | if (IS_ERR(tdma->base_addr)) | 1277 | if (IS_ERR(tdma->base_addr)) |
| 1283 | return PTR_ERR(tdma->base_addr); | 1278 | return PTR_ERR(tdma->base_addr); |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index bf69a7eff370..3a4816adc137 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
| @@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 619 | * per-CPU registers */ | 619 | * per-CPU registers */ |
| 620 | if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { | 620 | if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { |
| 621 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 621 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 622 | if (!res) { | ||
| 623 | dev_err(&pdev->dev, "Cannot get memory resource\n"); | ||
| 624 | return -ENODEV; | ||
| 625 | } | ||
| 626 | |||
| 627 | mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, | 622 | mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, |
| 628 | res); | 623 | res); |
| 629 | if (IS_ERR(mvchip->percpu_membase)) | 624 | if (IS_ERR(mvchip->percpu_membase)) |
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index da4cb5b0cb87..9a62672f1bed 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c | |||
| @@ -463,11 +463,6 @@ static int tegra_gpio_probe(struct platform_device *pdev) | |||
| 463 | } | 463 | } |
| 464 | 464 | ||
| 465 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 465 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 466 | if (!res) { | ||
| 467 | dev_err(&pdev->dev, "Missing MEM resource\n"); | ||
| 468 | return -ENODEV; | ||
| 469 | } | ||
| 470 | |||
| 471 | regs = devm_ioremap_resource(&pdev->dev, res); | 466 | regs = devm_ioremap_resource(&pdev->dev, res); |
| 472 | if (IS_ERR(regs)) | 467 | if (IS_ERR(regs)) |
| 473 | return PTR_ERR(regs); | 468 | return PTR_ERR(regs); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 3a8f7e6db295..e7e92429d10f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) | |||
| 78 | { | 78 | { |
| 79 | struct drm_crtc *crtc; | 79 | struct drm_crtc *crtc; |
| 80 | 80 | ||
| 81 | /* Locking is currently fubar in the panic handler. */ | ||
| 82 | if (oops_in_progress) | ||
| 83 | return; | ||
| 84 | |||
| 81 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 85 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
| 82 | WARN_ON(!mutex_is_locked(&crtc->mutex)); | 86 | WARN_ON(!mutex_is_locked(&crtc->mutex)); |
| 83 | 87 | ||
| @@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status) | |||
| 246 | else | 250 | else |
| 247 | return "unknown"; | 251 | return "unknown"; |
| 248 | } | 252 | } |
| 253 | EXPORT_SYMBOL(drm_get_connector_status_name); | ||
| 249 | 254 | ||
| 250 | /** | 255 | /** |
| 251 | * drm_mode_object_get - allocate a new modeset identifier | 256 | * drm_mode_object_get - allocate a new modeset identifier |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index e974f9309b72..ed1334e27c33 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 121 | connector->helper_private; | 121 | connector->helper_private; |
| 122 | int count = 0; | 122 | int count = 0; |
| 123 | int mode_flags = 0; | 123 | int mode_flags = 0; |
| 124 | bool verbose_prune = true; | ||
| 124 | 125 | ||
| 125 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, | 126 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, |
| 126 | drm_get_connector_name(connector)); | 127 | drm_get_connector_name(connector)); |
| @@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 149 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", | 150 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", |
| 150 | connector->base.id, drm_get_connector_name(connector)); | 151 | connector->base.id, drm_get_connector_name(connector)); |
| 151 | drm_mode_connector_update_edid_property(connector, NULL); | 152 | drm_mode_connector_update_edid_property(connector, NULL); |
| 153 | verbose_prune = false; | ||
| 152 | goto prune; | 154 | goto prune; |
| 153 | } | 155 | } |
| 154 | 156 | ||
| @@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 182 | } | 184 | } |
| 183 | 185 | ||
| 184 | prune: | 186 | prune: |
| 185 | drm_mode_prune_invalid(dev, &connector->modes, true); | 187 | drm_mode_prune_invalid(dev, &connector->modes, verbose_prune); |
| 186 | 188 | ||
| 187 | if (list_empty(&connector->modes)) | 189 | if (list_empty(&connector->modes)) |
| 188 | return 0; | 190 | return 0; |
| @@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work) | |||
| 1005 | continue; | 1007 | continue; |
| 1006 | 1008 | ||
| 1007 | connector->status = connector->funcs->detect(connector, false); | 1009 | connector->status = connector->funcs->detect(connector, false); |
| 1008 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", | 1010 | if (old_status != connector->status) { |
| 1009 | connector->base.id, | 1011 | const char *old, *new; |
| 1010 | drm_get_connector_name(connector), | 1012 | |
| 1011 | old_status, connector->status); | 1013 | old = drm_get_connector_status_name(old_status); |
| 1012 | if (old_status != connector->status) | 1014 | new = drm_get_connector_status_name(connector->status); |
| 1015 | |||
| 1016 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " | ||
| 1017 | "status updated from %s to %s\n", | ||
| 1018 | connector->base.id, | ||
| 1019 | drm_get_connector_name(connector), | ||
| 1020 | old, new); | ||
| 1021 | |||
| 1013 | changed = true; | 1022 | changed = true; |
| 1023 | } | ||
| 1014 | } | 1024 | } |
| 1015 | 1025 | ||
| 1016 | mutex_unlock(&dev->mode_config.mutex); | 1026 | mutex_unlock(&dev->mode_config.mutex); |
| @@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) | |||
| 1083 | old_status = connector->status; | 1093 | old_status = connector->status; |
| 1084 | 1094 | ||
| 1085 | connector->status = connector->funcs->detect(connector, false); | 1095 | connector->status = connector->funcs->detect(connector, false); |
| 1086 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", | 1096 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", |
| 1087 | connector->base.id, | 1097 | connector->base.id, |
| 1088 | drm_get_connector_name(connector), | 1098 | drm_get_connector_name(connector), |
| 1089 | old_status, connector->status); | 1099 | drm_get_connector_status_name(old_status), |
| 1100 | drm_get_connector_status_name(connector->status)); | ||
| 1090 | if (old_status != connector->status) | 1101 | if (old_status != connector->status) |
| 1091 | changed = true; | 1102 | changed = true; |
| 1092 | } | 1103 | } |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8d4f29075af5..9cc247f55502 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data, | |||
| 57 | struct drm_file *file_priv); | 57 | struct drm_file *file_priv); |
| 58 | 58 | ||
| 59 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ | 59 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ |
| 60 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} | 60 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} |
| 61 | 61 | ||
| 62 | /** Ioctl table */ | 62 | /** Ioctl table */ |
| 63 | static const struct drm_ioctl_desc drm_ioctls[] = { | 63 | static const struct drm_ioctl_desc drm_ioctls[] = { |
| @@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp, | |||
| 375 | { | 375 | { |
| 376 | struct drm_file *file_priv = filp->private_data; | 376 | struct drm_file *file_priv = filp->private_data; |
| 377 | struct drm_device *dev; | 377 | struct drm_device *dev; |
| 378 | const struct drm_ioctl_desc *ioctl; | 378 | const struct drm_ioctl_desc *ioctl = NULL; |
| 379 | drm_ioctl_t *func; | 379 | drm_ioctl_t *func; |
| 380 | unsigned int nr = DRM_IOCTL_NR(cmd); | 380 | unsigned int nr = DRM_IOCTL_NR(cmd); |
| 381 | int retcode = -EINVAL; | 381 | int retcode = -EINVAL; |
| @@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp, | |||
| 392 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); | 392 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); |
| 393 | ++file_priv->ioctl_count; | 393 | ++file_priv->ioctl_count; |
| 394 | 394 | ||
| 395 | DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", | ||
| 396 | task_pid_nr(current), cmd, nr, | ||
| 397 | (long)old_encode_dev(file_priv->minor->device), | ||
| 398 | file_priv->authenticated); | ||
| 399 | |||
| 400 | if ((nr >= DRM_CORE_IOCTL_COUNT) && | 395 | if ((nr >= DRM_CORE_IOCTL_COUNT) && |
| 401 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) | 396 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) |
| 402 | goto err_i1; | 397 | goto err_i1; |
| @@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp, | |||
| 417 | } else | 412 | } else |
| 418 | goto err_i1; | 413 | goto err_i1; |
| 419 | 414 | ||
| 415 | DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", | ||
| 416 | task_pid_nr(current), | ||
| 417 | (long)old_encode_dev(file_priv->minor->device), | ||
| 418 | file_priv->authenticated, ioctl->name); | ||
| 419 | |||
| 420 | /* Do not trust userspace, use our own definition */ | 420 | /* Do not trust userspace, use our own definition */ |
| 421 | func = ioctl->func; | 421 | func = ioctl->func; |
| 422 | /* is there a local override? */ | 422 | /* is there a local override? */ |
| @@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp, | |||
| 471 | } | 471 | } |
| 472 | 472 | ||
| 473 | err_i1: | 473 | err_i1: |
| 474 | if (!ioctl) | ||
| 475 | DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", | ||
| 476 | task_pid_nr(current), | ||
| 477 | (long)old_encode_dev(file_priv->minor->device), | ||
| 478 | file_priv->authenticated, cmd, nr); | ||
| 479 | |||
| 474 | if (kdata != stack_kdata) | 480 | if (kdata != stack_kdata) |
| 475 | kfree(kdata); | 481 | kfree(kdata); |
| 476 | atomic_dec(&dev->ioctl_count); | 482 | atomic_dec(&dev->ioctl_count); |
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index 48c52f7df4e6..0cfb60f54766 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c | |||
| @@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev, | |||
| 54 | struct i2c_adapter *adap, | 54 | struct i2c_adapter *adap, |
| 55 | const struct i2c_board_info *info) | 55 | const struct i2c_board_info *info) |
| 56 | { | 56 | { |
| 57 | char modalias[sizeof(I2C_MODULE_PREFIX) | ||
| 58 | + I2C_NAME_SIZE]; | ||
| 59 | struct module *module = NULL; | 57 | struct module *module = NULL; |
| 60 | struct i2c_client *client; | 58 | struct i2c_client *client; |
| 61 | struct drm_i2c_encoder_driver *encoder_drv; | 59 | struct drm_i2c_encoder_driver *encoder_drv; |
| 62 | int err = 0; | 60 | int err = 0; |
| 63 | 61 | ||
| 64 | snprintf(modalias, sizeof(modalias), | 62 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); |
| 65 | "%s%s", I2C_MODULE_PREFIX, info->type); | ||
| 66 | request_module(modalias); | ||
| 67 | 63 | ||
| 68 | client = i2c_new_device(adap, info); | 64 | client = i2c_new_device(adap, info); |
| 69 | if (!client) { | 65 | if (!client) { |
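The snprintf() into an on-stack modalias buffer was unnecessary because request_module() itself takes a printf-style format (it is a macro around __request_module(true, fmt, ...)) and builds the module name internally, so the call collapses to a single line:

	request_module("%s%s", I2C_MODULE_PREFIX, info->type);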
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index db1e2d6f90d7..07cf99cc8862 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
| @@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | |||
| 755 | EXPORT_SYMBOL(drm_mm_debug_table); | 755 | EXPORT_SYMBOL(drm_mm_debug_table); |
| 756 | 756 | ||
| 757 | #if defined(CONFIG_DEBUG_FS) | 757 | #if defined(CONFIG_DEBUG_FS) |
| 758 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 758 | static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
| 759 | { | 759 | { |
| 760 | struct drm_mm_node *entry; | ||
| 761 | unsigned long total_used = 0, total_free = 0, total = 0; | ||
| 762 | unsigned long hole_start, hole_end, hole_size; | 760 | unsigned long hole_start, hole_end, hole_size; |
| 763 | 761 | ||
| 764 | hole_start = drm_mm_hole_node_start(&mm->head_node); | 762 | if (entry->hole_follows) { |
| 765 | hole_end = drm_mm_hole_node_end(&mm->head_node); | 763 | hole_start = drm_mm_hole_node_start(entry); |
| 766 | hole_size = hole_end - hole_start; | 764 | hole_end = drm_mm_hole_node_end(entry); |
| 767 | if (hole_size) | 765 | hole_size = hole_end - hole_start; |
| 768 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | 766 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", |
| 769 | hole_start, hole_end, hole_size); | 767 | hole_start, hole_end, hole_size); |
| 770 | total_free += hole_size; | 768 | return hole_size; |
| 769 | } | ||
| 770 | |||
| 771 | return 0; | ||
| 772 | } | ||
| 773 | |||
| 774 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | ||
| 775 | { | ||
| 776 | struct drm_mm_node *entry; | ||
| 777 | unsigned long total_used = 0, total_free = 0, total = 0; | ||
| 778 | |||
| 779 | total_free += drm_mm_dump_hole(m, &mm->head_node); | ||
| 771 | 780 | ||
| 772 | drm_mm_for_each_node(entry, mm) { | 781 | drm_mm_for_each_node(entry, mm) { |
| 773 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | 782 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", |
| 774 | entry->start, entry->start + entry->size, | 783 | entry->start, entry->start + entry->size, |
| 775 | entry->size); | 784 | entry->size); |
| 776 | total_used += entry->size; | 785 | total_used += entry->size; |
| 777 | if (entry->hole_follows) { | 786 | total_free += drm_mm_dump_hole(m, entry); |
| 778 | hole_start = drm_mm_hole_node_start(entry); | ||
| 779 | hole_end = drm_mm_hole_node_end(entry); | ||
| 780 | hole_size = hole_end - hole_start; | ||
| 781 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | ||
| 782 | hole_start, hole_end, hole_size); | ||
| 783 | total_free += hole_size; | ||
| 784 | } | ||
| 785 | } | 787 | } |
| 786 | total = total_free + total_used; | 788 | total = total_free + total_used; |
| 787 | 789 | ||
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index faa79df02648..a371ff865a88 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, | |||
| 1143 | was_digit = false; | 1143 | was_digit = false; |
| 1144 | } else | 1144 | } else |
| 1145 | goto done; | 1145 | goto done; |
| 1146 | break; | ||
| 1146 | case '0' ... '9': | 1147 | case '0' ... '9': |
| 1147 | was_digit = true; | 1148 | was_digit = true; |
| 1148 | break; | 1149 | break; |
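The added break closes an unintended fallthrough: without it, the branch that has just set was_digit = false falls straight into the '0' ... '9' case (a gcc range-case extension) and sets it back to true, corrupting the mode-string parse for the character that was just handled. The hazard in isolation, with a hypothetical parser case standing in for the real one:

	switch (c) {
	case '@':
		parse_extra(&mode);                      /* hypothetical handler */
		break;                                   /* the newly added statement; omit it and
							  * control falls into the digit case below */
	case '0' ... '9':
		was_digit = true;
		break;
	}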
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index bbfc3840080c..6652597586a1 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -2005,11 +2005,6 @@ static int hdmi_probe(struct platform_device *pdev) | |||
| 2005 | } | 2005 | } |
| 2006 | 2006 | ||
| 2007 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2007 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2008 | if (!res) { | ||
| 2009 | DRM_ERROR("failed to find registers\n"); | ||
| 2010 | return -ENOENT; | ||
| 2011 | } | ||
| 2012 | |||
| 2013 | hdata->regs = devm_ioremap_resource(&pdev->dev, res); | 2008 | hdata->regs = devm_ioremap_resource(&pdev->dev, res); |
| 2014 | if (IS_ERR(hdata->regs)) | 2009 | if (IS_ERR(hdata->regs)) |
| 2015 | return PTR_ERR(hdata->regs); | 2010 | return PTR_ERR(hdata->regs); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6be940effefd..6165535d15f0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1045 | if (timeout) { | 1045 | if (timeout) { |
| 1046 | struct timespec sleep_time = timespec_sub(now, before); | 1046 | struct timespec sleep_time = timespec_sub(now, before); |
| 1047 | *timeout = timespec_sub(*timeout, sleep_time); | 1047 | *timeout = timespec_sub(*timeout, sleep_time); |
| 1048 | if (!timespec_valid(timeout)) /* i.e. negative time remains */ | ||
| 1049 | set_normalized_timespec(timeout, 0, 0); | ||
| 1048 | } | 1050 | } |
| 1049 | 1051 | ||
| 1050 | switch (end) { | 1052 | switch (end) { |
| @@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1053 | case -ERESTARTSYS: /* Signal */ | 1055 | case -ERESTARTSYS: /* Signal */ |
| 1054 | return (int)end; | 1056 | return (int)end; |
| 1055 | case 0: /* Timeout */ | 1057 | case 0: /* Timeout */ |
| 1056 | if (timeout) | ||
| 1057 | set_normalized_timespec(timeout, 0, 0); | ||
| 1058 | return -ETIME; | 1058 | return -ETIME; |
| 1059 | default: /* Completed */ | 1059 | default: /* Completed */ |
| 1060 | WARN_ON(end < 0); /* We're not aware of other errors */ | 1060 | WARN_ON(end < 0); /* We're not aware of other errors */ |
| @@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 2377 | mutex_unlock(&dev->struct_mutex); | 2377 | mutex_unlock(&dev->struct_mutex); |
| 2378 | 2378 | ||
| 2379 | ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); | 2379 | ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); |
| 2380 | if (timeout) { | 2380 | if (timeout) |
| 2381 | WARN_ON(!timespec_valid(timeout)); | ||
| 2382 | args->timeout_ns = timespec_to_ns(timeout); | 2381 | args->timeout_ns = timespec_to_ns(timeout); |
| 2383 | } | ||
| 2384 | return ret; | 2382 | return ret; |
| 2385 | 2383 | ||
| 2386 | out: | 2384 | out: |
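
The __wait_seqno change moves the "no time left" handling into the common path: after subtracting the time already slept, a negative remainder is normalized to zero instead of only being zeroed on the timeout branch, so i915_gem_wait_ioctl can copy the value back unconditionally. A userspace sketch of the arithmetic, assuming ordinary POSIX timespec:

    #include <stdio.h>
    #include <time.h>

    static struct timespec ts_sub(struct timespec a, struct timespec b)
    {
        struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };

        if (r.tv_nsec < 0) {
            r.tv_sec--;
            r.tv_nsec += 1000000000L;
        }
        return r;
    }

    int main(void)
    {
        struct timespec timeout = { 0, 500000000L };   /* 0.5 s requested */
        struct timespec slept   = { 1, 0 };            /* but we waited 1 s */

        timeout = ts_sub(timeout, slept);
        if (timeout.tv_sec < 0)                        /* i.e. negative time remains */
            timeout = (struct timespec){ 0, 0 };

        printf("remaining: %lld.%09ld\n", (long long)timeout.tv_sec, timeout.tv_nsec);
        return 0;
    }
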
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dca614de71b6..bdb0d7717bc7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) | |||
| 709 | return snb_gmch_ctl << 25; /* 32 MB units */ | 709 | return snb_gmch_ctl << 25; /* 32 MB units */ |
| 710 | } | 710 | } |
| 711 | 711 | ||
| 712 | static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) | ||
| 713 | { | ||
| 714 | static const int stolen_decoder[] = { | ||
| 715 | 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; | ||
| 716 | snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; | ||
| 717 | snb_gmch_ctl &= IVB_GMCH_GMS_MASK; | ||
| 718 | return stolen_decoder[snb_gmch_ctl] << 20; | ||
| 719 | } | ||
| 720 | |||
| 721 | static int gen6_gmch_probe(struct drm_device *dev, | 712 | static int gen6_gmch_probe(struct drm_device *dev, |
| 722 | size_t *gtt_total, | 713 | size_t *gtt_total, |
| 723 | size_t *stolen, | 714 | size_t *stolen, |
| @@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev, | |||
| 747 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); | 738 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
| 748 | gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); | 739 | gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); |
| 749 | 740 | ||
| 750 | if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) | 741 | *stolen = gen6_get_stolen_size(snb_gmch_ctl); |
| 751 | *stolen = gen7_get_stolen_size(snb_gmch_ctl); | ||
| 752 | else | ||
| 753 | *stolen = gen6_get_stolen_size(snb_gmch_ctl); | ||
| 754 | |||
| 755 | *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; | 742 | *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; |
| 756 | 743 | ||
| 757 | /* For Modern GENs the PTEs and register space are split in the BAR */ | 744 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
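
With the gen7 decoder table gone, Ivy Bridge parts fall back to the gen6 decode, which simply extracts the GMS field from the GMCH control word and scales it in 32 MB units. A minimal standalone version mirroring the shape of gen6_get_stolen_size():

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SNB_GMCH_GMS_SHIFT 3
    #define SNB_GMCH_GMS_MASK  0x1f

    static size_t gen6_stolen_size(uint16_t snb_gmch_ctl)
    {
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return (size_t)snb_gmch_ctl << 25;   /* 32 MB units */
    }

    int main(void)
    {
        printf("GMS=2 -> %zu MB stolen\n",
               gen6_stolen_size(2 << SNB_GMCH_GMS_SHIFT) >> 20);
        return 0;
    }
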
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 83f9c26e1adb..2d6b62e42daf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -46,8 +46,6 @@ | |||
| 46 | #define SNB_GMCH_GGMS_MASK 0x3 | 46 | #define SNB_GMCH_GGMS_MASK 0x3 |
| 47 | #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ | 47 | #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ |
| 48 | #define SNB_GMCH_GMS_MASK 0x1f | 48 | #define SNB_GMCH_GMS_MASK 0x1f |
| 49 | #define IVB_GMCH_GMS_SHIFT 4 | ||
| 50 | #define IVB_GMCH_GMS_MASK 0xf | ||
| 51 | 49 | ||
| 52 | 50 | ||
| 53 | /* PCI config space */ | 51 | /* PCI config space */ |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 26a0a570f92e..fb961bb81903 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
| 1265 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 1265 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
| 1266 | intel_dp_start_link_train(intel_dp); | 1266 | intel_dp_start_link_train(intel_dp); |
| 1267 | intel_dp_complete_link_train(intel_dp); | 1267 | intel_dp_complete_link_train(intel_dp); |
| 1268 | if (port != PORT_A) | ||
| 1269 | intel_dp_stop_link_train(intel_dp); | ||
| 1268 | } | 1270 | } |
| 1269 | } | 1271 | } |
| 1270 | 1272 | ||
| @@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) | |||
| 1326 | } else if (type == INTEL_OUTPUT_EDP) { | 1328 | } else if (type == INTEL_OUTPUT_EDP) { |
| 1327 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1329 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
| 1328 | 1330 | ||
| 1331 | if (port == PORT_A) | ||
| 1332 | intel_dp_stop_link_train(intel_dp); | ||
| 1333 | |||
| 1329 | ironlake_edp_backlight_on(intel_dp); | 1334 | ironlake_edp_backlight_on(intel_dp); |
| 1330 | } | 1335 | } |
| 1331 | 1336 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fb2fbc1e08b9..3d704b706a8d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
| 702 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 702 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
| 703 | * bpc in between. */ | 703 | * bpc in between. */ |
| 704 | bpp = min_t(int, 8*3, pipe_config->pipe_bpp); | 704 | bpp = min_t(int, 8*3, pipe_config->pipe_bpp); |
| 705 | if (is_edp(intel_dp) && dev_priv->edp.bpp) | ||
| 706 | bpp = min_t(int, bpp, dev_priv->edp.bpp); | ||
| 707 | |||
| 705 | for (; bpp >= 6*3; bpp -= 2*3) { | 708 | for (; bpp >= 6*3; bpp -= 2*3) { |
| 706 | mode_rate = intel_dp_link_required(target_clock, bpp); | 709 | mode_rate = intel_dp_link_required(target_clock, bpp); |
| 707 | 710 | ||
| @@ -739,6 +742,7 @@ found: | |||
| 739 | intel_dp->link_bw = bws[clock]; | 742 | intel_dp->link_bw = bws[clock]; |
| 740 | intel_dp->lane_count = lane_count; | 743 | intel_dp->lane_count = lane_count; |
| 741 | adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); | 744 | adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); |
| 745 | pipe_config->pipe_bpp = bpp; | ||
| 742 | pipe_config->pixel_target_clock = target_clock; | 746 | pipe_config->pixel_target_clock = target_clock; |
| 743 | 747 | ||
| 744 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", | 748 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", |
| @@ -751,20 +755,6 @@ found: | |||
| 751 | target_clock, adjusted_mode->clock, | 755 | target_clock, adjusted_mode->clock, |
| 752 | &pipe_config->dp_m_n); | 756 | &pipe_config->dp_m_n); |
| 753 | 757 | ||
| 754 | /* | ||
| 755 | * XXX: We have a strange regression where using the vbt edp bpp value | ||
| 756 | * for the link bw computation results in black screens, the panel only | ||
| 757 | * works when we do the computation at the usual 24bpp (but still | ||
| 758 | * requires us to use 18bpp). Until that's fully debugged, stay | ||
| 759 | * bug-for-bug compatible with the old code. | ||
| 760 | */ | ||
| 761 | if (is_edp(intel_dp) && dev_priv->edp.bpp) { | ||
| 762 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", | ||
| 763 | bpp, dev_priv->edp.bpp); | ||
| 764 | bpp = min_t(int, bpp, dev_priv->edp.bpp); | ||
| 765 | } | ||
| 766 | pipe_config->pipe_bpp = bpp; | ||
| 767 | |||
| 768 | return true; | 758 | return true; |
| 769 | } | 759 | } |
| 770 | 760 | ||
| @@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
| 1389 | ironlake_edp_panel_on(intel_dp); | 1379 | ironlake_edp_panel_on(intel_dp); |
| 1390 | ironlake_edp_panel_vdd_off(intel_dp, true); | 1380 | ironlake_edp_panel_vdd_off(intel_dp, true); |
| 1391 | intel_dp_complete_link_train(intel_dp); | 1381 | intel_dp_complete_link_train(intel_dp); |
| 1382 | intel_dp_stop_link_train(intel_dp); | ||
| 1392 | ironlake_edp_backlight_on(intel_dp); | 1383 | ironlake_edp_backlight_on(intel_dp); |
| 1393 | } | 1384 | } |
| 1394 | 1385 | ||
| @@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
| 1711 | struct drm_i915_private *dev_priv = dev->dev_private; | 1702 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1712 | enum port port = intel_dig_port->port; | 1703 | enum port port = intel_dig_port->port; |
| 1713 | int ret; | 1704 | int ret; |
| 1714 | uint32_t temp; | ||
| 1715 | 1705 | ||
| 1716 | if (HAS_DDI(dev)) { | 1706 | if (HAS_DDI(dev)) { |
| 1717 | temp = I915_READ(DP_TP_CTL(port)); | 1707 | uint32_t temp = I915_READ(DP_TP_CTL(port)); |
| 1718 | 1708 | ||
| 1719 | if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) | 1709 | if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) |
| 1720 | temp |= DP_TP_CTL_SCRAMBLE_DISABLE; | 1710 | temp |= DP_TP_CTL_SCRAMBLE_DISABLE; |
| @@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
| 1724 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | 1714 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
| 1725 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | 1715 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
| 1726 | case DP_TRAINING_PATTERN_DISABLE: | 1716 | case DP_TRAINING_PATTERN_DISABLE: |
| 1727 | |||
| 1728 | if (port != PORT_A) { | ||
| 1729 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; | ||
| 1730 | I915_WRITE(DP_TP_CTL(port), temp); | ||
| 1731 | |||
| 1732 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & | ||
| 1733 | DP_TP_STATUS_IDLE_DONE), 1)) | ||
| 1734 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | ||
| 1735 | |||
| 1736 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; | 1717 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
| 1740 | 1718 | ||
| 1741 | break; | 1719 | break; |
| @@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
| 1811 | return true; | 1789 | return true; |
| 1812 | } | 1790 | } |
| 1813 | 1791 | ||
| 1792 | static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) | ||
| 1793 | { | ||
| 1794 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
| 1795 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
| 1796 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1797 | enum port port = intel_dig_port->port; | ||
| 1798 | uint32_t val; | ||
| 1799 | |||
| 1800 | if (!HAS_DDI(dev)) | ||
| 1801 | return; | ||
| 1802 | |||
| 1803 | val = I915_READ(DP_TP_CTL(port)); | ||
| 1804 | val &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
| 1805 | val |= DP_TP_CTL_LINK_TRAIN_IDLE; | ||
| 1806 | I915_WRITE(DP_TP_CTL(port), val); | ||
| 1807 | |||
| 1808 | /* | ||
| 1809 | * On PORT_A we can have only eDP in SST mode. There the only reason | ||
| 1810 | * we need to set idle transmission mode is to work around a HW issue | ||
| 1811 | * where we enable the pipe while not in idle link-training mode. | ||
| 1812 | * In this case there is requirement to wait for a minimum number of | ||
| 1813 | * idle patterns to be sent. | ||
| 1814 | */ | ||
| 1815 | if (port == PORT_A) | ||
| 1816 | return; | ||
| 1817 | |||
| 1818 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), | ||
| 1819 | 1)) | ||
| 1820 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | ||
| 1821 | } | ||
| 1822 | |||
| 1814 | /* Enable corresponding port and start training pattern 1 */ | 1823 | /* Enable corresponding port and start training pattern 1 */ |
| 1815 | void | 1824 | void |
| 1816 | intel_dp_start_link_train(struct intel_dp *intel_dp) | 1825 | intel_dp_start_link_train(struct intel_dp *intel_dp) |
| @@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
| 1953 | ++tries; | 1962 | ++tries; |
| 1954 | } | 1963 | } |
| 1955 | 1964 | ||
| 1965 | intel_dp_set_idle_link_train(intel_dp); | ||
| 1966 | |||
| 1967 | intel_dp->DP = DP; | ||
| 1968 | |||
| 1956 | if (channel_eq) | 1969 | if (channel_eq) |
| 1957 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); | 1970 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); |
| 1958 | 1971 | ||
| 1959 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); | 1972 | } |
| 1973 | |||
| 1974 | void intel_dp_stop_link_train(struct intel_dp *intel_dp) | ||
| 1975 | { | ||
| 1976 | intel_dp_set_link_train(intel_dp, intel_dp->DP, | ||
| 1977 | DP_TRAINING_PATTERN_DISABLE); | ||
| 1960 | } | 1978 | } |
| 1961 | 1979 | ||
| 1962 | static void | 1980 | static void |
| @@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
| 2164 | drm_get_encoder_name(&intel_encoder->base)); | 2182 | drm_get_encoder_name(&intel_encoder->base)); |
| 2165 | intel_dp_start_link_train(intel_dp); | 2183 | intel_dp_start_link_train(intel_dp); |
| 2166 | intel_dp_complete_link_train(intel_dp); | 2184 | intel_dp_complete_link_train(intel_dp); |
| 2185 | intel_dp_stop_link_train(intel_dp); | ||
| 2167 | } | 2186 | } |
| 2168 | } | 2187 | } |
| 2169 | 2188 | ||
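
Taken together, the intel_ddi and intel_dp hunks split link training into three caller-visible steps: start, complete (which now programs the idle pattern through the new intel_dp_set_idle_link_train() and caches intel_dp->DP), and a separate stop that switches to the normal pattern. DDI ports other than PORT_A stop right after training in the pre-enable hook; PORT_A (eDP) defers the stop until intel_enable_ddi. A stub-only sketch of that ordering, not the driver API:

    #include <stdio.h>

    enum port { PORT_A, PORT_B };

    /* Mirrors the new intel_dp_set_idle_link_train(): program the idle pattern,
     * and only poll for idle-done on ports other than PORT_A (see the comment
     * in the hunk above for the PORT_A reasoning). */
    static void set_idle_link_train(enum port port)
    {
        puts("  DP_TP_CTL <- LINK_TRAIN_IDLE");
        if (port == PORT_A)
            return;
        puts("  wait for DP_TP_STATUS_IDLE_DONE");
    }

    static void complete_link_train(enum port port)
    {
        puts("channel equalization done");
        set_idle_link_train(port);       /* now part of complete_link_train */
    }

    static void stop_link_train(void)
    {
        puts("DP_TP_CTL <- LINK_TRAIN_NORMAL (training pattern disabled)");
    }

    int main(void)
    {
        enum port port = PORT_B;

        puts("start_link_train");
        complete_link_train(port);
        stop_link_train();               /* deferred until enable for PORT_A */
        return 0;
    }
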
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b5b6d19e6dd3..624a9e6b8d71 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 499 | extern void intel_dp_init_link_config(struct intel_dp *intel_dp); | 499 | extern void intel_dp_init_link_config(struct intel_dp *intel_dp); |
| 500 | extern void intel_dp_start_link_train(struct intel_dp *intel_dp); | 500 | extern void intel_dp_start_link_train(struct intel_dp *intel_dp); |
| 501 | extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 501 | extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
| 502 | extern void intel_dp_stop_link_train(struct intel_dp *intel_dp); | ||
| 502 | extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 503 | extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
| 503 | extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); | 504 | extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
| 504 | extern void intel_dp_check_link_status(struct intel_dp *intel_dp); | 505 | extern void intel_dp_check_link_status(struct intel_dp *intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 0e19e575a1b4..6b7c3ca2c035 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev) | |||
| 262 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) | 262 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) |
| 263 | { | 263 | { |
| 264 | drm_i915_private_t *dev_priv = dev->dev_private; | 264 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 265 | if (!dev_priv->fbdev) | 265 | struct intel_fbdev *ifbdev = dev_priv->fbdev; |
| 266 | struct fb_info *info; | ||
| 267 | |||
| 268 | if (!ifbdev) | ||
| 266 | return; | 269 | return; |
| 267 | 270 | ||
| 268 | fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); | 271 | info = ifbdev->helper.fbdev; |
| 272 | |||
| 273 | /* On resume from hibernation: If the object is shmemfs backed, it has | ||
| 274 | * been restored from swap. If the object is stolen however, it will be | ||
| 275 | * full of whatever garbage was left in there. | ||
| 276 | */ | ||
| 277 | if (!state && ifbdev->ifb.obj->stolen) | ||
| 278 | memset_io(info->screen_base, 0, info->screen_size); | ||
| 279 | |||
| 280 | fb_set_suspend(info, state); | ||
| 269 | } | 281 | } |
| 270 | 282 | ||
| 271 | MODULE_LICENSE("GPL and additional rights"); | 283 | MODULE_LICENSE("GPL and additional rights"); |
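
The intel_fb change only matters on resume from hibernation: shmemfs-backed framebuffers come back from swap, but a framebuffer allocated from stolen memory keeps whatever was left there, so it is cleared before fb_set_suspend() runs. A minimal sketch of that decision with illustrative types (the kernel clears the mapped screen base with memset_io()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_fb {
        bool stolen;              /* backed by stolen memory rather than shmemfs */
        unsigned char mem[16];    /* stand-in for info->screen_base */
    };

    static void set_suspend(struct fake_fb *fb, int state)
    {
        if (!state && fb->stolen)                   /* resuming, contents are garbage */
            memset(fb->mem, 0, sizeof(fb->mem));    /* the driver uses memset_io() */
        /* ... then hand off to fb_set_suspend() in the real code ... */
    }

    int main(void)
    {
        struct fake_fb fb = { .stolen = true, .mem = { 0xde, 0xad } };

        set_suspend(&fb, 0);
        printf("first byte after resume: %#x\n", fb.mem[0]);
        return 0;
    }
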
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index de3b0dc5658b..aa01128ff192 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev) | |||
| 1301 | 1301 | ||
| 1302 | vlv_update_drain_latency(dev); | 1302 | vlv_update_drain_latency(dev); |
| 1303 | 1303 | ||
| 1304 | if (g4x_compute_wm0(dev, 0, | 1304 | if (g4x_compute_wm0(dev, PIPE_A, |
| 1305 | &valleyview_wm_info, latency_ns, | 1305 | &valleyview_wm_info, latency_ns, |
| 1306 | &valleyview_cursor_wm_info, latency_ns, | 1306 | &valleyview_cursor_wm_info, latency_ns, |
| 1307 | &planea_wm, &cursora_wm)) | 1307 | &planea_wm, &cursora_wm)) |
| 1308 | enabled |= 1; | 1308 | enabled |= 1 << PIPE_A; |
| 1309 | 1309 | ||
| 1310 | if (g4x_compute_wm0(dev, 1, | 1310 | if (g4x_compute_wm0(dev, PIPE_B, |
| 1311 | &valleyview_wm_info, latency_ns, | 1311 | &valleyview_wm_info, latency_ns, |
| 1312 | &valleyview_cursor_wm_info, latency_ns, | 1312 | &valleyview_cursor_wm_info, latency_ns, |
| 1313 | &planeb_wm, &cursorb_wm)) | 1313 | &planeb_wm, &cursorb_wm)) |
| 1314 | enabled |= 2; | 1314 | enabled |= 1 << PIPE_B; |
| 1315 | 1315 | ||
| 1316 | if (single_plane_enabled(enabled) && | 1316 | if (single_plane_enabled(enabled) && |
| 1317 | g4x_compute_srwm(dev, ffs(enabled) - 1, | 1317 | g4x_compute_srwm(dev, ffs(enabled) - 1, |
| @@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev) | |||
| 1357 | int plane_sr, cursor_sr; | 1357 | int plane_sr, cursor_sr; |
| 1358 | unsigned int enabled = 0; | 1358 | unsigned int enabled = 0; |
| 1359 | 1359 | ||
| 1360 | if (g4x_compute_wm0(dev, 0, | 1360 | if (g4x_compute_wm0(dev, PIPE_A, |
| 1361 | &g4x_wm_info, latency_ns, | 1361 | &g4x_wm_info, latency_ns, |
| 1362 | &g4x_cursor_wm_info, latency_ns, | 1362 | &g4x_cursor_wm_info, latency_ns, |
| 1363 | &planea_wm, &cursora_wm)) | 1363 | &planea_wm, &cursora_wm)) |
| 1364 | enabled |= 1; | 1364 | enabled |= 1 << PIPE_A; |
| 1365 | 1365 | ||
| 1366 | if (g4x_compute_wm0(dev, 1, | 1366 | if (g4x_compute_wm0(dev, PIPE_B, |
| 1367 | &g4x_wm_info, latency_ns, | 1367 | &g4x_wm_info, latency_ns, |
| 1368 | &g4x_cursor_wm_info, latency_ns, | 1368 | &g4x_cursor_wm_info, latency_ns, |
| 1369 | &planeb_wm, &cursorb_wm)) | 1369 | &planeb_wm, &cursorb_wm)) |
| 1370 | enabled |= 2; | 1370 | enabled |= 1 << PIPE_B; |
| 1371 | 1371 | ||
| 1372 | if (single_plane_enabled(enabled) && | 1372 | if (single_plane_enabled(enabled) && |
| 1373 | g4x_compute_srwm(dev, ffs(enabled) - 1, | 1373 | g4x_compute_srwm(dev, ffs(enabled) - 1, |
| @@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev) | |||
| 1716 | unsigned int enabled; | 1716 | unsigned int enabled; |
| 1717 | 1717 | ||
| 1718 | enabled = 0; | 1718 | enabled = 0; |
| 1719 | if (g4x_compute_wm0(dev, 0, | 1719 | if (g4x_compute_wm0(dev, PIPE_A, |
| 1720 | &ironlake_display_wm_info, | 1720 | &ironlake_display_wm_info, |
| 1721 | ILK_LP0_PLANE_LATENCY, | 1721 | ILK_LP0_PLANE_LATENCY, |
| 1722 | &ironlake_cursor_wm_info, | 1722 | &ironlake_cursor_wm_info, |
| @@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev) | |||
| 1727 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 1727 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
| 1728 | " plane %d, " "cursor: %d\n", | 1728 | " plane %d, " "cursor: %d\n", |
| 1729 | plane_wm, cursor_wm); | 1729 | plane_wm, cursor_wm); |
| 1730 | enabled |= 1; | 1730 | enabled |= 1 << PIPE_A; |
| 1731 | } | 1731 | } |
| 1732 | 1732 | ||
| 1733 | if (g4x_compute_wm0(dev, 1, | 1733 | if (g4x_compute_wm0(dev, PIPE_B, |
| 1734 | &ironlake_display_wm_info, | 1734 | &ironlake_display_wm_info, |
| 1735 | ILK_LP0_PLANE_LATENCY, | 1735 | ILK_LP0_PLANE_LATENCY, |
| 1736 | &ironlake_cursor_wm_info, | 1736 | &ironlake_cursor_wm_info, |
| @@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev) | |||
| 1741 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 1741 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
| 1742 | " plane %d, cursor: %d\n", | 1742 | " plane %d, cursor: %d\n", |
| 1743 | plane_wm, cursor_wm); | 1743 | plane_wm, cursor_wm); |
| 1744 | enabled |= 2; | 1744 | enabled |= 1 << PIPE_B; |
| 1745 | } | 1745 | } |
| 1746 | 1746 | ||
| 1747 | /* | 1747 | /* |
| @@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev) | |||
| 1801 | unsigned int enabled; | 1801 | unsigned int enabled; |
| 1802 | 1802 | ||
| 1803 | enabled = 0; | 1803 | enabled = 0; |
| 1804 | if (g4x_compute_wm0(dev, 0, | 1804 | if (g4x_compute_wm0(dev, PIPE_A, |
| 1805 | &sandybridge_display_wm_info, latency, | 1805 | &sandybridge_display_wm_info, latency, |
| 1806 | &sandybridge_cursor_wm_info, latency, | 1806 | &sandybridge_cursor_wm_info, latency, |
| 1807 | &plane_wm, &cursor_wm)) { | 1807 | &plane_wm, &cursor_wm)) { |
| @@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev) | |||
| 1812 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 1812 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
| 1813 | " plane %d, " "cursor: %d\n", | 1813 | " plane %d, " "cursor: %d\n", |
| 1814 | plane_wm, cursor_wm); | 1814 | plane_wm, cursor_wm); |
| 1815 | enabled |= 1; | 1815 | enabled |= 1 << PIPE_A; |
| 1816 | } | 1816 | } |
| 1817 | 1817 | ||
| 1818 | if (g4x_compute_wm0(dev, 1, | 1818 | if (g4x_compute_wm0(dev, PIPE_B, |
| 1819 | &sandybridge_display_wm_info, latency, | 1819 | &sandybridge_display_wm_info, latency, |
| 1820 | &sandybridge_cursor_wm_info, latency, | 1820 | &sandybridge_cursor_wm_info, latency, |
| 1821 | &plane_wm, &cursor_wm)) { | 1821 | &plane_wm, &cursor_wm)) { |
| @@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev) | |||
| 1826 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 1826 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
| 1827 | " plane %d, cursor: %d\n", | 1827 | " plane %d, cursor: %d\n", |
| 1828 | plane_wm, cursor_wm); | 1828 | plane_wm, cursor_wm); |
| 1829 | enabled |= 2; | 1829 | enabled |= 1 << PIPE_B; |
| 1830 | } | 1830 | } |
| 1831 | 1831 | ||
| 1832 | /* | 1832 | /* |
| @@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev) | |||
| 1904 | unsigned int enabled; | 1904 | unsigned int enabled; |
| 1905 | 1905 | ||
| 1906 | enabled = 0; | 1906 | enabled = 0; |
| 1907 | if (g4x_compute_wm0(dev, 0, | 1907 | if (g4x_compute_wm0(dev, PIPE_A, |
| 1908 | &sandybridge_display_wm_info, latency, | 1908 | &sandybridge_display_wm_info, latency, |
| 1909 | &sandybridge_cursor_wm_info, latency, | 1909 | &sandybridge_cursor_wm_info, latency, |
| 1910 | &plane_wm, &cursor_wm)) { | 1910 | &plane_wm, &cursor_wm)) { |
| @@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev) | |||
| 1915 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 1915 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
| 1916 | " plane %d, " "cursor: %d\n", | 1916 | " plane %d, " "cursor: %d\n", |
| 1917 | plane_wm, cursor_wm); | 1917 | plane_wm, cursor_wm); |
| 1918 | enabled |= 1; | 1918 | enabled |= 1 << PIPE_A; |
| 1919 | } | 1919 | } |
| 1920 | 1920 | ||
| 1921 | if (g4x_compute_wm0(dev, 1, | 1921 | if (g4x_compute_wm0(dev, PIPE_B, |
| 1922 | &sandybridge_display_wm_info, latency, | 1922 | &sandybridge_display_wm_info, latency, |
| 1923 | &sandybridge_cursor_wm_info, latency, | 1923 | &sandybridge_cursor_wm_info, latency, |
| 1924 | &plane_wm, &cursor_wm)) { | 1924 | &plane_wm, &cursor_wm)) { |
| @@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev) | |||
| 1929 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 1929 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
| 1930 | " plane %d, cursor: %d\n", | 1930 | " plane %d, cursor: %d\n", |
| 1931 | plane_wm, cursor_wm); | 1931 | plane_wm, cursor_wm); |
| 1932 | enabled |= 2; | 1932 | enabled |= 1 << PIPE_B; |
| 1933 | } | 1933 | } |
| 1934 | 1934 | ||
| 1935 | if (g4x_compute_wm0(dev, 2, | 1935 | if (g4x_compute_wm0(dev, PIPE_C, |
| 1936 | &sandybridge_display_wm_info, latency, | 1936 | &sandybridge_display_wm_info, latency, |
| 1937 | &sandybridge_cursor_wm_info, latency, | 1937 | &sandybridge_cursor_wm_info, latency, |
| 1938 | &plane_wm, &cursor_wm)) { | 1938 | &plane_wm, &cursor_wm)) { |
| @@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev) | |||
| 1943 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | 1943 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" |
| 1944 | " plane %d, cursor: %d\n", | 1944 | " plane %d, cursor: %d\n", |
| 1945 | plane_wm, cursor_wm); | 1945 | plane_wm, cursor_wm); |
| 1946 | enabled |= 3; | 1946 | enabled |= 1 << PIPE_C; |
| 1947 | } | 1947 | } |
| 1948 | 1948 | ||
| 1949 | /* | 1949 | /* |
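
Most of the intel_pm hunks are a readability change from magic pipe numbers to the pipe enum, but the Ivy Bridge pipe C case is a real fix: the old code ORed in the literal 3, which sets the pipe A and B bits rather than bit 2. A quick check:

    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };

    int main(void)
    {
        unsigned int old_mask = 0, new_mask = 0;

        old_mask |= 3;             /* what the old pipe C branch did */
        new_mask |= 1 << PIPE_C;   /* what the patch does */

        printf("old: %#x  new: %#x\n", old_mask, new_mask);   /* 0x3 vs 0x4 */
        return 0;
    }
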
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index f9889658329b..77b8a45fb10a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
| 46 | 46 | ||
| 47 | static inline void mga_wait_vsync(struct mga_device *mdev) | 47 | static inline void mga_wait_vsync(struct mga_device *mdev) |
| 48 | { | 48 | { |
| 49 | unsigned int count = 0; | 49 | unsigned long timeout = jiffies + HZ/10; |
| 50 | unsigned int status = 0; | 50 | unsigned int status = 0; |
| 51 | 51 | ||
| 52 | do { | 52 | do { |
| 53 | status = RREG32(MGAREG_Status); | 53 | status = RREG32(MGAREG_Status); |
| 54 | count++; | 54 | } while ((status & 0x08) && time_before(jiffies, timeout)); |
| 55 | } while ((status & 0x08) && (count < 250000)); | 55 | timeout = jiffies + HZ/10; |
| 56 | count = 0; | ||
| 57 | status = 0; | 56 | status = 0; |
| 58 | do { | 57 | do { |
| 59 | status = RREG32(MGAREG_Status); | 58 | status = RREG32(MGAREG_Status); |
| 60 | count++; | 59 | } while (!(status & 0x08) && time_before(jiffies, timeout)); |
| 61 | } while (!(status & 0x08) && (count < 250000)); | ||
| 62 | } | 60 | } |
| 63 | 61 | ||
| 64 | static inline void mga_wait_busy(struct mga_device *mdev) | 62 | static inline void mga_wait_busy(struct mga_device *mdev) |
| 65 | { | 63 | { |
| 66 | unsigned int count = 0; | 64 | unsigned long timeout = jiffies + HZ; |
| 67 | unsigned int status = 0; | 65 | unsigned int status = 0; |
| 68 | do { | 66 | do { |
| 69 | status = RREG8(MGAREG_Status + 2); | 67 | status = RREG8(MGAREG_Status + 2); |
| 70 | count++; | 68 | } while ((status & 0x01) && time_before(jiffies, timeout)); |
| 71 | } while ((status & 0x01) && (count < 500000)); | ||
| 72 | } | 69 | } |
| 73 | 70 | ||
| 74 | /* | 71 | /* |
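
The two loops above change from "spin for at most N iterations" to "spin for at most a fixed wall-clock time", the usual jiffies plus time_before() pattern (HZ/10 for the vsync wait, HZ for the busy wait). A userspace analogue of that pattern using CLOCK_MONOTONIC, purely illustrative:

    #define _POSIX_C_SOURCE 199309L
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static double now_s(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    /* Stand-in for "RREG32(MGAREG_Status) & 0x08". */
    static bool hw_busy(void)
    {
        return false;
    }

    int main(void)
    {
        double timeout = now_s() + 0.1;   /* HZ/10 in the patch, i.e. 100 ms */

        while (hw_busy() && now_s() < timeout)
            ;                             /* poll until idle or out of time */

        printf("done (status cleared or timed out)\n");
        return 0;
    }
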
| @@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) | |||
| 189 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 186 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 190 | tmp = RREG8(DAC_DATA); | 187 | tmp = RREG8(DAC_DATA); |
| 191 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; | 188 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 192 | WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); | 189 | WREG8(DAC_DATA, tmp); |
| 193 | 190 | ||
| 194 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); | 191 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); |
| 195 | tmp = RREG8(DAC_DATA); | 192 | tmp = RREG8(DAC_DATA); |
| 196 | tmp |= MGA1064_REMHEADCTL_CLKDIS; | 193 | tmp |= MGA1064_REMHEADCTL_CLKDIS; |
| 197 | WREG_DAC(MGA1064_REMHEADCTL, tmp); | 194 | WREG8(DAC_DATA, tmp); |
| 198 | 195 | ||
| 199 | /* select PLL Set C */ | 196 | /* select PLL Set C */ |
| 200 | tmp = RREG8(MGAREG_MEM_MISC_READ); | 197 | tmp = RREG8(MGAREG_MEM_MISC_READ); |
| @@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) | |||
| 204 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 201 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 205 | tmp = RREG8(DAC_DATA); | 202 | tmp = RREG8(DAC_DATA); |
| 206 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; | 203 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; |
| 207 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 204 | WREG8(DAC_DATA, tmp); |
| 208 | 205 | ||
| 209 | udelay(500); | 206 | udelay(500); |
| 210 | 207 | ||
| @@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) | |||
| 212 | WREG8(DAC_INDEX, MGA1064_VREF_CTL); | 209 | WREG8(DAC_INDEX, MGA1064_VREF_CTL); |
| 213 | tmp = RREG8(DAC_DATA); | 210 | tmp = RREG8(DAC_DATA); |
| 214 | tmp &= ~0x04; | 211 | tmp &= ~0x04; |
| 215 | WREG_DAC(MGA1064_VREF_CTL, tmp); | 212 | WREG8(DAC_DATA, tmp); |
| 216 | 213 | ||
| 217 | udelay(50); | 214 | udelay(50); |
| 218 | 215 | ||
| @@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) | |||
| 236 | tmp = RREG8(DAC_DATA); | 233 | tmp = RREG8(DAC_DATA); |
| 237 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; | 234 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; |
| 238 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; | 235 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; |
| 239 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 236 | WREG8(DAC_DATA, tmp); |
| 240 | 237 | ||
| 241 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); | 238 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); |
| 242 | tmp = RREG8(DAC_DATA); | 239 | tmp = RREG8(DAC_DATA); |
| 243 | tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; | 240 | tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; |
| 244 | tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; | 241 | tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; |
| 245 | WREG_DAC(MGA1064_REMHEADCTL, tmp); | 242 | WREG8(DAC_DATA, tmp); |
| 246 | 243 | ||
| 247 | /* reset dotclock rate bit */ | 244 | /* reset dotclock rate bit */ |
| 248 | WREG8(MGAREG_SEQ_INDEX, 1); | 245 | WREG8(MGAREG_SEQ_INDEX, 1); |
| @@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) | |||
| 253 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 250 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 254 | tmp = RREG8(DAC_DATA); | 251 | tmp = RREG8(DAC_DATA); |
| 255 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; | 252 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 256 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 253 | WREG8(DAC_DATA, tmp); |
| 257 | 254 | ||
| 258 | vcount = RREG8(MGAREG_VCOUNT); | 255 | vcount = RREG8(MGAREG_VCOUNT); |
| 259 | 256 | ||
| @@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) | |||
| 318 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 315 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 319 | tmp = RREG8(DAC_DATA); | 316 | tmp = RREG8(DAC_DATA); |
| 320 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; | 317 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 321 | WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); | 318 | WREG8(DAC_DATA, tmp); |
| 322 | 319 | ||
| 323 | tmp = RREG8(MGAREG_MEM_MISC_READ); | 320 | tmp = RREG8(MGAREG_MEM_MISC_READ); |
| 324 | tmp |= 0x3 << 2; | 321 | tmp |= 0x3 << 2; |
| @@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) | |||
| 326 | 323 | ||
| 327 | WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); | 324 | WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); |
| 328 | tmp = RREG8(DAC_DATA); | 325 | tmp = RREG8(DAC_DATA); |
| 329 | WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); | 326 | WREG8(DAC_DATA, tmp & ~0x40); |
| 330 | 327 | ||
| 331 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 328 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 332 | tmp = RREG8(DAC_DATA); | 329 | tmp = RREG8(DAC_DATA); |
| 333 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; | 330 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; |
| 334 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 331 | WREG8(DAC_DATA, tmp); |
| 335 | 332 | ||
| 336 | WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); | 333 | WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); |
| 337 | WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); | 334 | WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); |
| @@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) | |||
| 342 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 339 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 343 | tmp = RREG8(DAC_DATA); | 340 | tmp = RREG8(DAC_DATA); |
| 344 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; | 341 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; |
| 345 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 342 | WREG8(DAC_DATA, tmp); |
| 346 | 343 | ||
| 347 | udelay(500); | 344 | udelay(500); |
| 348 | 345 | ||
| @@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) | |||
| 350 | tmp = RREG8(DAC_DATA); | 347 | tmp = RREG8(DAC_DATA); |
| 351 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; | 348 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; |
| 352 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; | 349 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; |
| 353 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 350 | WREG8(DAC_DATA, tmp); |
| 354 | 351 | ||
| 355 | WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); | 352 | WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); |
| 356 | tmp = RREG8(DAC_DATA); | 353 | tmp = RREG8(DAC_DATA); |
| 357 | WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); | 354 | WREG8(DAC_DATA, tmp | 0x40); |
| 358 | 355 | ||
| 359 | tmp = RREG8(MGAREG_MEM_MISC_READ); | 356 | tmp = RREG8(MGAREG_MEM_MISC_READ); |
| 360 | tmp |= (0x3 << 2); | 357 | tmp |= (0x3 << 2); |
| @@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) | |||
| 363 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 360 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 364 | tmp = RREG8(DAC_DATA); | 361 | tmp = RREG8(DAC_DATA); |
| 365 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; | 362 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 366 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 363 | WREG8(DAC_DATA, tmp); |
| 367 | 364 | ||
| 368 | return 0; | 365 | return 0; |
| 369 | } | 366 | } |
| @@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
| 416 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 413 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 417 | tmp = RREG8(DAC_DATA); | 414 | tmp = RREG8(DAC_DATA); |
| 418 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; | 415 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 419 | WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); | 416 | WREG8(DAC_DATA, tmp); |
| 420 | 417 | ||
| 421 | tmp = RREG8(MGAREG_MEM_MISC_READ); | 418 | tmp = RREG8(MGAREG_MEM_MISC_READ); |
| 422 | tmp |= 0x3 << 2; | 419 | tmp |= 0x3 << 2; |
| @@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
| 425 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 422 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 426 | tmp = RREG8(DAC_DATA); | 423 | tmp = RREG8(DAC_DATA); |
| 427 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; | 424 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; |
| 428 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 425 | WREG8(DAC_DATA, tmp); |
| 429 | 426 | ||
| 430 | udelay(500); | 427 | udelay(500); |
| 431 | 428 | ||
| @@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
| 439 | tmp = RREG8(DAC_DATA); | 436 | tmp = RREG8(DAC_DATA); |
| 440 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; | 437 | tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; |
| 441 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; | 438 | tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; |
| 442 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 439 | WREG8(DAC_DATA, tmp); |
| 443 | 440 | ||
| 444 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 441 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 445 | tmp = RREG8(DAC_DATA); | 442 | tmp = RREG8(DAC_DATA); |
| 446 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; | 443 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 447 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; | 444 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; |
| 448 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 445 | WREG8(DAC_DATA, tmp); |
| 449 | 446 | ||
| 450 | vcount = RREG8(MGAREG_VCOUNT); | 447 | vcount = RREG8(MGAREG_VCOUNT); |
| 451 | 448 | ||
| @@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) | |||
| 515 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); | 512 | WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); |
| 516 | tmp = RREG8(DAC_DATA); | 513 | tmp = RREG8(DAC_DATA); |
| 517 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; | 514 | tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 518 | WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); | 515 | WREG8(DAC_DATA, tmp); |
| 519 | 516 | ||
| 520 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); | 517 | WREG8(DAC_INDEX, MGA1064_REMHEADCTL); |
| 521 | tmp = RREG8(DAC_DATA); | 518 | tmp = RREG8(DAC_DATA); |
| 522 | tmp |= MGA1064_REMHEADCTL_CLKDIS; | 519 | tmp |= MGA1064_REMHEADCTL_CLKDIS; |
| 523 | WREG_DAC(MGA1064_REMHEADCTL, tmp); | 520 | WREG8(DAC_DATA, tmp); |
| 524 | 521 | ||
| 525 | tmp = RREG8(MGAREG_MEM_MISC_READ); | 522 | tmp = RREG8(MGAREG_MEM_MISC_READ); |
| 526 | tmp |= (0x3<<2) | 0xc0; | 523 | tmp |= (0x3<<2) | 0xc0; |
| @@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) | |||
| 530 | tmp = RREG8(DAC_DATA); | 527 | tmp = RREG8(DAC_DATA); |
| 531 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; | 528 | tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; |
| 532 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; | 529 | tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; |
| 533 | WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); | 530 | WREG8(DAC_DATA, tmp); |
| 534 | 531 | ||
| 535 | udelay(500); | 532 | udelay(500); |
| 536 | 533 | ||
| @@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc) | |||
| 657 | WREG_DAC(MGA1064_GEN_IO_DATA, tmp); | 654 | WREG_DAC(MGA1064_GEN_IO_DATA, tmp); |
| 658 | } | 655 | } |
| 659 | 656 | ||
| 660 | 657 | /* | |
| 658 | This is how the framebuffer base address is stored in g200 cards: | ||
| 659 | * Assume @offset is the gpu_addr variable of the framebuffer object | ||
| 660 | * Then addr is the number of _pixels_ (not bytes) from the start of | ||
| 661 | VRAM to the first pixel we want to display. (divided by 2 for 32bit | ||
| 662 | framebuffers) | ||
| 663 | * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers | ||
| 664 | addr<20> -> CRTCEXT0<6> | ||
| 665 | addr<19-16> -> CRTCEXT0<3-0> | ||
| 666 | addr<15-8> -> CRTCC<7-0> | ||
| 667 | addr<7-0> -> CRTCD<7-0> | ||
| 668 | CRTCEXT0 has to be programmed last to trigger an update and make the | ||
| 669 | new addr variable take effect. | ||
| 670 | */ | ||
| 661 | void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) | 671 | void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) |
| 662 | { | 672 | { |
| 663 | struct mga_device *mdev = crtc->dev->dev_private; | 673 | struct mga_device *mdev = crtc->dev->dev_private; |
| 664 | u32 addr; | 674 | u32 addr; |
| 665 | int count; | 675 | int count; |
| 676 | u8 crtcext0; | ||
| 666 | 677 | ||
| 667 | while (RREG8(0x1fda) & 0x08); | 678 | while (RREG8(0x1fda) & 0x08); |
| 668 | while (!(RREG8(0x1fda) & 0x08)); | 679 | while (!(RREG8(0x1fda) & 0x08)); |
| @@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) | |||
| 670 | count = RREG8(MGAREG_VCOUNT) + 2; | 681 | count = RREG8(MGAREG_VCOUNT) + 2; |
| 671 | while (RREG8(MGAREG_VCOUNT) < count); | 682 | while (RREG8(MGAREG_VCOUNT) < count); |
| 672 | 683 | ||
| 673 | addr = offset >> 2; | 684 | WREG8(MGAREG_CRTCEXT_INDEX, 0); |
| 685 | crtcext0 = RREG8(MGAREG_CRTCEXT_DATA); | ||
| 686 | crtcext0 &= 0xB0; | ||
| 687 | addr = offset / 8; | ||
| 688 | /* Can't store addresses any higher than that... | ||
| 689 | but we also don't have more than 16MB of memory, so it should be fine. */ | ||
| 690 | WARN_ON(addr > 0x1fffff); | ||
| 691 | crtcext0 |= (!!(addr & (1<<20)))<<6; | ||
| 674 | WREG_CRT(0x0d, (u8)(addr & 0xff)); | 692 | WREG_CRT(0x0d, (u8)(addr & 0xff)); |
| 675 | WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); | 693 | WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); |
| 676 | WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); | 694 | WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0); |
| 677 | } | 695 | } |
| 678 | 696 | ||
| 679 | 697 | ||
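
A worked example of the register split documented in the new comment: for a 32bpp framebuffer the byte offset divided by 8 gives the hardware's address units, which are then scattered across CRTCD, CRTCC and CRTCEXT0. The offset value here is made up, and the real driver also preserves the reserved CRTCEXT0 bits it read back (the crtcext0 &= 0xB0 step); this shows only the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t offset = 0x00300000;   /* byte offset of the scanout buffer */
        uint32_t addr = offset / 8;     /* 32bpp: pixel count / 2, per the comment */

        uint8_t crtcd = addr & 0xff;                                  /* addr<7-0>   */
        uint8_t crtcc = (addr >> 8) & 0xff;                           /* addr<15-8>  */
        uint8_t crtcext0 = (uint8_t)(((addr >> 16) & 0xf) |           /* addr<19-16> */
                                     (((addr >> 20) & 1) << 6));      /* addr<20>    */

        printf("CRTCD=%#x CRTCC=%#x CRTCEXT0=%#x\n", crtcd, crtcc, crtcext0);
        return 0;
    }
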
| @@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
| 829 | 847 | ||
| 830 | 848 | ||
| 831 | for (i = 0; i < sizeof(dacvalue); i++) { | 849 | for (i = 0; i < sizeof(dacvalue); i++) { |
| 832 | if ((i <= 0x03) || | 850 | if ((i <= 0x17) || |
| 833 | (i == 0x07) || | ||
| 834 | (i == 0x0b) || | ||
| 835 | (i == 0x0f) || | ||
| 836 | ((i >= 0x13) && (i <= 0x17)) || | ||
| 837 | (i == 0x1b) || | 851 | (i == 0x1b) || |
| 838 | (i == 0x1c) || | 852 | (i == 0x1c) || |
| 839 | ((i >= 0x1f) && (i <= 0x29)) || | 853 | ((i >= 0x1f) && (i <= 0x29)) || |
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 08b0823c93d5..f86771481317 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
| @@ -277,7 +277,7 @@ out_unref: | |||
| 277 | return 0; | 277 | return 0; |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) | 280 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) |
| 281 | { | 281 | { |
| 282 | int irq_num; | 282 | int irq_num; |
| 283 | long addr = qdev->io_base + port; | 283 | long addr = qdev->io_base + port; |
| @@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) | |||
| 285 | 285 | ||
| 286 | mutex_lock(&qdev->async_io_mutex); | 286 | mutex_lock(&qdev->async_io_mutex); |
| 287 | irq_num = atomic_read(&qdev->irq_received_io_cmd); | 287 | irq_num = atomic_read(&qdev->irq_received_io_cmd); |
| 288 | |||
| 289 | |||
| 290 | if (qdev->last_sent_io_cmd > irq_num) { | 288 | if (qdev->last_sent_io_cmd > irq_num) { |
| 291 | ret = wait_event_interruptible(qdev->io_cmd_event, | 289 | if (intr) |
| 292 | atomic_read(&qdev->irq_received_io_cmd) > irq_num); | 290 | ret = wait_event_interruptible_timeout(qdev->io_cmd_event, |
| 293 | if (ret) | 291 | atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); |
| 292 | else | ||
| 293 | ret = wait_event_timeout(qdev->io_cmd_event, | ||
| 294 | atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); | ||
| 295 | /* 0 is timeout, just bail the "hw" has gone away */ | ||
| 296 | if (ret <= 0) | ||
| 294 | goto out; | 297 | goto out; |
| 295 | irq_num = atomic_read(&qdev->irq_received_io_cmd); | 298 | irq_num = atomic_read(&qdev->irq_received_io_cmd); |
| 296 | } | 299 | } |
| 297 | outb(val, addr); | 300 | outb(val, addr); |
| 298 | qdev->last_sent_io_cmd = irq_num + 1; | 301 | qdev->last_sent_io_cmd = irq_num + 1; |
| 299 | ret = wait_event_interruptible(qdev->io_cmd_event, | 302 | if (intr) |
| 300 | atomic_read(&qdev->irq_received_io_cmd) > irq_num); | 303 | ret = wait_event_interruptible_timeout(qdev->io_cmd_event, |
| 304 | atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); | ||
| 305 | else | ||
| 306 | ret = wait_event_timeout(qdev->io_cmd_event, | ||
| 307 | atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); | ||
| 301 | out: | 308 | out: |
| 309 | if (ret > 0) | ||
| 310 | ret = 0; | ||
| 302 | mutex_unlock(&qdev->async_io_mutex); | 311 | mutex_unlock(&qdev->async_io_mutex); |
| 303 | return ret; | 312 | return ret; |
| 304 | } | 313 | } |
| @@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) | |||
| 308 | int ret; | 317 | int ret; |
| 309 | 318 | ||
| 310 | restart: | 319 | restart: |
| 311 | ret = wait_for_io_cmd_user(qdev, val, port); | 320 | ret = wait_for_io_cmd_user(qdev, val, port, false); |
| 312 | if (ret == -ERESTARTSYS) | 321 | if (ret == -ERESTARTSYS) |
| 313 | goto restart; | 322 | goto restart; |
| 314 | } | 323 | } |
| @@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, | |||
| 340 | mutex_lock(&qdev->update_area_mutex); | 349 | mutex_lock(&qdev->update_area_mutex); |
| 341 | qdev->ram_header->update_area = *area; | 350 | qdev->ram_header->update_area = *area; |
| 342 | qdev->ram_header->update_surface = surface_id; | 351 | qdev->ram_header->update_surface = surface_id; |
| 343 | ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); | 352 | ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true); |
| 344 | mutex_unlock(&qdev->update_area_mutex); | 353 | mutex_unlock(&qdev->update_area_mutex); |
| 345 | return ret; | 354 | return ret; |
| 346 | } | 355 | } |
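
The qxl change switches both waits to the *_timeout variants with a 5*HZ limit and normalizes the result for callers: a timeout (0) or a signal (negative) bails out, while "condition met with jiffies to spare" (positive) is mapped back to 0. A minimal illustration of that mapping, not the driver code:

    #include <stdio.h>

    /* ret is what wait_event(_interruptible)_timeout() returned. */
    static int normalize_wait(long ret)
    {
        if (ret <= 0)      /* 0: timed out, the "hw" has gone away; <0: signal */
            return (int)ret;
        return 0;          /* condition became true before the timeout */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               normalize_wait(0),       /* timeout            */
               normalize_wait(-512),    /* -ERESTARTSYS value */
               normalize_wait(42));     /* woken in time      */
        return 0;
    }
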
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index fcfd4436ceed..823d29e926ec 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 428 | int inc = 1; | 428 | int inc = 1; |
| 429 | 429 | ||
| 430 | qobj = gem_to_qxl_bo(qxl_fb->obj); | 430 | qobj = gem_to_qxl_bo(qxl_fb->obj); |
| 431 | if (qxl_fb != qdev->active_user_framebuffer) { | 431 | /* if we aren't primary surface ignore this */ |
| 432 | DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", | 432 | if (!qobj->is_primary) |
| 433 | __func__, qxl_fb, qdev->active_user_framebuffer); | 433 | return 0; |
| 434 | } | 434 | |
| 435 | if (!num_clips) { | 435 | if (!num_clips) { |
| 436 | num_clips = 1; | 436 | num_clips = 1; |
| 437 | clips = &norect; | 437 | clips = &norect; |
| @@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, | |||
| 604 | mode->hdisplay, | 604 | mode->hdisplay, |
| 605 | mode->vdisplay); | 605 | mode->vdisplay); |
| 606 | } | 606 | } |
| 607 | qdev->mode_set = true; | ||
| 608 | return 0; | 607 | return 0; |
| 609 | } | 608 | } |
| 610 | 609 | ||
| @@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, | |||
| 893 | { | 892 | { |
| 894 | struct drm_gem_object *obj; | 893 | struct drm_gem_object *obj; |
| 895 | struct qxl_framebuffer *qxl_fb; | 894 | struct qxl_framebuffer *qxl_fb; |
| 896 | struct qxl_device *qdev = dev->dev_private; | ||
| 897 | int ret; | 895 | int ret; |
| 898 | 896 | ||
| 899 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 897 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
| @@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, | |||
| 909 | return NULL; | 907 | return NULL; |
| 910 | } | 908 | } |
| 911 | 909 | ||
| 912 | if (qdev->active_user_framebuffer) { | ||
| 913 | DRM_INFO("%s: active_user_framebuffer %p -> %p\n", | ||
| 914 | __func__, | ||
| 915 | qdev->active_user_framebuffer, qxl_fb); | ||
| 916 | } | ||
| 917 | qdev->active_user_framebuffer = qxl_fb; | ||
| 918 | |||
| 919 | return &qxl_fb->base; | 910 | return &qxl_fb->base; |
| 920 | } | 911 | } |
| 921 | 912 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 52b582c211da..43d06ab28a21 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
| @@ -255,12 +255,6 @@ struct qxl_device { | |||
| 255 | struct qxl_gem gem; | 255 | struct qxl_gem gem; |
| 256 | struct qxl_mode_info mode_info; | 256 | struct qxl_mode_info mode_info; |
| 257 | 257 | ||
| 258 | /* | ||
| 259 | * last created framebuffer with fb_create | ||
| 260 | * only used by debugfs dumbppm | ||
| 261 | */ | ||
| 262 | struct qxl_framebuffer *active_user_framebuffer; | ||
| 263 | |||
| 264 | struct fb_info *fbdev_info; | 258 | struct fb_info *fbdev_info; |
| 265 | struct qxl_framebuffer *fbdev_qfb; | 259 | struct qxl_framebuffer *fbdev_qfb; |
| 266 | void *ram_physical; | 260 | void *ram_physical; |
| @@ -270,7 +264,6 @@ struct qxl_device { | |||
| 270 | struct qxl_ring *cursor_ring; | 264 | struct qxl_ring *cursor_ring; |
| 271 | 265 | ||
| 272 | struct qxl_ram_header *ram_header; | 266 | struct qxl_ram_header *ram_header; |
| 273 | bool mode_set; | ||
| 274 | 267 | ||
| 275 | bool primary_created; | 268 | bool primary_created; |
| 276 | 269 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 04b64f9cbfdb..6db7370373ea 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
| @@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, | |||
| 294 | goto out; | 294 | goto out; |
| 295 | 295 | ||
| 296 | if (!qobj->pin_count) { | 296 | if (!qobj->pin_count) { |
| 297 | qxl_ttm_placement_from_domain(qobj, qobj->type); | ||
| 297 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | 298 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, |
| 298 | true, false); | 299 | true, false); |
| 299 | if (unlikely(ret)) | 300 | if (unlikely(ret)) |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 865e2c9980db..60170ea5e3a2 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
| @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
| 75 | OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); | 75 | OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); |
| 76 | 76 | ||
| 77 | for (i = 0; i < nr; ++i) { | 77 | for (i = 0; i < nr; ++i) { |
| 78 | if (DRM_COPY_FROM_USER_UNCHECKED | 78 | if (DRM_COPY_FROM_USER |
| 79 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { | 79 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { |
| 80 | DRM_ERROR("copy cliprect faulted\n"); | 80 | DRM_ERROR("copy cliprect faulted\n"); |
| 81 | return -EFAULT; | 81 | return -EFAULT; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index d33f484ace48..094e7e5ea39e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {} | |||
| 147 | #endif | 147 | #endif |
| 148 | 148 | ||
| 149 | int radeon_no_wb; | 149 | int radeon_no_wb; |
| 150 | int radeon_modeset = 1; | 150 | int radeon_modeset = -1; |
| 151 | int radeon_dynclks = -1; | 151 | int radeon_dynclks = -1; |
| 152 | int radeon_r4xx_atom = 0; | 152 | int radeon_r4xx_atom = 0; |
| 153 | int radeon_agpmode = 0; | 153 | int radeon_agpmode = 0; |
| @@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = { | |||
| 456 | 456 | ||
| 457 | static int __init radeon_init(void) | 457 | static int __init radeon_init(void) |
| 458 | { | 458 | { |
| 459 | #ifdef CONFIG_VGA_CONSOLE | ||
| 460 | if (vgacon_text_force() && radeon_modeset == -1) { | ||
| 461 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | ||
| 462 | radeon_modeset = 0; | ||
| 463 | } | ||
| 464 | #endif | ||
| 465 | /* set to modesetting by default if not nomodeset */ | ||
| 466 | if (radeon_modeset == -1) | ||
| 467 | radeon_modeset = 1; | ||
| 468 | |||
| 459 | if (radeon_modeset == 1) { | 469 | if (radeon_modeset == 1) { |
| 460 | DRM_INFO("radeon kernel modesetting enabled.\n"); | 470 | DRM_INFO("radeon kernel modesetting enabled.\n"); |
| 461 | driver = &kms_driver; | 471 | driver = &kms_driver; |
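
radeon_modeset now starts at -1, the usual tri-state module parameter meaning "not set on the command line": radeon_init() disables KMS when the VGA console was forced and the user did not override, and otherwise defaults to enabled. A small sketch of that resolution logic, with a stand-in for vgacon_text_force():

    #include <stdbool.h>
    #include <stdio.h>

    static int modeset = -1;                  /* module parameter: -1 = not set */

    static bool vgacon_text_force(void)       /* stand-in for the real helper */
    {
        return false;
    }

    int main(void)
    {
        if (vgacon_text_force() && modeset == -1)
            modeset = 0;                      /* VGA console forced, no override */
        if (modeset == -1)
            modeset = 1;                      /* default: enable KMS */

        printf("kernel modesetting %s\n", modeset ? "enabled" : "disabled");
        return 0;
    }
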
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c index 1e2060324f02..8c04943f82e3 100644 --- a/drivers/gpu/host1x/drm/dc.c +++ b/drivers/gpu/host1x/drm/dc.c | |||
| @@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev) | |||
| 1128 | return err; | 1128 | return err; |
| 1129 | 1129 | ||
| 1130 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1130 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1131 | if (!regs) { | ||
| 1132 | dev_err(&pdev->dev, "failed to get registers\n"); | ||
| 1133 | return -ENXIO; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | dc->regs = devm_ioremap_resource(&pdev->dev, regs); | 1131 | dc->regs = devm_ioremap_resource(&pdev->dev, regs); |
| 1137 | if (IS_ERR(dc->regs)) | 1132 | if (IS_ERR(dc->regs)) |
| 1138 | return PTR_ERR(dc->regs); | 1133 | return PTR_ERR(dc->regs); |
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index df0b69987914..2ebd6ce46108 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c | |||
| @@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev) | |||
| 1414 | pr_info("found Abit uGuru\n"); | 1414 | pr_info("found Abit uGuru\n"); |
| 1415 | 1415 | ||
| 1416 | /* Register sysfs hooks */ | 1416 | /* Register sysfs hooks */ |
| 1417 | for (i = 0; i < sysfs_attr_i; i++) | 1417 | for (i = 0; i < sysfs_attr_i; i++) { |
| 1418 | if (device_create_file(&pdev->dev, | 1418 | res = device_create_file(&pdev->dev, |
| 1419 | &data->sysfs_attr[i].dev_attr)) | 1419 | &data->sysfs_attr[i].dev_attr); |
| 1420 | if (res) | ||
| 1420 | goto abituguru_probe_error; | 1421 | goto abituguru_probe_error; |
| 1421 | for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) | 1422 | } |
| 1422 | if (device_create_file(&pdev->dev, | 1423 | for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { |
| 1423 | &abituguru_sysfs_attr[i].dev_attr)) | 1424 | res = device_create_file(&pdev->dev, |
| 1425 | &abituguru_sysfs_attr[i].dev_attr); | ||
| 1426 | if (res) | ||
| 1424 | goto abituguru_probe_error; | 1427 | goto abituguru_probe_error; |
| 1428 | } | ||
| 1425 | 1429 | ||
| 1426 | data->hwmon_dev = hwmon_device_register(&pdev->dev); | 1430 | data->hwmon_dev = hwmon_device_register(&pdev->dev); |
| 1427 | if (!IS_ERR(data->hwmon_dev)) | 1431 | if (!IS_ERR(data->hwmon_dev)) |
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index aafa4531b961..52b77afebde1 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c | |||
| @@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev) | |||
| 84 | return PTR_ERR(channels); | 84 | return PTR_ERR(channels); |
| 85 | 85 | ||
| 86 | st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); | 86 | st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); |
| 87 | if (st == NULL) | 87 | if (st == NULL) { |
| 88 | return -ENOMEM; | 88 | ret = -ENOMEM; |
| 89 | goto error_release_channels; | ||
| 90 | } | ||
| 89 | 91 | ||
| 90 | st->channels = channels; | 92 | st->channels = channels; |
| 91 | 93 | ||
| @@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) | |||
| 159 | error_remove_group: | 161 | error_remove_group: |
| 160 | sysfs_remove_group(&dev->kobj, &st->attr_group); | 162 | sysfs_remove_group(&dev->kobj, &st->attr_group); |
| 161 | error_release_channels: | 163 | error_release_channels: |
| 162 | iio_channel_release_all(st->channels); | 164 | iio_channel_release_all(channels); |
| 163 | return ret; | 165 | return ret; |
| 164 | } | 166 | } |
| 165 | 167 | ||
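
The iio_hwmon fix sends the devm_kzalloc() failure through the existing error label so the channels acquired just above are released, and releases them through the local pointer rather than st->channels (st is exactly what failed to allocate). A generic sketch of that goto-unwind idiom with made-up resource helpers:

    #include <stdio.h>
    #include <stdlib.h>

    struct chan { int dummy; };

    static struct chan *get_channels(void) { return malloc(sizeof(struct chan)); }
    static void release_channels(struct chan *c) { free(c); }

    static int probe(int fail_alloc)
    {
        struct chan *channels;
        void *st;
        int ret;

        channels = get_channels();
        if (!channels)
            return -1;

        st = fail_alloc ? NULL : malloc(16);
        if (!st) {
            ret = -12;                   /* -ENOMEM */
            goto error_release_channels;
        }

        free(st);
        release_channels(channels);
        return 0;

    error_release_channels:
        release_channels(channels);      /* unwind via the local pointer */
        return ret;
    }

    int main(void)
    {
        printf("probe(fail) = %d\n", probe(1));
        return 0;
    }
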
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index f43f5e571db9..04638aee9039 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3705 | data->have_temp |= 1 << i; | 3705 | data->have_temp |= 1 << i; |
| 3706 | data->have_temp_fixed |= 1 << i; | 3706 | data->have_temp_fixed |= 1 << i; |
| 3707 | data->reg_temp[0][i] = reg_temp_alternate[i]; | 3707 | data->reg_temp[0][i] = reg_temp_alternate[i]; |
| 3708 | data->reg_temp[1][i] = reg_temp_over[i]; | 3708 | if (i < num_reg_temp) { |
| 3709 | data->reg_temp[2][i] = reg_temp_hyst[i]; | 3709 | data->reg_temp[1][i] = reg_temp_over[i]; |
| 3710 | data->reg_temp[2][i] = reg_temp_hyst[i]; | ||
| 3711 | } | ||
| 3710 | data->temp_src[i] = i + 1; | 3712 | data->temp_src[i] = i + 1; |
| 3711 | continue; | 3713 | continue; |
| 3712 | } | 3714 | } |
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index a478454f690f..dfe6d9527efb 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c | |||
| @@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev) | |||
| 240 | mutex_lock(&data->update_lock); | 240 | mutex_lock(&data->update_lock); |
| 241 | 241 | ||
| 242 | next_update = data->last_updated + | 242 | next_update = data->last_updated + |
| 243 | msecs_to_jiffies(data->update_interval) + 1; | 243 | msecs_to_jiffies(data->update_interval); |
| 244 | if (time_after(jiffies, next_update) || !data->valid) { | 244 | if (time_after(jiffies, next_update) || !data->valid) { |
| 245 | if (data->kind != tmp432) { | 245 | if (data->kind != tmp432) { |
| 246 | /* | 246 | /* |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 6e8ee92ab553..cab1c91b75a3 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
| @@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
| 1082 | /* map the registers */ | 1082 | /* map the registers */ |
| 1083 | 1083 | ||
| 1084 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1084 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1085 | if (res == NULL) { | ||
| 1086 | dev_err(&pdev->dev, "cannot find IO resource\n"); | ||
| 1087 | return -ENOENT; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | i2c->regs = devm_ioremap_resource(&pdev->dev, res); | 1085 | i2c->regs = devm_ioremap_resource(&pdev->dev, res); |
| 1091 | 1086 | ||
| 1092 | if (IS_ERR(i2c->regs)) | 1087 | if (IS_ERR(i2c->regs)) |
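
This s3c2410 hunk, and several of the hunks that follow (i2c-sirf, i2c-tegra, emif, intel_msic, atmel-ssc, lpc32xx_mlc), all rely on the same property: devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR on a NULL or invalid resource, so the explicit NULL check after platform_get_resource() is redundant. A minimal sketch of the resulting probe pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* No NULL check needed: devm_ioremap_resource() logs and returns
	 * an error pointer itself when res is missing. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is valid here and is unmapped automatically on detach */
	return 0;
}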
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 5a7ad240bd26..a63c7d506836 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c | |||
| @@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
| 303 | adap->class = I2C_CLASS_HWMON; | 303 | adap->class = I2C_CLASS_HWMON; |
| 304 | 304 | ||
| 305 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 305 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 306 | if (mem_res == NULL) { | ||
| 307 | dev_err(&pdev->dev, "Unable to get MEM resource\n"); | ||
| 308 | err = -EINVAL; | ||
| 309 | goto out; | ||
| 310 | } | ||
| 311 | |||
| 312 | siic->base = devm_ioremap_resource(&pdev->dev, mem_res); | 306 | siic->base = devm_ioremap_resource(&pdev->dev, mem_res); |
| 313 | if (IS_ERR(siic->base)) { | 307 | if (IS_ERR(siic->base)) { |
| 314 | err = PTR_ERR(siic->base); | 308 | err = PTR_ERR(siic->base); |
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index b60ff90adc39..9aa1b60f7fdd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
| @@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
| 714 | int ret = 0; | 714 | int ret = 0; |
| 715 | 715 | ||
| 716 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 716 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 717 | if (!res) { | ||
| 718 | dev_err(&pdev->dev, "no mem resource\n"); | ||
| 719 | return -EINVAL; | ||
| 720 | } | ||
| 721 | |||
| 722 | base = devm_ioremap_resource(&pdev->dev, res); | 717 | base = devm_ioremap_resource(&pdev->dev, res); |
| 723 | if (IS_ERR(base)) | 718 | if (IS_ERR(base)) |
| 724 | return PTR_ERR(base); | 719 | return PTR_ERR(base); |
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 699187ab3800..5b9ac32801c7 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
| @@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) | |||
| 1002 | kill_guest(&lg->cpus[0], | 1002 | kill_guest(&lg->cpus[0], |
| 1003 | "Cannot populate switcher mapping"); | 1003 | "Cannot populate switcher mapping"); |
| 1004 | } | 1004 | } |
| 1005 | lg->pgdirs[pgdir].last_host_cpu = -1; | ||
| 1005 | } | 1006 | } |
| 1006 | } | 1007 | } |
| 1007 | 1008 | ||
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 759cffc45cab..88f2f802d528 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -2188,7 +2188,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) | |||
| 2188 | 2188 | ||
| 2189 | *need_commit = false; | 2189 | *need_commit = false; |
| 2190 | 2190 | ||
| 2191 | metadata_dev_size = get_metadata_dev_size(pool->md_dev); | 2191 | metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); |
| 2192 | 2192 | ||
| 2193 | r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); | 2193 | r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); |
| 2194 | if (r) { | 2194 | if (r) { |
| @@ -2197,7 +2197,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) | |||
| 2197 | } | 2197 | } |
| 2198 | 2198 | ||
| 2199 | if (metadata_dev_size < sb_metadata_dev_size) { | 2199 | if (metadata_dev_size < sb_metadata_dev_size) { |
| 2200 | DMERR("metadata device (%llu sectors) too small: expected %llu", | 2200 | DMERR("metadata device (%llu blocks) too small: expected %llu", |
| 2201 | metadata_dev_size, sb_metadata_dev_size); | 2201 | metadata_dev_size, sb_metadata_dev_size); |
| 2202 | return -EINVAL; | 2202 | return -EINVAL; |
| 2203 | 2203 | ||
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index cadf1cc19aaf..04644e7b42b1 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c | |||
| @@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev) | |||
| 1560 | platform_set_drvdata(pdev, emif); | 1560 | platform_set_drvdata(pdev, emif); |
| 1561 | 1561 | ||
| 1562 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1562 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1563 | if (!res) { | ||
| 1564 | dev_err(emif->dev, "%s: error getting memory resource\n", | ||
| 1565 | __func__); | ||
| 1566 | goto error; | ||
| 1567 | } | ||
| 1568 | |||
| 1569 | emif->base = devm_ioremap_resource(emif->dev, res); | 1563 | emif->base = devm_ioremap_resource(emif->dev, res); |
| 1570 | if (IS_ERR(emif->base)) | 1564 | if (IS_ERR(emif->base)) |
| 1571 | goto error; | 1565 | goto error; |
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c index 5be3b5e13855..d8d5137f9717 100644 --- a/drivers/mfd/intel_msic.c +++ b/drivers/mfd/intel_msic.c | |||
| @@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev) | |||
| 414 | * the clients via intel_msic_irq_read(). | 414 | * the clients via intel_msic_irq_read(). |
| 415 | */ | 415 | */ |
| 416 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 416 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 417 | if (!res) { | ||
| 418 | dev_err(&pdev->dev, "failed to get SRAM iomem resource\n"); | ||
| 419 | return -ENODEV; | ||
| 420 | } | ||
| 421 | |||
| 422 | msic->irq_base = devm_ioremap_resource(&pdev->dev, res); | 417 | msic->irq_base = devm_ioremap_resource(&pdev->dev, res); |
| 423 | if (IS_ERR(msic->irq_base)) | 418 | if (IS_ERR(msic->irq_base)) |
| 424 | return PTR_ERR(msic->irq_base); | 419 | return PTR_ERR(msic->irq_base); |
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index c09c28f92055..1abd5ad59925 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c | |||
| @@ -154,11 +154,6 @@ static int ssc_probe(struct platform_device *pdev) | |||
| 154 | ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; | 154 | ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; |
| 155 | 155 | ||
| 156 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 156 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 157 | if (!regs) { | ||
| 158 | dev_dbg(&pdev->dev, "no mmio resource defined\n"); | ||
| 159 | return -ENXIO; | ||
| 160 | } | ||
| 161 | |||
| 162 | ssc->regs = devm_ioremap_resource(&pdev->dev, regs); | 157 | ssc->regs = devm_ioremap_resource(&pdev->dev, regs); |
| 163 | if (IS_ERR(ssc->regs)) | 158 | if (IS_ERR(ssc->regs)) |
| 164 | return PTR_ERR(ssc->regs); | 159 | return PTR_ERR(ssc->regs); |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 375c109607ff..f4f3038c1df0 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
| @@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1130 | struct variant_data *variant = host->variant; | 1130 | struct variant_data *variant = host->variant; |
| 1131 | u32 pwr = 0; | 1131 | u32 pwr = 0; |
| 1132 | unsigned long flags; | 1132 | unsigned long flags; |
| 1133 | int ret; | ||
| 1133 | 1134 | ||
| 1134 | pm_runtime_get_sync(mmc_dev(mmc)); | 1135 | pm_runtime_get_sync(mmc_dev(mmc)); |
| 1135 | 1136 | ||
| @@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 1161 | break; | 1162 | break; |
| 1162 | case MMC_POWER_ON: | 1163 | case MMC_POWER_ON: |
| 1163 | if (!IS_ERR(mmc->supply.vqmmc) && | 1164 | if (!IS_ERR(mmc->supply.vqmmc) && |
| 1164 | !regulator_is_enabled(mmc->supply.vqmmc)) | 1165 | !regulator_is_enabled(mmc->supply.vqmmc)) { |
| 1165 | regulator_enable(mmc->supply.vqmmc); | 1166 | ret = regulator_enable(mmc->supply.vqmmc); |
| 1167 | if (ret < 0) | ||
| 1168 | dev_err(mmc_dev(mmc), | ||
| 1169 | "failed to enable vqmmc regulator\n"); | ||
| 1170 | } | ||
| 1166 | 1171 | ||
| 1167 | pwr |= MCI_PWR_ON; | 1172 | pwr |= MCI_PWR_ON; |
| 1168 | break; | 1173 | break; |
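
The mmci hunk adds the missing check on regulator_enable(), which is a __must_check API. A minimal sketch of the pattern, assuming a hypothetical vqmmc supply; the warning-only handling mirrors the patch, though a driver could also propagate the error:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_power_on(struct device *dev, struct regulator *vqmmc)
{
	int ret;

	/* Only enable a supply we actually obtained and that is off */
	if (!IS_ERR(vqmmc) && !regulator_is_enabled(vqmmc)) {
		ret = regulator_enable(vqmmc);
		if (ret < 0)
			dev_err(dev, "failed to enable vqmmc regulator: %d\n",
				ret);
	}
}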
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index a94facb46e5c..fd1df5e13ae4 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c | |||
| @@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) | |||
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 674 | rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 675 | if (rc == NULL) { | ||
| 676 | dev_err(&pdev->dev, "No memory resource found for device!\r\n"); | ||
| 677 | return -ENXIO; | ||
| 678 | } | ||
| 679 | |||
| 680 | host->io_base = devm_ioremap_resource(&pdev->dev, rc); | 675 | host->io_base = devm_ioremap_resource(&pdev->dev, rc); |
| 681 | if (IS_ERR(host->io_base)) | 676 | if (IS_ERR(host->io_base)) |
| 682 | return PTR_ERR(host->io_base); | 677 | return PTR_ERR(host->io_base); |
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 7ffc756131a2..547098086773 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig | |||
| @@ -43,7 +43,7 @@ config CAIF_HSI | |||
| 43 | 43 | ||
| 44 | config CAIF_VIRTIO | 44 | config CAIF_VIRTIO |
| 45 | tristate "CAIF virtio transport driver" | 45 | tristate "CAIF virtio transport driver" |
| 46 | depends on CAIF | 46 | depends on CAIF && HAS_DMA |
| 47 | select VHOST_RING | 47 | select VHOST_RING |
| 48 | select VIRTIO | 48 | select VIRTIO |
| 49 | select GENERIC_ALLOCATOR | 49 | select GENERIC_ALLOCATOR |
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index de570a8f8967..072c6f14e8fc 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
| @@ -632,7 +632,6 @@ struct vortex_private { | |||
| 632 | pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ | 632 | pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ |
| 633 | open:1, | 633 | open:1, |
| 634 | medialock:1, | 634 | medialock:1, |
| 635 | must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ | ||
| 636 | large_frames:1, /* accept large frames */ | 635 | large_frames:1, /* accept large frames */ |
| 637 | handling_irq:1; /* private in_irq indicator */ | 636 | handling_irq:1; /* private in_irq indicator */ |
| 638 | /* {get|set}_wol operations are already serialized by rtnl. | 637 | /* {get|set}_wol operations are already serialized by rtnl. |
| @@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev, | |||
| 1012 | if (rc < 0) | 1011 | if (rc < 0) |
| 1013 | goto out; | 1012 | goto out; |
| 1014 | 1013 | ||
| 1014 | rc = pci_request_regions(pdev, DRV_NAME); | ||
| 1015 | if (rc < 0) { | ||
| 1016 | pci_disable_device(pdev); | ||
| 1017 | goto out; | ||
| 1018 | } | ||
| 1019 | |||
| 1015 | unit = vortex_cards_found; | 1020 | unit = vortex_cards_found; |
| 1016 | 1021 | ||
| 1017 | if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { | 1022 | if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { |
| @@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev, | |||
| 1027 | if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ | 1032 | if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ |
| 1028 | ioaddr = pci_iomap(pdev, 0, 0); | 1033 | ioaddr = pci_iomap(pdev, 0, 0); |
| 1029 | if (!ioaddr) { | 1034 | if (!ioaddr) { |
| 1035 | pci_release_regions(pdev); | ||
| 1030 | pci_disable_device(pdev); | 1036 | pci_disable_device(pdev); |
| 1031 | rc = -ENOMEM; | 1037 | rc = -ENOMEM; |
| 1032 | goto out; | 1038 | goto out; |
| @@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev, | |||
| 1036 | ent->driver_data, unit); | 1042 | ent->driver_data, unit); |
| 1037 | if (rc < 0) { | 1043 | if (rc < 0) { |
| 1038 | pci_iounmap(pdev, ioaddr); | 1044 | pci_iounmap(pdev, ioaddr); |
| 1045 | pci_release_regions(pdev); | ||
| 1039 | pci_disable_device(pdev); | 1046 | pci_disable_device(pdev); |
| 1040 | goto out; | 1047 | goto out; |
| 1041 | } | 1048 | } |
| @@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, | |||
| 1178 | 1185 | ||
| 1179 | /* PCI-only startup logic */ | 1186 | /* PCI-only startup logic */ |
| 1180 | if (pdev) { | 1187 | if (pdev) { |
| 1181 | /* EISA resources already marked, so only PCI needs to do this here */ | ||
| 1182 | /* Ignore return value, because Cardbus drivers already allocate for us */ | ||
| 1183 | if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) | ||
| 1184 | vp->must_free_region = 1; | ||
| 1185 | |||
| 1186 | /* enable bus-mastering if necessary */ | 1188 | /* enable bus-mastering if necessary */ |
| 1187 | if (vci->flags & PCI_USES_MASTER) | 1189 | if (vci->flags & PCI_USES_MASTER) |
| 1188 | pci_set_master(pdev); | 1190 | pci_set_master(pdev); |
| @@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, | |||
| 1220 | &vp->rx_ring_dma); | 1222 | &vp->rx_ring_dma); |
| 1221 | retval = -ENOMEM; | 1223 | retval = -ENOMEM; |
| 1222 | if (!vp->rx_ring) | 1224 | if (!vp->rx_ring) |
| 1223 | goto free_region; | 1225 | goto free_device; |
| 1224 | 1226 | ||
| 1225 | vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); | 1227 | vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); |
| 1226 | vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; | 1228 | vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; |
| @@ -1484,9 +1486,7 @@ free_ring: | |||
| 1484 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | 1486 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, |
| 1485 | vp->rx_ring, | 1487 | vp->rx_ring, |
| 1486 | vp->rx_ring_dma); | 1488 | vp->rx_ring_dma); |
| 1487 | free_region: | 1489 | free_device: |
| 1488 | if (vp->must_free_region) | ||
| 1489 | release_region(dev->base_addr, vci->io_size); | ||
| 1490 | free_netdev(dev); | 1490 | free_netdev(dev); |
| 1491 | pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); | 1491 | pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); |
| 1492 | out: | 1492 | out: |
| @@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev) | |||
| 3254 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | 3254 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, |
| 3255 | vp->rx_ring, | 3255 | vp->rx_ring, |
| 3256 | vp->rx_ring_dma); | 3256 | vp->rx_ring_dma); |
| 3257 | if (vp->must_free_region) | 3257 | |
| 3258 | release_region(dev->base_addr, vp->io_size); | 3258 | pci_release_regions(pdev); |
| 3259 | |||
| 3259 | free_netdev(dev); | 3260 | free_netdev(dev); |
| 3260 | } | 3261 | } |
| 3261 | 3262 | ||
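
The 3c59x change replaces the per-BAR request_region()/must_free_region bookkeeping with pci_request_regions(), which claims all of the device's BARs in one call and is undone with pci_release_regions(). A minimal sketch of the acquire/undo ordering in probe and remove, with a hypothetical driver name and labels:

#include <linux/pci.h>

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc < 0)
		goto err_disable;

	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto err_release;
	}

	/* ... netdev setup; on failure unwind in reverse order ... */
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void example_pci_remove(struct pci_dev *pdev)
{
	/* ... pci_iounmap(), free_netdev() ... */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

The single flag-free request/release pair is what lets the patch drop the must_free_region bit from vortex_private.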
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ce4a030d3d0c..07f7ef05c3f2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
| @@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad, | |||
| 3236 | 3236 | ||
| 3237 | sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); | 3237 | sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); |
| 3238 | bnad->work_q = create_singlethread_workqueue(bnad->wq_name); | 3238 | bnad->work_q = create_singlethread_workqueue(bnad->wq_name); |
| 3239 | 3239 | if (!bnad->work_q) { | |
| 3240 | if (!bnad->work_q) | 3240 | iounmap(bnad->bar0); |
| 3241 | return -ENOMEM; | 3241 | return -ENOMEM; |
| 3242 | } | ||
| 3242 | 3243 | ||
| 3243 | return 0; | 3244 | return 0; |
| 3244 | } | 3245 | } |
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 1194446f859a..768285ec10f4 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig | |||
| @@ -22,7 +22,7 @@ if NET_CADENCE | |||
| 22 | 22 | ||
| 23 | config ARM_AT91_ETHER | 23 | config ARM_AT91_ETHER |
| 24 | tristate "AT91RM9200 Ethernet support" | 24 | tristate "AT91RM9200 Ethernet support" |
| 25 | depends on GENERIC_HARDIRQS | 25 | depends on GENERIC_HARDIRQS && HAS_DMA |
| 26 | select NET_CORE | 26 | select NET_CORE |
| 27 | select MACB | 27 | select MACB |
| 28 | ---help--- | 28 | ---help--- |
| @@ -31,6 +31,7 @@ config ARM_AT91_ETHER | |||
| 31 | 31 | ||
| 32 | config MACB | 32 | config MACB |
| 33 | tristate "Cadence MACB/GEM support" | 33 | tristate "Cadence MACB/GEM support" |
| 34 | depends on HAS_DMA | ||
| 34 | select PHYLIB | 35 | select PHYLIB |
| 35 | ---help--- | 36 | ---help--- |
| 36 | The Cadence MACB ethernet interface is found on many Atmel AT32 and | 37 | The Cadence MACB ethernet interface is found on many Atmel AT32 and |
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index aba435c3d4ae..184a063bed5f 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config NET_CALXEDA_XGMAC | 1 | config NET_CALXEDA_XGMAC |
| 2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" | 2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" |
| 3 | depends on HAS_IOMEM | 3 | depends on HAS_IOMEM && HAS_DMA |
| 4 | select CRC32 | 4 | select CRC32 |
| 5 | help | 5 | help |
| 6 | This is the driver for the XGMAC Ethernet IP block found on Calxeda | 6 | This is the driver for the XGMAC Ethernet IP block found on Calxeda |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index aff0310a778b..ca9825ca88c9 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -87,6 +87,8 @@ | |||
| 87 | #define FEC_QUIRK_HAS_GBIT (1 << 3) | 87 | #define FEC_QUIRK_HAS_GBIT (1 << 3) |
| 88 | /* Controller has extend desc buffer */ | 88 | /* Controller has extend desc buffer */ |
| 89 | #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) | 89 | #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) |
| 90 | /* Controller has hardware checksum support */ | ||
| 91 | #define FEC_QUIRK_HAS_CSUM (1 << 5) | ||
| 90 | 92 | ||
| 91 | static struct platform_device_id fec_devtype[] = { | 93 | static struct platform_device_id fec_devtype[] = { |
| 92 | { | 94 | { |
| @@ -105,7 +107,7 @@ static struct platform_device_id fec_devtype[] = { | |||
| 105 | }, { | 107 | }, { |
| 106 | .name = "imx6q-fec", | 108 | .name = "imx6q-fec", |
| 107 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | 109 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
| 108 | FEC_QUIRK_HAS_BUFDESC_EX, | 110 | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, |
| 109 | }, { | 111 | }, { |
| 110 | .name = "mvf-fec", | 112 | .name = "mvf-fec", |
| 111 | .driver_data = FEC_QUIRK_ENET_MAC, | 113 | .driver_data = FEC_QUIRK_ENET_MAC, |
| @@ -1744,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = { | |||
| 1744 | static int fec_enet_init(struct net_device *ndev) | 1746 | static int fec_enet_init(struct net_device *ndev) |
| 1745 | { | 1747 | { |
| 1746 | struct fec_enet_private *fep = netdev_priv(ndev); | 1748 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 1749 | const struct platform_device_id *id_entry = | ||
| 1750 | platform_get_device_id(fep->pdev); | ||
| 1747 | struct bufdesc *cbd_base; | 1751 | struct bufdesc *cbd_base; |
| 1748 | 1752 | ||
| 1749 | /* Allocate memory for buffer descriptors. */ | 1753 | /* Allocate memory for buffer descriptors. */ |
| @@ -1775,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev) | |||
| 1775 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | 1779 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); |
| 1776 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); | 1780 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); |
| 1777 | 1781 | ||
| 1778 | /* enable hw accelerator */ | 1782 | if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { |
| 1779 | ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1783 | /* enable hw accelerator */ |
| 1780 | | NETIF_F_RXCSUM); | 1784 | ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
| 1781 | ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1785 | | NETIF_F_RXCSUM); |
| 1782 | | NETIF_F_RXCSUM); | 1786 | ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
| 1783 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | 1787 | | NETIF_F_RXCSUM); |
| 1788 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | ||
| 1789 | } | ||
| 1784 | 1790 | ||
| 1785 | fec_restart(ndev, 0); | 1791 | fec_restart(ndev, 0); |
| 1786 | 1792 | ||
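
The FEC hunk gates checksum offload on a new FEC_QUIRK_HAS_CSUM bit carried in the platform_device_id driver_data, so only SoC variants whose id entry sets the bit advertise NETIF_F_*_CSUM. A minimal sketch of the quirk-flag lookup pattern; the names below are illustrative, not the driver's actual identifiers:

#include <linux/module.h>
#include <linux/platform_device.h>

#define EXAMPLE_QUIRK_HAS_CSUM	(1 << 0)

static const struct platform_device_id example_devtype[] = {
	{ .name = "old-soc-ip", .driver_data = 0 },
	{ .name = "new-soc-ip", .driver_data = EXAMPLE_QUIRK_HAS_CSUM },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, example_devtype);

static int example_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	if (id->driver_data & EXAMPLE_QUIRK_HAS_CSUM) {
		/* enable the hardware checksum features only here */
	}
	return 0;
}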
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4989481c19f0..d300a0c0eafc 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev) | |||
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | #ifdef CONFIG_PPC_DCR_NATIVE | 361 | #ifdef CONFIG_PPC_DCR_NATIVE |
| 362 | /* Enable internal clock source */ | 362 | /* |
| 363 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) | 363 | * PPC460EX/GT Embedded Processor Advanced User's Manual |
| 364 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 364 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: |
| 365 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); | 365 | * Note: The PHY must provide a TX Clk in order to perform a soft reset |
| 366 | * of the EMAC. If none is present, select the internal clock | ||
| 367 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). | ||
| 368 | * After a soft reset, select the external clock. | ||
| 369 | */ | ||
| 370 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { | ||
| 371 | if (dev->phy_address == 0xffffffff && | ||
| 372 | dev->phy_map == 0xffffffff) { | ||
| 373 | /* No PHY: select internal loop clock before reset */ | ||
| 374 | dcri_clrset(SDR0, SDR0_ETH_CFG, | ||
| 375 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); | ||
| 376 | } else { | ||
| 377 | /* PHY present: select external clock before reset */ | ||
| 378 | dcri_clrset(SDR0, SDR0_ETH_CFG, | ||
| 379 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); | ||
| 380 | } | ||
| 381 | } | ||
| 366 | #endif | 382 | #endif |
| 367 | 383 | ||
| 368 | out_be32(&p->mr0, EMAC_MR0_SRST); | 384 | out_be32(&p->mr0, EMAC_MR0_SRST); |
| @@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev) | |||
| 370 | --n; | 386 | --n; |
| 371 | 387 | ||
| 372 | #ifdef CONFIG_PPC_DCR_NATIVE | 388 | #ifdef CONFIG_PPC_DCR_NATIVE |
| 373 | /* Enable external clock source */ | 389 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
| 374 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) | 390 | if (dev->phy_address == 0xffffffff && |
| 375 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 391 | dev->phy_map == 0xffffffff) { |
| 376 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); | 392 | /* No PHY: restore external clock source after reset */ |
| 393 | dcri_clrset(SDR0, SDR0_ETH_CFG, | ||
| 394 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); | ||
| 395 | } | ||
| 396 | } | ||
| 377 | #endif | 397 | #endif |
| 378 | 398 | ||
| 379 | if (n) { | 399 | if (n) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 91f2b2c43c12..d3f508697a3d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c | |||
| @@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | |||
| 60 | context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; | 60 | context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; |
| 61 | if (user_prio >= 0) { | 61 | if (user_prio >= 0) { |
| 62 | context->pri_path.sched_queue |= user_prio << 3; | 62 | context->pri_path.sched_queue |= user_prio << 3; |
| 63 | context->pri_path.feup = 1 << 6; | 63 | context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; |
| 64 | } | 64 | } |
| 65 | context->pri_path.counter_index = 0xff; | 65 | context->pri_path.counter_index = 0xff; |
| 66 | context->cqn_send = cpu_to_be32(cqn); | 66 | context->cqn_send = cpu_to_be32(cqn); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index b147bdd40768..58a8e535d698 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
| 131 | [2] = "RSS XOR Hash Function support", | 131 | [2] = "RSS XOR Hash Function support", |
| 132 | [3] = "Device manage flow steering support", | 132 | [3] = "Device manage flow steering support", |
| 133 | [4] = "Automatic MAC reassignment support", | 133 | [4] = "Automatic MAC reassignment support", |
| 134 | [5] = "Time stamping support" | 134 | [5] = "Time stamping support", |
| 135 | [6] = "VST (control vlan insertion/stripping) support", | ||
| 136 | [7] = "FSM (MAC anti-spoofing) support" | ||
| 135 | }; | 137 | }; |
| 136 | int i; | 138 | int i; |
| 137 | 139 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index e12e0d2e0ee0..1157f028a90f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 372 | if (MLX4_QP_ST_RC == qp_type) | 372 | if (MLX4_QP_ST_RC == qp_type) |
| 373 | return -EINVAL; | 373 | return -EINVAL; |
| 374 | 374 | ||
| 375 | /* force strip vlan by clear vsd */ | ||
| 376 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | ||
| 377 | if (0 != vp_oper->state.default_vlan) { | ||
| 378 | qpc->pri_path.vlan_control = | ||
| 379 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | | ||
| 380 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | | ||
| 381 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; | ||
| 382 | } else { /* priority tagged */ | ||
| 383 | qpc->pri_path.vlan_control = | ||
| 384 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | | ||
| 385 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; | ||
| 386 | } | ||
| 387 | |||
| 388 | qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; | ||
| 375 | qpc->pri_path.vlan_index = vp_oper->vlan_idx; | 389 | qpc->pri_path.vlan_index = vp_oper->vlan_idx; |
| 376 | qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ | 390 | qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; |
| 377 | qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ | 391 | qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; |
| 378 | qpc->pri_path.sched_queue &= 0xC7; | 392 | qpc->pri_path.sched_queue &= 0xC7; |
| 379 | qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; | 393 | qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; |
| 380 | mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", | ||
| 381 | be32_to_cpu(qpc->local_qpn) & 0xffffff, port, | ||
| 382 | (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, | ||
| 383 | vp_oper->vlan_idx, (int)(qpc->pri_path.feup), | ||
| 384 | (int)(qpc->pri_path.fl)); | ||
| 385 | } | 394 | } |
| 386 | if (vp_oper->state.spoofchk) { | 395 | if (vp_oper->state.spoofchk) { |
| 387 | qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; | 396 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; |
| 388 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; | 397 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; |
| 389 | mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", | ||
| 390 | be32_to_cpu(qpc->local_qpn) & 0xffffff, port, | ||
| 391 | (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, | ||
| 392 | vp_oper->mac_idx); | ||
| 393 | } | 398 | } |
| 394 | return 0; | 399 | return 0; |
| 395 | } | 400 | } |
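
The resource_tracker hunk replaces open-coded bit values with named constants. From the magic numbers it removes, the bits appear to map as in the sketch below; this is only an inference from the old code, the authoritative definitions live in the mlx4 headers and may differ:

/* Values inferred from the magic numbers the patch removes. */
enum {
	MLX4_FL_CV			= 1 << 6, /* cv bit in pri_path.fl */
	MLX4_FL_ETH_HIDE_CQE_VLAN	= 1 << 2, /* hide_cqe_vlan bit */

	MLX4_FEUP_FORCE_ETH_UP		= 1 << 6, /* feup: force eth up */
	MLX4_FSM_FORCE_ETH_SRC_MAC	= 1 << 5, /* feup: fsm, anti-spoof */
	MLX4_FVL_FORCE_ETH_VLAN		= 1 << 3, /* feup: fvl bit */
	/* MLX4_FVL_RX_FORCE_ETH_VLAN replaces no old magic number in this
	 * diff, so its value cannot be inferred from here. */
};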
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 90c253b145ef..019c5f78732e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -429,6 +429,7 @@ struct qlcnic_hardware_context { | |||
| 429 | 429 | ||
| 430 | u16 port_type; | 430 | u16 port_type; |
| 431 | u16 board_type; | 431 | u16 board_type; |
| 432 | u16 supported_type; | ||
| 432 | 433 | ||
| 433 | u16 link_speed; | 434 | u16 link_speed; |
| 434 | u16 link_duplex; | 435 | u16 link_duplex; |
| @@ -1514,6 +1515,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); | |||
| 1514 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); | 1515 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); |
| 1515 | void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); | 1516 | void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); |
| 1516 | void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); | 1517 | void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); |
| 1518 | int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); | ||
| 1517 | 1519 | ||
| 1518 | int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); | 1520 | int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); |
| 1519 | int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); | 1521 | int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ea790a93ee7c..b4ff1e35a11d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) | |||
| 696 | return 1; | 696 | return 1; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) | 699 | u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) |
| 700 | { | 700 | { |
| 701 | u32 data; | 701 | u32 data; |
| 702 | unsigned long wait_time = 0; | ||
| 703 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 702 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 704 | /* wait for mailbox completion */ | 703 | /* wait for mailbox completion */ |
| 705 | do { | 704 | do { |
| 706 | data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); | 705 | data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); |
| 707 | if (++wait_time > QLCNIC_MBX_TIMEOUT) { | 706 | if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { |
| 708 | data = QLCNIC_RCODE_TIMEOUT; | 707 | data = QLCNIC_RCODE_TIMEOUT; |
| 709 | break; | 708 | break; |
| 710 | } | 709 | } |
| @@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, | |||
| 720 | u16 opcode; | 719 | u16 opcode; |
| 721 | u8 mbx_err_code; | 720 | u8 mbx_err_code; |
| 722 | unsigned long flags; | 721 | unsigned long flags; |
| 723 | u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; | ||
| 724 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 722 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 723 | u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; | ||
| 725 | 724 | ||
| 726 | opcode = LSW(cmd->req.arg[0]); | 725 | opcode = LSW(cmd->req.arg[0]); |
| 727 | if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { | 726 | if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { |
| @@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, | |||
| 754 | /* Signal FW about the impending command */ | 753 | /* Signal FW about the impending command */ |
| 755 | QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); | 754 | QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); |
| 756 | poll: | 755 | poll: |
| 757 | rsp = qlcnic_83xx_mbx_poll(adapter); | 756 | rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); |
| 758 | if (rsp != QLCNIC_RCODE_TIMEOUT) { | 757 | if (rsp != QLCNIC_RCODE_TIMEOUT) { |
| 759 | /* Get the FW response data */ | 758 | /* Get the FW response data */ |
| 760 | fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); | 759 | fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); |
| 761 | if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { | 760 | if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { |
| 762 | __qlcnic_83xx_process_aen(adapter); | 761 | __qlcnic_83xx_process_aen(adapter); |
| 763 | mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); | 762 | goto poll; |
| 764 | if (mbx_val) | ||
| 765 | goto poll; | ||
| 766 | } | 763 | } |
| 767 | mbx_err_code = QLCNIC_MBX_STATUS(fw_data); | 764 | mbx_err_code = QLCNIC_MBX_STATUS(fw_data); |
| 768 | rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); | 765 | rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); |
| @@ -1276,11 +1273,13 @@ out: | |||
| 1276 | return err; | 1273 | return err; |
| 1277 | } | 1274 | } |
| 1278 | 1275 | ||
| 1279 | static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) | 1276 | static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, |
| 1277 | int num_sds_ring) | ||
| 1280 | { | 1278 | { |
| 1281 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1279 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 1282 | struct qlcnic_host_sds_ring *sds_ring; | 1280 | struct qlcnic_host_sds_ring *sds_ring; |
| 1283 | struct qlcnic_host_rds_ring *rds_ring; | 1281 | struct qlcnic_host_rds_ring *rds_ring; |
| 1282 | u16 adapter_state = adapter->is_up; | ||
| 1284 | u8 ring; | 1283 | u8 ring; |
| 1285 | int ret; | 1284 | int ret; |
| 1286 | 1285 | ||
| @@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) | |||
| 1304 | ret = qlcnic_fw_create_ctx(adapter); | 1303 | ret = qlcnic_fw_create_ctx(adapter); |
| 1305 | if (ret) { | 1304 | if (ret) { |
| 1306 | qlcnic_detach(adapter); | 1305 | qlcnic_detach(adapter); |
| 1306 | if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { | ||
| 1307 | adapter->max_sds_rings = num_sds_ring; | ||
| 1308 | qlcnic_attach(adapter); | ||
| 1309 | } | ||
| 1307 | netif_device_attach(netdev); | 1310 | netif_device_attach(netdev); |
| 1308 | return ret; | 1311 | return ret; |
| 1309 | } | 1312 | } |
| @@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
| 1596 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) | 1599 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) |
| 1597 | return -EBUSY; | 1600 | return -EBUSY; |
| 1598 | 1601 | ||
| 1599 | ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); | 1602 | ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, |
| 1603 | max_sds_rings); | ||
| 1600 | if (ret) | 1604 | if (ret) |
| 1601 | goto fail_diag_alloc; | 1605 | goto fail_diag_alloc; |
| 1602 | 1606 | ||
| @@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) | |||
| 2830 | break; | 2834 | break; |
| 2831 | } | 2835 | } |
| 2832 | config = cmd.rsp.arg[3]; | 2836 | config = cmd.rsp.arg[3]; |
| 2837 | if (QLC_83XX_SFP_PRESENT(config)) { | ||
| 2838 | switch (ahw->module_type) { | ||
| 2839 | case LINKEVENT_MODULE_OPTICAL_UNKNOWN: | ||
| 2840 | case LINKEVENT_MODULE_OPTICAL_SRLR: | ||
| 2841 | case LINKEVENT_MODULE_OPTICAL_LRM: | ||
| 2842 | case LINKEVENT_MODULE_OPTICAL_SFP_1G: | ||
| 2843 | ahw->supported_type = PORT_FIBRE; | ||
| 2844 | break; | ||
| 2845 | case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: | ||
| 2846 | case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: | ||
| 2847 | case LINKEVENT_MODULE_TWINAX: | ||
| 2848 | ahw->supported_type = PORT_TP; | ||
| 2849 | break; | ||
| 2850 | default: | ||
| 2851 | ahw->supported_type = PORT_OTHER; | ||
| 2852 | } | ||
| 2853 | } | ||
| 2833 | if (config & 1) | 2854 | if (config & 1) |
| 2834 | err = 1; | 2855 | err = 1; |
| 2835 | } | 2856 | } |
| @@ -2838,7 +2859,8 @@ out: | |||
| 2838 | return config; | 2859 | return config; |
| 2839 | } | 2860 | } |
| 2840 | 2861 | ||
| 2841 | int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) | 2862 | int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, |
| 2863 | struct ethtool_cmd *ecmd) | ||
| 2842 | { | 2864 | { |
| 2843 | u32 config = 0; | 2865 | u32 config = 0; |
| 2844 | int status = 0; | 2866 | int status = 0; |
| @@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) | |||
| 2851 | ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); | 2873 | ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); |
| 2852 | /* hard code until there is a way to get it from flash */ | 2874 | /* hard code until there is a way to get it from flash */ |
| 2853 | ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; | 2875 | ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; |
| 2876 | |||
| 2877 | if (netif_running(adapter->netdev) && ahw->has_link_events) { | ||
| 2878 | ethtool_cmd_speed_set(ecmd, ahw->link_speed); | ||
| 2879 | ecmd->duplex = ahw->link_duplex; | ||
| 2880 | ecmd->autoneg = ahw->link_autoneg; | ||
| 2881 | } else { | ||
| 2882 | ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); | ||
| 2883 | ecmd->duplex = DUPLEX_UNKNOWN; | ||
| 2884 | ecmd->autoneg = AUTONEG_DISABLE; | ||
| 2885 | } | ||
| 2886 | |||
| 2887 | if (ahw->port_type == QLCNIC_XGBE) { | ||
| 2888 | ecmd->supported = SUPPORTED_1000baseT_Full; | ||
| 2889 | ecmd->advertising = ADVERTISED_1000baseT_Full; | ||
| 2890 | } else { | ||
| 2891 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
| 2892 | SUPPORTED_10baseT_Full | | ||
| 2893 | SUPPORTED_100baseT_Half | | ||
| 2894 | SUPPORTED_100baseT_Full | | ||
| 2895 | SUPPORTED_1000baseT_Half | | ||
| 2896 | SUPPORTED_1000baseT_Full); | ||
| 2897 | ecmd->advertising = (ADVERTISED_100baseT_Half | | ||
| 2898 | ADVERTISED_100baseT_Full | | ||
| 2899 | ADVERTISED_1000baseT_Half | | ||
| 2900 | ADVERTISED_1000baseT_Full); | ||
| 2901 | } | ||
| 2902 | |||
| 2903 | switch (ahw->supported_type) { | ||
| 2904 | case PORT_FIBRE: | ||
| 2905 | ecmd->supported |= SUPPORTED_FIBRE; | ||
| 2906 | ecmd->advertising |= ADVERTISED_FIBRE; | ||
| 2907 | ecmd->port = PORT_FIBRE; | ||
| 2908 | ecmd->transceiver = XCVR_EXTERNAL; | ||
| 2909 | break; | ||
| 2910 | case PORT_TP: | ||
| 2911 | ecmd->supported |= SUPPORTED_TP; | ||
| 2912 | ecmd->advertising |= ADVERTISED_TP; | ||
| 2913 | ecmd->port = PORT_TP; | ||
| 2914 | ecmd->transceiver = XCVR_INTERNAL; | ||
| 2915 | break; | ||
| 2916 | default: | ||
| 2917 | ecmd->supported |= SUPPORTED_FIBRE; | ||
| 2918 | ecmd->advertising |= ADVERTISED_FIBRE; | ||
| 2919 | ecmd->port = PORT_OTHER; | ||
| 2920 | ecmd->transceiver = XCVR_EXTERNAL; | ||
| 2921 | break; | ||
| 2922 | } | ||
| 2923 | ecmd->phy_address = ahw->physical_port; | ||
| 2854 | return status; | 2924 | return status; |
| 2855 | } | 2925 | } |
| 2856 | 2926 | ||
| @@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
| 3046 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) | 3116 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) |
| 3047 | return -EIO; | 3117 | return -EIO; |
| 3048 | 3118 | ||
| 3049 | ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); | 3119 | ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, |
| 3120 | max_sds_rings); | ||
| 3050 | if (ret) | 3121 | if (ret) |
| 3051 | goto fail_diag_irq; | 3122 | goto fail_diag_irq; |
| 3052 | 3123 | ||
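
The qlcnic_83xx_mbx_poll() signature change is what keeps the mailbox wait bounded: the elapsed-count now lives in the caller, so when the command path re-enters the poll after servicing an async event it keeps accumulating against one QLCNIC_MBX_TIMEOUT budget instead of restarting from zero. A generic sketch of that shared-budget polling pattern; every identifier below is hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_MBX_TIMEOUT	5000		/* total poll budget */
#define EXAMPLE_RCODE_TIMEOUT	0xffffffffU	/* hypothetical sentinel */
#define EXAMPLE_ASYNC_EVENT	(1U << 31)	/* hypothetical event flag */

/* Poll until the control register clears or the caller-supplied budget
 * runs out.  Because the counter lives in the caller, re-entering the
 * poll after servicing an async event does not restart the timeout. */
static u32 example_mbx_poll(void __iomem *ctrl, u32 *wait_time)
{
	u32 data;

	do {
		data = readl(ctrl);
		if (++(*wait_time) > EXAMPLE_MBX_TIMEOUT)
			return EXAMPLE_RCODE_TIMEOUT;
		udelay(35);
	} while (data);

	return data;
}

static int example_mbx_command(void __iomem *ctrl, void __iomem *fw_reg)
{
	u32 wait_time = 0;	/* one budget shared by every re-poll */
	u32 rsp, fw_data;

	for (;;) {
		rsp = example_mbx_poll(ctrl, &wait_time);
		if (rsp == EXAMPLE_RCODE_TIMEOUT)
			return -ETIMEDOUT;

		fw_data = readl(fw_reg);
		if (fw_data & EXAMPLE_ASYNC_EVENT) {
			/* handle the event, then poll again; wait_time keeps
			 * accumulating, so this loop is still bounded */
			continue;
		}
		return 0;	/* the firmware response has arrived */
	}
}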
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 1f1d85e6f2af..f5db67fc9f55 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
| @@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); | |||
| 603 | 603 | ||
| 604 | void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); | 604 | void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); |
| 605 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); | 605 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); |
| 606 | int qlcnic_83xx_get_settings(struct qlcnic_adapter *); | 606 | int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); |
| 607 | int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); | 607 | int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); |
| 608 | void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, | 608 | void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, |
| 609 | struct ethtool_pauseparam *); | 609 | struct ethtool_pauseparam *); |
| @@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *); | |||
| 620 | int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); | 620 | int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); |
| 621 | int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); | 621 | int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); |
| 622 | u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); | 622 | u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); |
| 623 | u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); | 623 | u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); |
| 624 | void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); | 624 | void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); |
| 625 | void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); | 625 | void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); |
| 626 | #endif | 626 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index ab1d8d99cbd5..c67d1eb35e8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
| @@ -435,10 +435,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) | |||
| 435 | } | 435 | } |
| 436 | done: | 436 | done: |
| 437 | netif_device_attach(netdev); | 437 | netif_device_attach(netdev); |
| 438 | if (netif_running(netdev)) { | ||
| 439 | netif_carrier_on(netdev); | ||
| 440 | netif_wake_queue(netdev); | ||
| 441 | } | ||
| 442 | } | 438 | } |
| 443 | 439 | ||
| 444 | static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, | 440 | static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, |
| @@ -642,15 +638,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
| 642 | 638 | ||
| 643 | static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) | 639 | static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) |
| 644 | { | 640 | { |
| 641 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
| 642 | |||
| 645 | qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); | 643 | qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); |
| 646 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
| 647 | set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); | 644 | set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); |
| 648 | qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); | 645 | qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); |
| 649 | set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); | 646 | set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); |
| 650 | adapter->ahw->idc.quiesce_req = 0; | 647 | |
| 651 | adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; | 648 | ahw->idc.quiesce_req = 0; |
| 652 | adapter->ahw->idc.err_code = 0; | 649 | ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; |
| 653 | adapter->ahw->idc.collect_dump = 0; | 650 | ahw->idc.err_code = 0; |
| 651 | ahw->idc.collect_dump = 0; | ||
| 652 | ahw->reset_context = 0; | ||
| 653 | adapter->tx_timeo_cnt = 0; | ||
| 654 | |||
| 655 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
| 654 | } | 656 | } |
| 655 | 657 | ||
| 656 | /** | 658 | /** |
| @@ -851,6 +853,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
| 851 | /* Check for soft reset request */ | 853 | /* Check for soft reset request */ |
| 852 | if (ahw->reset_context && | 854 | if (ahw->reset_context && |
| 853 | !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { | 855 | !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { |
| 856 | adapter->ahw->reset_context = 0; | ||
| 854 | qlcnic_83xx_idc_tx_soft_reset(adapter); | 857 | qlcnic_83xx_idc_tx_soft_reset(adapter); |
| 855 | return ret; | 858 | return ret; |
| 856 | } | 859 | } |
| @@ -914,6 +917,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) | |||
| 914 | static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) | 917 | static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) |
| 915 | { | 918 | { |
| 916 | dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); | 919 | dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); |
| 920 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
| 917 | adapter->ahw->idc.err_code = -EIO; | 921 | adapter->ahw->idc.err_code = -EIO; |
| 918 | 922 | ||
| 919 | return 0; | 923 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 08efb4635007..f67652de5a63 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
| @@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { | |||
| 131 | "ctx_lro_pkt_cnt", | 131 | "ctx_lro_pkt_cnt", |
| 132 | "ctx_ip_csum_error", | 132 | "ctx_ip_csum_error", |
| 133 | "ctx_rx_pkts_wo_ctx", | 133 | "ctx_rx_pkts_wo_ctx", |
| 134 | "ctx_rx_pkts_dropped_wo_sts", | 134 | "ctx_rx_pkts_drop_wo_sds_on_card", |
| 135 | "ctx_rx_pkts_drop_wo_sds_on_host", | ||
| 135 | "ctx_rx_osized_pkts", | 136 | "ctx_rx_osized_pkts", |
| 136 | "ctx_rx_pkts_dropped_wo_rds", | 137 | "ctx_rx_pkts_dropped_wo_rds", |
| 137 | "ctx_rx_unexpected_mcast_pkts", | 138 | "ctx_rx_unexpected_mcast_pkts", |
| 138 | "ctx_invalid_mac_address", | 139 | "ctx_invalid_mac_address", |
| 139 | "ctx_rx_rds_ring_prim_attemoted", | 140 | "ctx_rx_rds_ring_prim_attempted", |
| 140 | "ctx_rx_rds_ring_prim_success", | 141 | "ctx_rx_rds_ring_prim_success", |
| 141 | "ctx_num_lro_flows_added", | 142 | "ctx_num_lro_flows_added", |
| 142 | "ctx_num_lro_flows_removed", | 143 | "ctx_num_lro_flows_removed", |
| @@ -251,6 +252,18 @@ static int | |||
| 251 | qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | 252 | qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
| 252 | { | 253 | { |
| 253 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 254 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 255 | |||
| 256 | if (qlcnic_82xx_check(adapter)) | ||
| 257 | return qlcnic_82xx_get_settings(adapter, ecmd); | ||
| 258 | else if (qlcnic_83xx_check(adapter)) | ||
| 259 | return qlcnic_83xx_get_settings(adapter, ecmd); | ||
| 260 | |||
| 261 | return -EIO; | ||
| 262 | } | ||
| 263 | |||
| 264 | int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, | ||
| 265 | struct ethtool_cmd *ecmd) | ||
| 266 | { | ||
| 254 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 267 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 255 | u32 speed, reg; | 268 | u32 speed, reg; |
| 256 | int check_sfp_module = 0; | 269 | int check_sfp_module = 0; |
| @@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
| 276 | 289 | ||
| 277 | } else if (adapter->ahw->port_type == QLCNIC_XGBE) { | 290 | } else if (adapter->ahw->port_type == QLCNIC_XGBE) { |
| 278 | u32 val = 0; | 291 | u32 val = 0; |
| 279 | if (qlcnic_83xx_check(adapter)) | 292 | val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); |
| 280 | qlcnic_83xx_get_settings(adapter); | ||
| 281 | else | ||
| 282 | val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); | ||
| 283 | 293 | ||
| 284 | if (val == QLCNIC_PORT_MODE_802_3_AP) { | 294 | if (val == QLCNIC_PORT_MODE_802_3_AP) { |
| 285 | ecmd->supported = SUPPORTED_1000baseT_Full; | 295 | ecmd->supported = SUPPORTED_1000baseT_Full; |
| @@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
| 289 | ecmd->advertising = ADVERTISED_10000baseT_Full; | 299 | ecmd->advertising = ADVERTISED_10000baseT_Full; |
| 290 | } | 300 | } |
| 291 | 301 | ||
| 292 | if (netif_running(dev) && adapter->ahw->has_link_events) { | 302 | if (netif_running(adapter->netdev) && ahw->has_link_events) { |
| 293 | if (qlcnic_82xx_check(adapter)) { | 303 | reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); |
| 294 | reg = QLCRD32(adapter, | 304 | speed = P3P_LINK_SPEED_VAL(pcifn, reg); |
| 295 | P3P_LINK_SPEED_REG(pcifn)); | 305 | ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; |
| 296 | speed = P3P_LINK_SPEED_VAL(pcifn, reg); | 306 | ethtool_cmd_speed_set(ecmd, ahw->link_speed); |
| 297 | ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; | 307 | ecmd->autoneg = ahw->link_autoneg; |
| 298 | } | 308 | ecmd->duplex = ahw->link_duplex; |
| 299 | ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); | ||
| 300 | ecmd->autoneg = adapter->ahw->link_autoneg; | ||
| 301 | ecmd->duplex = adapter->ahw->link_duplex; | ||
| 302 | goto skip; | 309 | goto skip; |
| 303 | } | 310 | } |
| 304 | 311 | ||
| @@ -340,8 +347,8 @@ skip: | |||
| 340 | case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: | 347 | case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: |
| 341 | ecmd->advertising |= ADVERTISED_TP; | 348 | ecmd->advertising |= ADVERTISED_TP; |
| 342 | ecmd->supported |= SUPPORTED_TP; | 349 | ecmd->supported |= SUPPORTED_TP; |
| 343 | check_sfp_module = netif_running(dev) && | 350 | check_sfp_module = netif_running(adapter->netdev) && |
| 344 | adapter->ahw->has_link_events; | 351 | ahw->has_link_events; |
| 345 | case QLCNIC_BRDTYPE_P3P_10G_XFP: | 352 | case QLCNIC_BRDTYPE_P3P_10G_XFP: |
| 346 | ecmd->supported |= SUPPORTED_FIBRE; | 353 | ecmd->supported |= SUPPORTED_FIBRE; |
| 347 | ecmd->advertising |= ADVERTISED_FIBRE; | 354 | ecmd->advertising |= ADVERTISED_FIBRE; |
| @@ -355,8 +362,8 @@ skip: | |||
| 355 | ecmd->advertising |= | 362 | ecmd->advertising |= |
| 356 | (ADVERTISED_FIBRE | ADVERTISED_TP); | 363 | (ADVERTISED_FIBRE | ADVERTISED_TP); |
| 357 | ecmd->port = PORT_FIBRE; | 364 | ecmd->port = PORT_FIBRE; |
| 358 | check_sfp_module = netif_running(dev) && | 365 | check_sfp_module = netif_running(adapter->netdev) && |
| 359 | adapter->ahw->has_link_events; | 366 | ahw->has_link_events; |
| 360 | } else { | 367 | } else { |
| 361 | ecmd->autoneg = AUTONEG_ENABLE; | 368 | ecmd->autoneg = AUTONEG_ENABLE; |
| 362 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); | 369 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); |
| @@ -365,13 +372,6 @@ skip: | |||
| 365 | ecmd->port = PORT_TP; | 372 | ecmd->port = PORT_TP; |
| 366 | } | 373 | } |
| 367 | break; | 374 | break; |
| 368 | case QLCNIC_BRDTYPE_83XX_10G: | ||
| 369 | ecmd->autoneg = AUTONEG_DISABLE; | ||
| 370 | ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); | ||
| 371 | ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); | ||
| 372 | ecmd->port = PORT_FIBRE; | ||
| 373 | check_sfp_module = netif_running(dev) && ahw->has_link_events; | ||
| 374 | break; | ||
| 375 | default: | 375 | default: |
| 376 | dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", | 376 | dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", |
| 377 | adapter->ahw->board_type); | 377 | adapter->ahw->board_type); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 95b1b5732838..b6818f4356b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | |||
| @@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata { | |||
| 134 | 134 | ||
| 135 | #define QLCNIC_SET_OWNER 1 | 135 | #define QLCNIC_SET_OWNER 1 |
| 136 | #define QLCNIC_CLR_OWNER 0 | 136 | #define QLCNIC_CLR_OWNER 0 |
| 137 | #define QLCNIC_MBX_TIMEOUT 10000 | 137 | #define QLCNIC_MBX_TIMEOUT 5000 |
| 138 | 138 | ||
| 139 | #define QLCNIC_MBX_RSP_OK 1 | 139 | #define QLCNIC_MBX_RSP_OK 1 |
| 140 | #define QLCNIC_MBX_PORT_RSP_OK 0x1a | 140 | #define QLCNIC_MBX_PORT_RSP_OK 0x1a |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 264d5a4f8153..8fb836d4129f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn, | |||
| 37 | "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); | 37 | "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); |
| 38 | 38 | ||
| 39 | int qlcnic_use_msi = 1; | 39 | int qlcnic_use_msi = 1; |
| 40 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); | 40 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); |
| 41 | module_param_named(use_msi, qlcnic_use_msi, int, 0444); | 41 | module_param_named(use_msi, qlcnic_use_msi, int, 0444); |
| 42 | 42 | ||
| 43 | int qlcnic_use_msi_x = 1; | 43 | int qlcnic_use_msi_x = 1; |
| 44 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); | 44 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); |
| 45 | module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); | 45 | module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); |
| 46 | 46 | ||
| 47 | int qlcnic_auto_fw_reset = 1; | 47 | int qlcnic_auto_fw_reset = 1; |
| 48 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); | 48 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); |
| 49 | module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); | 49 | module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); |
| 50 | 50 | ||
| 51 | int qlcnic_load_fw_file; | 51 | int qlcnic_load_fw_file; |
| 52 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); | 52 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); |
| 53 | module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); | 53 | module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); |
| 54 | 54 | ||
| 55 | int qlcnic_config_npars; | 55 | int qlcnic_config_npars; |
| 56 | module_param(qlcnic_config_npars, int, 0444); | 56 | module_param(qlcnic_config_npars, int, 0444); |
| 57 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); | 57 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); |
| 58 | 58 | ||
| 59 | static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | 59 | static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); |
| 60 | static void qlcnic_remove(struct pci_dev *pdev); | 60 | static void qlcnic_remove(struct pci_dev *pdev); |
| @@ -308,6 +308,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) | |||
| 308 | return 0; | 308 | return 0; |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) | ||
| 312 | { | ||
| 313 | struct qlcnic_mac_list_s *cur; | ||
| 314 | struct list_head *head; | ||
| 315 | |||
| 316 | list_for_each(head, &adapter->mac_list) { | ||
| 317 | cur = list_entry(head, struct qlcnic_mac_list_s, list); | ||
| 318 | if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) { | ||
| 319 | qlcnic_sre_macaddr_change(adapter, cur->mac_addr, | ||
| 320 | 0, QLCNIC_MAC_DEL); | ||
| 321 | list_del(&cur->list); | ||
| 322 | kfree(cur); | ||
| 323 | return; | ||
| 324 | } | ||
| 325 | } | ||
| 326 | } | ||
| 327 | |||
| 311 | static int qlcnic_set_mac(struct net_device *netdev, void *p) | 328 | static int qlcnic_set_mac(struct net_device *netdev, void *p) |
| 312 | { | 329 | { |
| 313 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 330 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| @@ -322,11 +339,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) | |||
| 322 | if (!is_valid_ether_addr(addr->sa_data)) | 339 | if (!is_valid_ether_addr(addr->sa_data)) |
| 323 | return -EINVAL; | 340 | return -EINVAL; |
| 324 | 341 | ||
| 342 | if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN)) | ||
| 343 | return 0; | ||
| 344 | |||
| 325 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { | 345 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { |
| 326 | netif_device_detach(netdev); | 346 | netif_device_detach(netdev); |
| 327 | qlcnic_napi_disable(adapter); | 347 | qlcnic_napi_disable(adapter); |
| 328 | } | 348 | } |
| 329 | 349 | ||
| 350 | qlcnic_delete_adapter_mac(adapter); | ||
| 330 | memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); | 351 | memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); |
| 331 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 352 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
| 332 | qlcnic_set_multi(adapter->netdev); | 353 | qlcnic_set_multi(adapter->netdev); |
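Note: the new qlcnic_delete_adapter_mac() helper walks adapter->mac_list, unlinks the entry matching the old station address and frees it before the replacement address is programmed, and qlcnic_set_mac() now returns early when the requested address is unchanged. As a rough illustration, here is a standalone C sketch of the same find-unlink-free pattern; the list layout, names and sample address are invented for the example and are not the driver's actual structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_entry {
	unsigned char addr[ETH_ALEN];
	struct mac_entry *next;
};

/* Remove and free the first entry whose address matches 'addr'. */
static void delete_mac(struct mac_entry **head, const unsigned char *addr)
{
	for (struct mac_entry **pp = head; *pp; pp = &(*pp)->next) {
		if (!memcmp((*pp)->addr, addr, ETH_ALEN)) {
			struct mac_entry *victim = *pp;

			*pp = victim->next;	/* unlink */
			free(victim);		/* release */
			return;			/* at most one entry matches */
		}
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
	struct mac_entry *head = calloc(1, sizeof(*head));

	if (!head)
		return 1;
	memcpy(head->addr, mac, ETH_ALEN);
	delete_mac(&head, mac);
	printf("list is %s\n", head ? "non-empty" : "empty");
	return 0;
}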
| @@ -2481,12 +2502,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev) | |||
| 2481 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) | 2502 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) |
| 2482 | return; | 2503 | return; |
| 2483 | 2504 | ||
| 2484 | dev_err(&netdev->dev, "transmit timeout, resetting.\n"); | 2505 | if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) { |
| 2485 | 2506 | netdev_info(netdev, "Tx timeout, reset the adapter.\n"); | |
| 2486 | if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) | 2507 | if (qlcnic_82xx_check(adapter)) |
| 2487 | adapter->need_fw_reset = 1; | 2508 | adapter->need_fw_reset = 1; |
| 2488 | else | 2509 | else if (qlcnic_83xx_check(adapter)) |
| 2510 | qlcnic_83xx_idc_request_reset(adapter, | ||
| 2511 | QLCNIC_FORCE_FW_DUMP_KEY); | ||
| 2512 | } else { | ||
| 2513 | netdev_info(netdev, "Tx timeout, reset adapter context.\n"); | ||
| 2489 | adapter->ahw->reset_context = 1; | 2514 | adapter->ahw->reset_context = 1; |
| 2515 | } | ||
| 2490 | } | 2516 | } |
| 2491 | 2517 | ||
| 2492 | static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) | 2518 | static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) |
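Note: the reworked qlcnic_tx_timeout() only escalates to a firmware reset (82xx) or an IDC reset request (83xx) once QLCNIC_MAX_TX_TIMEOUTS consecutive timeouts have accumulated, and otherwise settles for the cheaper context reset. A minimal sketch of that threshold-based escalation, using a placeholder limit rather than the driver's constant:

#include <stdio.h>

#define MAX_TX_TIMEOUTS 8	/* placeholder for QLCNIC_MAX_TX_TIMEOUTS */

static int tx_timeo_cnt;

static void handle_tx_timeout(void)
{
	if (++tx_timeo_cnt >= MAX_TX_TIMEOUTS) {
		/* heavy recovery: ask for a full adapter/firmware reset */
		printf("Tx timeout, reset the adapter\n");
	} else {
		/* light recovery: only rebuild the Tx context */
		printf("Tx timeout, reset adapter context\n");
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		handle_tx_timeout();
	return 0;
}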
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 44d547d78b84..3869c3864deb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
| @@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) | |||
| 280 | static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, | 280 | static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, |
| 281 | u32 *pay, u8 pci_func, u8 size) | 281 | u32 *pay, u8 pci_func, u8 size) |
| 282 | { | 282 | { |
| 283 | u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; | ||
| 283 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 284 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 284 | unsigned long flags; | 285 | unsigned long flags; |
| 285 | u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val; | ||
| 286 | u16 opcode; | 286 | u16 opcode; |
| 287 | u8 mbx_err_code; | 287 | u8 mbx_err_code; |
| 288 | int i, j; | 288 | int i, j; |
| @@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, | |||
| 330 | * assume something is wrong. | 330 | * assume something is wrong. |
| 331 | */ | 331 | */ |
| 332 | poll: | 332 | poll: |
| 333 | rsp = qlcnic_83xx_mbx_poll(adapter); | 333 | rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); |
| 334 | if (rsp != QLCNIC_RCODE_TIMEOUT) { | 334 | if (rsp != QLCNIC_RCODE_TIMEOUT) { |
| 335 | /* Get the FW response data */ | 335 | /* Get the FW response data */ |
| 336 | fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); | 336 | fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); |
| 337 | if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { | 337 | if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { |
| 338 | __qlcnic_83xx_process_aen(adapter); | 338 | __qlcnic_83xx_process_aen(adapter); |
| 339 | mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); | 339 | goto poll; |
| 340 | if (mbx_val) | ||
| 341 | goto poll; | ||
| 342 | } | 340 | } |
| 343 | mbx_err_code = QLCNIC_MBX_STATUS(fw_data); | 341 | mbx_err_code = QLCNIC_MBX_STATUS(fw_data); |
| 344 | rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); | 342 | rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index c81be2da119b..1a66ccded235 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
| @@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, | |||
| 1133 | if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) | 1133 | if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) |
| 1134 | return -EINVAL; | 1134 | return -EINVAL; |
| 1135 | 1135 | ||
| 1136 | if (!(cmd->req.arg[1] & BIT_8)) | ||
| 1137 | return -EINVAL; | ||
| 1138 | |||
| 1139 | return 0; | 1136 | return 0; |
| 1140 | } | 1137 | } |
| 1141 | 1138 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 87463bc701a6..50235d201592 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, | |||
| 1106 | if (pci_dma_mapping_error(qdev->pdev, map)) { | 1106 | if (pci_dma_mapping_error(qdev->pdev, map)) { |
| 1107 | __free_pages(rx_ring->pg_chunk.page, | 1107 | __free_pages(rx_ring->pg_chunk.page, |
| 1108 | qdev->lbq_buf_order); | 1108 | qdev->lbq_buf_order); |
| 1109 | rx_ring->pg_chunk.page = NULL; | ||
| 1109 | netif_err(qdev, drv, qdev->ndev, | 1110 | netif_err(qdev, drv, qdev->ndev, |
| 1110 | "PCI mapping failed.\n"); | 1111 | "PCI mapping failed.\n"); |
| 1111 | return -ENOMEM; | 1112 | return -ENOMEM; |
| @@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring | |||
| 2777 | curr_idx = 0; | 2778 | curr_idx = 0; |
| 2778 | 2779 | ||
| 2779 | } | 2780 | } |
| 2781 | if (rx_ring->pg_chunk.page) { | ||
| 2782 | pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, | ||
| 2783 | ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); | ||
| 2784 | put_page(rx_ring->pg_chunk.page); | ||
| 2785 | rx_ring->pg_chunk.page = NULL; | ||
| 2786 | } | ||
| 2780 | } | 2787 | } |
| 2781 | 2788 | ||
| 2782 | static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) | 2789 | static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) |
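Note: the qlge change clears rx_ring->pg_chunk.page after a failed DMA mapping and teaches ql_free_lbq_buffers() to unmap and release a partially consumed page chunk that is still cached. The standalone snippet below illustrates the underlying release-and-clear idiom that keeps such teardown paths safe to run more than once; the structure and sizes are invented for the example.

#include <stdlib.h>

struct rx_chunk {
	void *page;	/* stands in for the driver's cached page chunk */
};

static void free_chunk(struct rx_chunk *c)
{
	if (c->page) {
		free(c->page);
		c->page = NULL;	/* later cleanup passes see nothing to do */
	}
}

int main(void)
{
	struct rx_chunk c = { .page = malloc(4096) };

	free_chunk(&c);
	free_chunk(&c);	/* second call is now a no-op, not a double free */
	return 0;
}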
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33dc6f2418f2..42e9dd05c936 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
| 2745 | if (mdp->cd->tsu) { | 2745 | if (mdp->cd->tsu) { |
| 2746 | struct resource *rtsu; | 2746 | struct resource *rtsu; |
| 2747 | rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 2747 | rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 2748 | if (!rtsu) { | ||
| 2749 | dev_err(&pdev->dev, "Not found TSU resource\n"); | ||
| 2750 | ret = -ENODEV; | ||
| 2751 | goto out_release; | ||
| 2752 | } | ||
| 2753 | mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); | 2748 | mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); |
| 2754 | if (IS_ERR(mdp->tsu_addr)) { | 2749 | if (IS_ERR(mdp->tsu_addr)) { |
| 2755 | ret = PTR_ERR(mdp->tsu_addr); | 2750 | ret = PTR_ERR(mdp->tsu_addr); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index f695a50bac47..43c1f3223322 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config STMMAC_ETH | 1 | config STMMAC_ETH |
| 2 | tristate "STMicroelectronics 10/100/1000 Ethernet driver" | 2 | tristate "STMicroelectronics 10/100/1000 Ethernet driver" |
| 3 | depends on HAS_IOMEM | 3 | depends on HAS_IOMEM && HAS_DMA |
| 4 | select NET_CORE | 4 | select NET_CORE |
| 5 | select MII | 5 | select MII |
| 6 | select PHYLIB | 6 | select PHYLIB |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d5a141c7c4e7..1c502bb0c916 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | if (port->passthru) | 231 | if (port->passthru) |
| 232 | vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); | 232 | vlan = list_first_or_null_rcu(&port->vlans, |
| 233 | struct macvlan_dev, list); | ||
| 233 | else | 234 | else |
| 234 | vlan = macvlan_hash_lookup(port, eth->h_dest); | 235 | vlan = macvlan_hash_lookup(port, eth->h_dest); |
| 235 | if (vlan == NULL) | 236 | if (vlan == NULL) |
| @@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 814 | if (err < 0) | 815 | if (err < 0) |
| 815 | goto upper_dev_unlink; | 816 | goto upper_dev_unlink; |
| 816 | 817 | ||
| 817 | list_add_tail(&vlan->list, &port->vlans); | 818 | list_add_tail_rcu(&vlan->list, &port->vlans); |
| 818 | netif_stacked_transfer_operstate(lowerdev, dev); | 819 | netif_stacked_transfer_operstate(lowerdev, dev); |
| 819 | 820 | ||
| 820 | return 0; | 821 | return 0; |
| @@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) | |||
| 842 | { | 843 | { |
| 843 | struct macvlan_dev *vlan = netdev_priv(dev); | 844 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 844 | 845 | ||
| 845 | list_del(&vlan->list); | 846 | list_del_rcu(&vlan->list); |
| 846 | unregister_netdevice_queue(dev, head); | 847 | unregister_netdevice_queue(dev, head); |
| 847 | netdev_upper_dev_unlink(vlan->lowerdev, dev); | 848 | netdev_upper_dev_unlink(vlan->lowerdev, dev); |
| 848 | } | 849 | } |
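Note: macvlan's receive handler runs under RCU, so list insert/delete move to the _rcu primitives and the passthru lookup switches to list_first_or_null_rcu(), which yields NULL when a reader observes the list empty during teardown instead of fabricating a bogus first entry. The userspace sketch below models only the empty-list check on a kernel-style circular list; RCU publication and grace periods are deliberately left out.

#include <stdio.h>
#include <stddef.h>

/* Kernel-style circular list: an empty list is one whose head points at
 * itself, so "first entry" is only meaningful after an emptiness check. */
struct list_head {
	struct list_head *next, *prev;
};

struct vlan {
	int id;
	struct list_head list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct vlan *first_vlan_or_null(struct list_head *head)
{
	if (head->next == head)		/* empty list: nothing to return */
		return NULL;
	return container_of(head->next, struct vlan, list);
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct vlan v = { .id = 7 };

	if (!first_vlan_or_null(&head))
		printf("empty list, drop the frame\n");

	/* link v in at the front */
	v.list.next = head.next;
	v.list.prev = &head;
	head.next->prev = &v.list;
	head.next = &v.list;

	printf("first vlan id: %d\n", first_vlan_or_null(&head)->id);
	return 0;
}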
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index ed947dd76fbd..f3cdf64997d6 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c | |||
| @@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev) | |||
| 375 | if (dev == NULL) | 375 | if (dev == NULL) |
| 376 | return; | 376 | return; |
| 377 | 377 | ||
| 378 | list_del(&dev->list); | ||
| 379 | |||
| 378 | ndev = dev->ndev; | 380 | ndev = dev->ndev; |
| 379 | 381 | ||
| 380 | unregister_netdev(ndev); | 382 | unregister_netdev(ndev); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3c23fdc27bf0..655bb25eed2b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/cpu.h> | 29 | #include <linux/cpu.h> |
| 30 | 30 | ||
| 31 | static int napi_weight = 128; | 31 | static int napi_weight = NAPI_POLL_WEIGHT; |
| 32 | module_param(napi_weight, int, 0444); | 32 | module_param(napi_weight, int, 0444); |
| 33 | 33 | ||
| 34 | static bool csum = true, gso = true; | 34 | static bool csum = true, gso = true; |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9b20d9ee2719..7f702fe3ecc2 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
| @@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work) | |||
| 2369 | int i; | 2369 | int i; |
| 2370 | bool needreset = false; | 2370 | bool needreset = false; |
| 2371 | 2371 | ||
| 2372 | if (!test_bit(ATH_STAT_STARTED, ah->status)) | ||
| 2373 | return; | ||
| 2374 | |||
| 2372 | mutex_lock(&ah->lock); | 2375 | mutex_lock(&ah->lock); |
| 2373 | 2376 | ||
| 2374 | for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { | 2377 | for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { |
| @@ -2676,6 +2679,7 @@ done: | |||
| 2676 | mmiowb(); | 2679 | mmiowb(); |
| 2677 | mutex_unlock(&ah->lock); | 2680 | mutex_unlock(&ah->lock); |
| 2678 | 2681 | ||
| 2682 | set_bit(ATH_STAT_STARTED, ah->status); | ||
| 2679 | ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, | 2683 | ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, |
| 2680 | msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); | 2684 | msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); |
| 2681 | 2685 | ||
| @@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw) | |||
| 2737 | 2741 | ||
| 2738 | ath5k_stop_tasklets(ah); | 2742 | ath5k_stop_tasklets(ah); |
| 2739 | 2743 | ||
| 2744 | clear_bit(ATH_STAT_STARTED, ah->status); | ||
| 2740 | cancel_delayed_work_sync(&ah->tx_complete_work); | 2745 | cancel_delayed_work_sync(&ah->tx_complete_work); |
| 2741 | 2746 | ||
| 2742 | if (!ath5k_modparam_no_hw_rfkill_switch) | 2747 | if (!ath5k_modparam_no_hw_rfkill_switch) |
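Note: ath5k now sets an ATH_STAT_STARTED bit when the TX-complete poll work is armed and clears it in ath5k_stop(), so a late invocation of the worker after shutdown turns into an early return. A single-threaded sketch of gating a periodic worker on such a flag (the driver itself uses atomic bit operations):

#include <stdbool.h>
#include <stdio.h>

static bool started;

static void tx_complete_poll_work(void)
{
	if (!started)
		return;		/* device already stopped, nothing to poll */
	printf("polling tx queues\n");
}

int main(void)
{
	started = true;		/* set once polling is armed */
	tx_complete_poll_work();
	started = false;	/* cleared in the stop path */
	tx_complete_poll_work();	/* now a harmless no-op */
	return 0;
}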
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 17507dc8a1e7..f3dc124c60c7 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig | |||
| @@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT | |||
| 17 | 17 | ||
| 18 | config ATH9K | 18 | config ATH9K |
| 19 | tristate "Atheros 802.11n wireless cards support" | 19 | tristate "Atheros 802.11n wireless cards support" |
| 20 | depends on MAC80211 | 20 | depends on MAC80211 && HAS_DMA |
| 21 | select ATH9K_HW | 21 | select ATH9K_HW |
| 22 | select MAC80211_LEDS | 22 | select MAC80211_LEDS |
| 23 | select LEDS_CLASS | 23 | select LEDS_CLASS |
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index 0c2ac0c6dc89..e85a8b076c22 100644 --- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | |||
| @@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = { | |||
| 233 | {0x00009d10, 0x01834061}, | 233 | {0x00009d10, 0x01834061}, |
| 234 | {0x00009d14, 0x00c00400}, | 234 | {0x00009d14, 0x00c00400}, |
| 235 | {0x00009d18, 0x00000000}, | 235 | {0x00009d18, 0x00000000}, |
| 236 | {0x00009e08, 0x0078230c}, | 236 | {0x00009e08, 0x0038230c}, |
| 237 | {0x00009e24, 0x990bb515}, | 237 | {0x00009e24, 0x9907b515}, |
| 238 | {0x00009e28, 0x126f0000}, | 238 | {0x00009e28, 0x126f0600}, |
| 239 | {0x00009e30, 0x06336f77}, | 239 | {0x00009e30, 0x06336f77}, |
| 240 | {0x00009e34, 0x6af6532f}, | 240 | {0x00009e34, 0x6af6532f}, |
| 241 | {0x00009e38, 0x0cc80c00}, | 241 | {0x00009e38, 0x0cc80c00}, |
| @@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = { | |||
| 337 | 337 | ||
| 338 | static const u32 ar9565_1p0_baseband_postamble[][5] = { | 338 | static const u32 ar9565_1p0_baseband_postamble[][5] = { |
| 339 | /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ | 339 | /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ |
| 340 | {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, | 340 | {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009}, |
| 341 | {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, | 341 | {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, |
| 342 | {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, | 342 | {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, |
| 343 | {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, | 343 | {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, |
| @@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { | |||
| 345 | {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, | 345 | {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, |
| 346 | {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, | 346 | {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, |
| 347 | {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, | 347 | {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, |
| 348 | {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, | 348 | {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020}, |
| 349 | {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, | 349 | {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, |
| 350 | {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, | 350 | {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, |
| 351 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, | 351 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, |
| 352 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 352 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 353 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, | 353 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, |
| @@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = { | |||
| 450 | 450 | ||
| 451 | static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { | 451 | static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { |
| 452 | /* Addr allmodes */ | 452 | /* Addr allmodes */ |
| 453 | {0x00004050, 0x00300300}, | ||
| 454 | {0x0000406c, 0x00100000}, | ||
| 453 | {0x0000a000, 0x00010000}, | 455 | {0x0000a000, 0x00010000}, |
| 454 | {0x0000a004, 0x00030002}, | 456 | {0x0000a004, 0x00030002}, |
| 455 | {0x0000a008, 0x00050004}, | 457 | {0x0000a008, 0x00050004}, |
| @@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { | |||
| 498 | {0x0000a0b4, 0x00000000}, | 500 | {0x0000a0b4, 0x00000000}, |
| 499 | {0x0000a0b8, 0x00000000}, | 501 | {0x0000a0b8, 0x00000000}, |
| 500 | {0x0000a0bc, 0x00000000}, | 502 | {0x0000a0bc, 0x00000000}, |
| 501 | {0x0000a0c0, 0x001f0000}, | 503 | {0x0000a0c0, 0x00bf00a0}, |
| 502 | {0x0000a0c4, 0x01000101}, | 504 | {0x0000a0c4, 0x11a011a1}, |
| 503 | {0x0000a0c8, 0x011e011f}, | 505 | {0x0000a0c8, 0x11be11bf}, |
| 504 | {0x0000a0cc, 0x011c011d}, | 506 | {0x0000a0cc, 0x11bc11bd}, |
| 505 | {0x0000a0d0, 0x02030204}, | 507 | {0x0000a0d0, 0x22632264}, |
| 506 | {0x0000a0d4, 0x02010202}, | 508 | {0x0000a0d4, 0x22612262}, |
| 507 | {0x0000a0d8, 0x021f0200}, | 509 | {0x0000a0d8, 0x227f2260}, |
| 508 | {0x0000a0dc, 0x0302021e}, | 510 | {0x0000a0dc, 0x4322227e}, |
| 509 | {0x0000a0e0, 0x03000301}, | 511 | {0x0000a0e0, 0x43204321}, |
| 510 | {0x0000a0e4, 0x031e031f}, | 512 | {0x0000a0e4, 0x433e433f}, |
| 511 | {0x0000a0e8, 0x0402031d}, | 513 | {0x0000a0e8, 0x4462433d}, |
| 512 | {0x0000a0ec, 0x04000401}, | 514 | {0x0000a0ec, 0x44604461}, |
| 513 | {0x0000a0f0, 0x041e041f}, | 515 | {0x0000a0f0, 0x447e447f}, |
| 514 | {0x0000a0f4, 0x0502041d}, | 516 | {0x0000a0f4, 0x5582447d}, |
| 515 | {0x0000a0f8, 0x05000501}, | 517 | {0x0000a0f8, 0x55805581}, |
| 516 | {0x0000a0fc, 0x051e051f}, | 518 | {0x0000a0fc, 0x559e559f}, |
| 517 | {0x0000a100, 0x06010602}, | 519 | {0x0000a100, 0x66816682}, |
| 518 | {0x0000a104, 0x061f0600}, | 520 | {0x0000a104, 0x669f6680}, |
| 519 | {0x0000a108, 0x061d061e}, | 521 | {0x0000a108, 0x669d669e}, |
| 520 | {0x0000a10c, 0x07020703}, | 522 | {0x0000a10c, 0x77627763}, |
| 521 | {0x0000a110, 0x07000701}, | 523 | {0x0000a110, 0x77607761}, |
| 522 | {0x0000a114, 0x00000000}, | 524 | {0x0000a114, 0x00000000}, |
| 523 | {0x0000a118, 0x00000000}, | 525 | {0x0000a118, 0x00000000}, |
| 524 | {0x0000a11c, 0x00000000}, | 526 | {0x0000a11c, 0x00000000}, |
| @@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { | |||
| 530 | {0x0000a134, 0x00000000}, | 532 | {0x0000a134, 0x00000000}, |
| 531 | {0x0000a138, 0x00000000}, | 533 | {0x0000a138, 0x00000000}, |
| 532 | {0x0000a13c, 0x00000000}, | 534 | {0x0000a13c, 0x00000000}, |
| 533 | {0x0000a140, 0x001f0000}, | 535 | {0x0000a140, 0x00bf00a0}, |
| 534 | {0x0000a144, 0x01000101}, | 536 | {0x0000a144, 0x11a011a1}, |
| 535 | {0x0000a148, 0x011e011f}, | 537 | {0x0000a148, 0x11be11bf}, |
| 536 | {0x0000a14c, 0x011c011d}, | 538 | {0x0000a14c, 0x11bc11bd}, |
| 537 | {0x0000a150, 0x02030204}, | 539 | {0x0000a150, 0x22632264}, |
| 538 | {0x0000a154, 0x02010202}, | 540 | {0x0000a154, 0x22612262}, |
| 539 | {0x0000a158, 0x021f0200}, | 541 | {0x0000a158, 0x227f2260}, |
| 540 | {0x0000a15c, 0x0302021e}, | 542 | {0x0000a15c, 0x4322227e}, |
| 541 | {0x0000a160, 0x03000301}, | 543 | {0x0000a160, 0x43204321}, |
| 542 | {0x0000a164, 0x031e031f}, | 544 | {0x0000a164, 0x433e433f}, |
| 543 | {0x0000a168, 0x0402031d}, | 545 | {0x0000a168, 0x4462433d}, |
| 544 | {0x0000a16c, 0x04000401}, | 546 | {0x0000a16c, 0x44604461}, |
| 545 | {0x0000a170, 0x041e041f}, | 547 | {0x0000a170, 0x447e447f}, |
| 546 | {0x0000a174, 0x0502041d}, | 548 | {0x0000a174, 0x5582447d}, |
| 547 | {0x0000a178, 0x05000501}, | 549 | {0x0000a178, 0x55805581}, |
| 548 | {0x0000a17c, 0x051e051f}, | 550 | {0x0000a17c, 0x559e559f}, |
| 549 | {0x0000a180, 0x06010602}, | 551 | {0x0000a180, 0x66816682}, |
| 550 | {0x0000a184, 0x061f0600}, | 552 | {0x0000a184, 0x669f6680}, |
| 551 | {0x0000a188, 0x061d061e}, | 553 | {0x0000a188, 0x669d669e}, |
| 552 | {0x0000a18c, 0x07020703}, | 554 | {0x0000a18c, 0x77e677e7}, |
| 553 | {0x0000a190, 0x07000701}, | 555 | {0x0000a190, 0x77e477e5}, |
| 554 | {0x0000a194, 0x00000000}, | 556 | {0x0000a194, 0x00000000}, |
| 555 | {0x0000a198, 0x00000000}, | 557 | {0x0000a198, 0x00000000}, |
| 556 | {0x0000a19c, 0x00000000}, | 558 | {0x0000a19c, 0x00000000}, |
| @@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = { | |||
| 770 | 772 | ||
| 771 | static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { | 773 | static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { |
| 772 | /* Addr allmodes */ | 774 | /* Addr allmodes */ |
| 773 | {0x00018c00, 0x18213ede}, | 775 | {0x00018c00, 0x18212ede}, |
| 774 | {0x00018c04, 0x000801d8}, | 776 | {0x00018c04, 0x000801d8}, |
| 775 | {0x00018c08, 0x0003780c}, | 777 | {0x00018c08, 0x0003780c}, |
| 776 | }; | 778 | }; |
| @@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = { | |||
| 889 | {0x0000a180, 0x66816682}, | 891 | {0x0000a180, 0x66816682}, |
| 890 | {0x0000a184, 0x669f6680}, | 892 | {0x0000a184, 0x669f6680}, |
| 891 | {0x0000a188, 0x669d669e}, | 893 | {0x0000a188, 0x669d669e}, |
| 892 | {0x0000a18c, 0x77627763}, | 894 | {0x0000a18c, 0x77e677e7}, |
| 893 | {0x0000a190, 0x77607761}, | 895 | {0x0000a190, 0x77e477e5}, |
| 894 | {0x0000a194, 0x00000000}, | 896 | {0x0000a194, 0x00000000}, |
| 895 | {0x0000a198, 0x00000000}, | 897 | {0x0000a198, 0x00000000}, |
| 896 | {0x0000a19c, 0x00000000}, | 898 | {0x0000a19c, 0x00000000}, |
| @@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { | |||
| 1114 | {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, | 1116 | {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, |
| 1115 | {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, | 1117 | {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, |
| 1116 | {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, | 1118 | {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, |
| 1117 | {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, | 1119 | {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, |
| 1118 | {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, | 1120 | {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, |
| 1119 | {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, | 1121 | {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, |
| 1120 | {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, | 1122 | {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, |
| @@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { | |||
| 1140 | {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, | 1142 | {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, |
| 1141 | {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, | 1143 | {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, |
| 1142 | {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, | 1144 | {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, |
| 1143 | {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1145 | {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0}, |
| 1144 | {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1146 | {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1}, |
| 1145 | {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1147 | {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2}, |
| 1146 | {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1148 | {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3}, |
| 1147 | {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1149 | {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4}, |
| 1148 | {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1150 | {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, |
| 1149 | {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, | 1151 | {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, |
| 1150 | {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1152 | {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 1151 | {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1153 | {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 1152 | {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1154 | {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| @@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { | |||
| 1174 | {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, | 1176 | {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, |
| 1175 | {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, | 1177 | {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, |
| 1176 | {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, | 1178 | {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, |
| 1177 | {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, | 1179 | {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, |
| 1178 | {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, | 1180 | {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, |
| 1179 | {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, | 1181 | {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, |
| 1180 | {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, | 1182 | {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, |
| @@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { | |||
| 1200 | {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, | 1202 | {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, |
| 1201 | {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, | 1203 | {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, |
| 1202 | {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, | 1204 | {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, |
| 1203 | {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1205 | {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0}, |
| 1204 | {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1206 | {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1}, |
| 1205 | {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1207 | {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2}, |
| 1206 | {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1208 | {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3}, |
| 1207 | {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1209 | {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4}, |
| 1208 | {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1210 | {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, |
| 1209 | {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, | 1211 | {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, |
| 1210 | {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1212 | {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 1211 | {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1213 | {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 1212 | {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1214 | {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6963862a1872..a18414b5948b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) | |||
| 227 | if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) | 227 | if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) |
| 228 | goto work; | 228 | goto work; |
| 229 | 229 | ||
| 230 | ath9k_set_beacon(sc); | ||
| 231 | |||
| 232 | if (ah->opmode == NL80211_IFTYPE_STATION && | 230 | if (ah->opmode == NL80211_IFTYPE_STATION && |
| 233 | test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { | 231 | test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { |
| 234 | spin_lock_irqsave(&sc->sc_pm_lock, flags); | 232 | spin_lock_irqsave(&sc->sc_pm_lock, flags); |
| 235 | sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; | 233 | sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; |
| 236 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); | 234 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); |
| 235 | } else { | ||
| 236 | ath9k_set_beacon(sc); | ||
| 237 | } | 237 | } |
| 238 | work: | 238 | work: |
| 239 | ath_restart_work(sc); | 239 | ath_restart_work(sc); |
| @@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, | |||
| 1332 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 1332 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 1333 | struct ath_node *an = (struct ath_node *) sta->drv_priv; | 1333 | struct ath_node *an = (struct ath_node *) sta->drv_priv; |
| 1334 | struct ieee80211_key_conf ps_key = { }; | 1334 | struct ieee80211_key_conf ps_key = { }; |
| 1335 | int key; | ||
| 1335 | 1336 | ||
| 1336 | ath_node_attach(sc, sta, vif); | 1337 | ath_node_attach(sc, sta, vif); |
| 1337 | 1338 | ||
| @@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, | |||
| 1339 | vif->type != NL80211_IFTYPE_AP_VLAN) | 1340 | vif->type != NL80211_IFTYPE_AP_VLAN) |
| 1340 | return 0; | 1341 | return 0; |
| 1341 | 1342 | ||
| 1342 | an->ps_key = ath_key_config(common, vif, sta, &ps_key); | 1343 | key = ath_key_config(common, vif, sta, &ps_key); |
| 1344 | if (key > 0) | ||
| 1345 | an->ps_key = key; | ||
| 1343 | 1346 | ||
| 1344 | return 0; | 1347 | return 0; |
| 1345 | } | 1348 | } |
| @@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, | |||
| 1356 | return; | 1359 | return; |
| 1357 | 1360 | ||
| 1358 | ath_key_delete(common, &ps_key); | 1361 | ath_key_delete(common, &ps_key); |
| 1362 | an->ps_key = 0; | ||
| 1359 | } | 1363 | } |
| 1360 | 1364 | ||
| 1361 | static int ath9k_sta_remove(struct ieee80211_hw *hw, | 1365 | static int ath9k_sta_remove(struct ieee80211_hw *hw, |
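Note: ath9k_sta_add() used to cache whatever ath_key_config() returned, error codes included; the fix stores the value only when it is a positive key slot, and ath9k_del_ps_key() now zeroes the cached slot once the key is deleted. A toy sketch of the store-only-on-success idea, with an invented slot allocator:

#include <stdio.h>

static int cached_slot;	/* 0 means "no slot cached" */

/* Stand-in for a key/slot allocator that returns a positive index on
 * success or a negative error code on failure. */
static int alloc_slot(int want_fail)
{
	return want_fail ? -1 : 4;
}

static void add_station(int want_fail)
{
	int slot = alloc_slot(want_fail);

	if (slot > 0)		/* cache only a real slot number */
		cached_slot = slot;
}

static void remove_station(void)
{
	cached_slot = 0;	/* drop the cache together with the key */
}

int main(void)
{
	add_station(1);
	printf("after failed add: %d\n", cached_slot);	/* still 0 */
	add_station(0);
	printf("after good add:   %d\n", cached_slot);	/* 4 */
	remove_station();
	return 0;
}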
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 523355b87659..f7c70b3a6ea9 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
| @@ -1728,6 +1728,25 @@ drop_recycle_buffer: | |||
| 1728 | sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); | 1728 | sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); |
| 1729 | } | 1729 | } |
| 1730 | 1730 | ||
| 1731 | void b43_dma_handle_rx_overflow(struct b43_dmaring *ring) | ||
| 1732 | { | ||
| 1733 | int current_slot, previous_slot; | ||
| 1734 | |||
| 1735 | B43_WARN_ON(ring->tx); | ||
| 1736 | |||
| 1737 | /* Device has filled all buffers, drop all packets and let TCP | ||
| 1738 | * decrease speed. | ||
| 1739 | * Decrementing the RX index by one lets the device see all slots | ||
| 1740 | * as free again. | ||
| 1741 | */ | ||
| 1742 | /* | ||
| 1743 | *TODO: How to increase rx_drop in mac80211? | ||
| 1744 | */ | ||
| 1745 | current_slot = ring->ops->get_current_rxslot(ring); | ||
| 1746 | previous_slot = prev_slot(ring, current_slot); | ||
| 1747 | ring->ops->set_current_rxslot(ring, previous_slot); | ||
| 1748 | } | ||
| 1749 | |||
| 1731 | void b43_dma_rx(struct b43_dmaring *ring) | 1750 | void b43_dma_rx(struct b43_dmaring *ring) |
| 1732 | { | 1751 | { |
| 1733 | const struct b43_dma_ops *ops = ring->ops; | 1752 | const struct b43_dma_ops *ops = ring->ops; |
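Note: the new b43_dma_handle_rx_overflow() recovers from an RX descriptor underrun by stepping the hardware's current RX slot back by one, which makes every buffer in the ring look free to the device again at the price of dropping the pending frames and letting TCP slow down. A compilable sketch of the ring-index rewind, with an arbitrary ring size:

#include <stdio.h>

#define RING_SLOTS 64	/* arbitrary ring size for the sketch */

static int current_rx_slot;

static int prev_slot(int slot)
{
	return slot ? slot - 1 : RING_SLOTS - 1;
}

/* Rewind the RX pointer by one slot so the device sees the whole ring as
 * free again; the frames that were pending are simply dropped. */
static void handle_rx_overflow(void)
{
	current_rx_slot = prev_slot(current_rx_slot);
}

int main(void)
{
	handle_rx_overflow();
	printf("rx slot rewound to %d\n", current_rx_slot);	/* 63 */
	return 0;
}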
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 9fdd1983079c..df8c8cdcbdb5 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | /* DMA-Interrupt reasons. */ | 9 | /* DMA-Interrupt reasons. */ |
| 10 | #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ | 10 | #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ |
| 11 | | (1 << 14) | (1 << 15)) | 11 | | (1 << 14) | (1 << 15)) |
| 12 | #define B43_DMAIRQ_NONFATALMASK (1 << 13) | 12 | #define B43_DMAIRQ_RDESC_UFLOW (1 << 13) |
| 13 | #define B43_DMAIRQ_RX_DONE (1 << 16) | 13 | #define B43_DMAIRQ_RX_DONE (1 << 16) |
| 14 | 14 | ||
| 15 | /*** 32-bit DMA Engine. ***/ | 15 | /*** 32-bit DMA Engine. ***/ |
| @@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev, | |||
| 295 | void b43_dma_handle_txstatus(struct b43_wldev *dev, | 295 | void b43_dma_handle_txstatus(struct b43_wldev *dev, |
| 296 | const struct b43_txstatus *status); | 296 | const struct b43_txstatus *status); |
| 297 | 297 | ||
| 298 | void b43_dma_handle_rx_overflow(struct b43_dmaring *ring); | ||
| 299 | |||
| 298 | void b43_dma_rx(struct b43_dmaring *ring); | 300 | void b43_dma_rx(struct b43_dmaring *ring); |
| 299 | 301 | ||
| 300 | void b43_dma_direct_fifo_rx(struct b43_wldev *dev, | 302 | void b43_dma_direct_fifo_rx(struct b43_wldev *dev, |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index d377f77d30b5..6dd07e2ec595 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
| @@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) | |||
| 1902 | } | 1902 | } |
| 1903 | } | 1903 | } |
| 1904 | 1904 | ||
| 1905 | if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | | 1905 | if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) { |
| 1906 | B43_DMAIRQ_NONFATALMASK))) { | 1906 | b43err(dev->wl, |
| 1907 | if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { | 1907 | "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n", |
| 1908 | b43err(dev->wl, "Fatal DMA error: " | 1908 | dma_reason[0], dma_reason[1], |
| 1909 | "0x%08X, 0x%08X, 0x%08X, " | 1909 | dma_reason[2], dma_reason[3], |
| 1910 | "0x%08X, 0x%08X, 0x%08X\n", | 1910 | dma_reason[4], dma_reason[5]); |
| 1911 | dma_reason[0], dma_reason[1], | 1911 | b43err(dev->wl, "This device does not support DMA " |
| 1912 | dma_reason[2], dma_reason[3], | ||
| 1913 | dma_reason[4], dma_reason[5]); | ||
| 1914 | b43err(dev->wl, "This device does not support DMA " | ||
| 1915 | "on your system. It will now be switched to PIO.\n"); | 1912 | "on your system. It will now be switched to PIO.\n"); |
| 1916 | /* Fall back to PIO transfers if we get fatal DMA errors! */ | 1913 | /* Fall back to PIO transfers if we get fatal DMA errors! */ |
| 1917 | dev->use_pio = true; | 1914 | dev->use_pio = true; |
| 1918 | b43_controller_restart(dev, "DMA error"); | 1915 | b43_controller_restart(dev, "DMA error"); |
| 1919 | return; | 1916 | return; |
| 1920 | } | ||
| 1921 | if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { | ||
| 1922 | b43err(dev->wl, "DMA error: " | ||
| 1923 | "0x%08X, 0x%08X, 0x%08X, " | ||
| 1924 | "0x%08X, 0x%08X, 0x%08X\n", | ||
| 1925 | dma_reason[0], dma_reason[1], | ||
| 1926 | dma_reason[2], dma_reason[3], | ||
| 1927 | dma_reason[4], dma_reason[5]); | ||
| 1928 | } | ||
| 1929 | } | 1917 | } |
| 1930 | 1918 | ||
| 1931 | if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) | 1919 | if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) |
| @@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) | |||
| 1944 | handle_irq_noise(dev); | 1932 | handle_irq_noise(dev); |
| 1945 | 1933 | ||
| 1946 | /* Check the DMA reason registers for received data. */ | 1934 | /* Check the DMA reason registers for received data. */ |
| 1935 | if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) { | ||
| 1936 | if (B43_DEBUG) | ||
| 1937 | b43warn(dev->wl, "RX descriptor underrun\n"); | ||
| 1938 | b43_dma_handle_rx_overflow(dev->dma.rx_ring); | ||
| 1939 | } | ||
| 1947 | if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { | 1940 | if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { |
| 1948 | if (b43_using_pio_transfers(dev)) | 1941 | if (b43_using_pio_transfers(dev)) |
| 1949 | b43_pio_rx(dev->pio.rx_queue); | 1942 | b43_pio_rx(dev->pio.rx_queue); |
| @@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) | |||
| 2001 | return IRQ_NONE; | 1994 | return IRQ_NONE; |
| 2002 | 1995 | ||
| 2003 | dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) | 1996 | dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) |
| 2004 | & 0x0001DC00; | 1997 | & 0x0001FC00; |
| 2005 | dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) | 1998 | dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) |
| 2006 | & 0x0000DC00; | 1999 | & 0x0000DC00; |
| 2007 | dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) | 2000 | dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) |
| @@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev) | |||
| 3130 | b43_write32(dev, 0x018C, 0x02000000); | 3123 | b43_write32(dev, 0x018C, 0x02000000); |
| 3131 | } | 3124 | } |
| 3132 | b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); | 3125 | b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); |
| 3133 | b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); | 3126 | b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00); |
| 3134 | b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); | 3127 | b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); |
| 3135 | b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); | 3128 | b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); |
| 3136 | b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); | 3129 | b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b8f82e688c72..9a95045c97b6 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
| @@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) | |||
| 5741 | hw->flags = | 5741 | hw->flags = |
| 5742 | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | | 5742 | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | |
| 5743 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | | 5743 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | |
| 5744 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | | 5744 | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; |
| 5745 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
| 5746 | if (il->cfg->sku & IL_SKU_N) | 5745 | if (il->cfg->sku & IL_SKU_N) |
| 5747 | hw->flags |= | 5746 | hw->flags |= |
| 5748 | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | | 5747 | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index d3c8ece980d8..e42b266a023a 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
| @@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) | |||
| 2234 | if (wdev->netdev->reg_state == NETREG_REGISTERED) | 2234 | if (wdev->netdev->reg_state == NETREG_REGISTERED) |
| 2235 | unregister_netdevice(wdev->netdev); | 2235 | unregister_netdevice(wdev->netdev); |
| 2236 | 2236 | ||
| 2237 | if (wdev->netdev->reg_state == NETREG_UNREGISTERED) | ||
| 2238 | free_netdev(wdev->netdev); | ||
| 2239 | |||
| 2240 | /* Clear the priv in adapter */ | 2237 | /* Clear the priv in adapter */ |
| 2241 | priv->netdev = NULL; | 2238 | priv->netdev = NULL; |
| 2242 | 2239 | ||
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 74db0d24a579..26755d9acb55 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c | |||
| @@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) | |||
| 1191 | adapter->if_ops.wakeup(adapter); | 1191 | adapter->if_ops.wakeup(adapter); |
| 1192 | adapter->hs_activated = false; | 1192 | adapter->hs_activated = false; |
| 1193 | adapter->is_hs_configured = false; | 1193 | adapter->is_hs_configured = false; |
| 1194 | adapter->is_suspended = false; | ||
| 1194 | mwifiex_hs_activated_event(mwifiex_get_priv(adapter, | 1195 | mwifiex_hs_activated_event(mwifiex_get_priv(adapter, |
| 1195 | MWIFIEX_BSS_ROLE_ANY), | 1196 | MWIFIEX_BSS_ROLE_ANY), |
| 1196 | false); | 1197 | false); |
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 121443a0f2a1..2eb88ea9acf7 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
| @@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, | |||
| 655 | struct net_device *dev) | 655 | struct net_device *dev) |
| 656 | { | 656 | { |
| 657 | dev->netdev_ops = &mwifiex_netdev_ops; | 657 | dev->netdev_ops = &mwifiex_netdev_ops; |
| 658 | dev->destructor = free_netdev; | ||
| 658 | /* Initialize private structure */ | 659 | /* Initialize private structure */ |
| 659 | priv->current_key_index = 0; | 660 | priv->current_key_index = 0; |
| 660 | priv->media_connected = false; | 661 | priv->media_connected = false; |
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 311d0b26b81c..1a8a19dbd635 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
| @@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, | |||
| 96 | } else { | 96 | } else { |
| 97 | /* Multicast */ | 97 | /* Multicast */ |
| 98 | priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; | 98 | priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; |
| 99 | if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { | 99 | if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { |
| 100 | dev_dbg(priv->adapter->dev, | 100 | dev_dbg(priv->adapter->dev, |
| 101 | "info: Enabling All Multicast!\n"); | 101 | "info: Enabling All Multicast!\n"); |
| 102 | priv->curr_pkt_filter |= | 102 | priv->curr_pkt_filter |= |
| @@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, | |||
| 108 | dev_dbg(priv->adapter->dev, | 108 | dev_dbg(priv->adapter->dev, |
| 109 | "info: Set multicast list=%d\n", | 109 | "info: Set multicast list=%d\n", |
| 110 | mcast_list->num_multicast_addr); | 110 | mcast_list->num_multicast_addr); |
| 111 | /* Set multicast addresses to firmware */ | 111 | /* Send multicast addresses to firmware */ |
| 112 | if (old_pkt_filter == priv->curr_pkt_filter) { | 112 | ret = mwifiex_send_cmd_async(priv, |
| 113 | /* Send request to firmware */ | 113 | HostCmd_CMD_MAC_MULTICAST_ADR, |
| 114 | ret = mwifiex_send_cmd_async(priv, | 114 | HostCmd_ACT_GEN_SET, 0, |
| 115 | HostCmd_CMD_MAC_MULTICAST_ADR, | 115 | mcast_list); |
| 116 | HostCmd_ACT_GEN_SET, 0, | ||
| 117 | mcast_list); | ||
| 118 | } else { | ||
| 119 | /* Send request to firmware */ | ||
| 120 | ret = mwifiex_send_cmd_async(priv, | ||
| 121 | HostCmd_CMD_MAC_MULTICAST_ADR, | ||
| 122 | HostCmd_ACT_GEN_SET, 0, | ||
| 123 | mcast_list); | ||
| 124 | } | ||
| 125 | } | 116 | } |
| 126 | } | 117 | } |
| 127 | } | 118 | } |
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index f802e7c92356..2dacd19e1b8a 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c | |||
| @@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) | |||
| 345 | */ | 345 | */ |
| 346 | void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) | 346 | void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) |
| 347 | { | 347 | { |
| 348 | if (mw > NTB_NUM_MW) | 348 | if (mw >= NTB_NUM_MW) |
| 349 | return NULL; | 349 | return NULL; |
| 350 | 350 | ||
| 351 | return ndev->mw[mw].vbase; | 351 | return ndev->mw[mw].vbase; |
| @@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) | |||
| 362 | */ | 362 | */ |
| 363 | resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) | 363 | resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) |
| 364 | { | 364 | { |
| 365 | if (mw > NTB_NUM_MW) | 365 | if (mw >= NTB_NUM_MW) |
| 366 | return 0; | 366 | return 0; |
| 367 | 367 | ||
| 368 | return ndev->mw[mw].bar_sz; | 368 | return ndev->mw[mw].bar_sz; |
| @@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) | |||
| 380 | */ | 380 | */ |
| 381 | void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) | 381 | void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) |
| 382 | { | 382 | { |
| 383 | if (mw > NTB_NUM_MW) | 383 | if (mw >= NTB_NUM_MW) |
| 384 | return; | 384 | return; |
| 385 | 385 | ||
| 386 | dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, | 386 | dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, |
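Note: the ntb_hw guards were off by one; an index into an array of NTB_NUM_MW elements is valid only for 0 .. NTB_NUM_MW-1, so the checks become '>='. A small standalone demonstration of the difference (the array contents are placeholders):

#include <stdio.h>

#define NUM_MW 2	/* stands in for NTB_NUM_MW */

static const char *mw_name[NUM_MW] = { "mw0", "mw1" };

static const char *get_mw_name(unsigned int mw)
{
	if (mw >= NUM_MW)	/* 'mw > NUM_MW' would let mw == NUM_MW through */
		return NULL;
	return mw_name[mw];
}

int main(void)
{
	const char *ok = get_mw_name(1);
	const char *bad = get_mw_name(NUM_MW);

	printf("%s\n", ok ? ok : "(invalid)");
	printf("%s\n", bad ? bad : "(invalid)");
	return 0;
}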
| @@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1027 | ndev->mw[i].vbase = | 1027 | ndev->mw[i].vbase = |
| 1028 | ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), | 1028 | ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), |
| 1029 | ndev->mw[i].bar_sz); | 1029 | ndev->mw[i].bar_sz); |
| 1030 | dev_info(&pdev->dev, "MW %d size %d\n", i, | 1030 | dev_info(&pdev->dev, "MW %d size %llu\n", i, |
| 1031 | (u32) pci_resource_len(pdev, MW_TO_BAR(i))); | 1031 | pci_resource_len(pdev, MW_TO_BAR(i))); |
| 1032 | if (!ndev->mw[i].vbase) { | 1032 | if (!ndev->mw[i].vbase) { |
| 1033 | dev_warn(&pdev->dev, "Cannot remap BAR %d\n", | 1033 | dev_warn(&pdev->dev, "Cannot remap BAR %d\n", |
| 1034 | MW_TO_BAR(i)); | 1034 | MW_TO_BAR(i)); |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index e0bdfd7f9930..f8d7081ee301 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
| @@ -58,7 +58,7 @@ | |||
| 58 | #include <linux/ntb.h> | 58 | #include <linux/ntb.h> |
| 59 | #include "ntb_hw.h" | 59 | #include "ntb_hw.h" |
| 60 | 60 | ||
| 61 | #define NTB_TRANSPORT_VERSION 2 | 61 | #define NTB_TRANSPORT_VERSION 3 |
| 62 | 62 | ||
| 63 | static unsigned int transport_mtu = 0x401E; | 63 | static unsigned int transport_mtu = 0x401E; |
| 64 | module_param(transport_mtu, uint, 0644); | 64 | module_param(transport_mtu, uint, 0644); |
| @@ -173,10 +173,13 @@ struct ntb_payload_header { | |||
| 173 | 173 | ||
| 174 | enum { | 174 | enum { |
| 175 | VERSION = 0, | 175 | VERSION = 0, |
| 176 | MW0_SZ, | ||
| 177 | MW1_SZ, | ||
| 178 | NUM_QPS, | ||
| 179 | QP_LINKS, | 176 | QP_LINKS, |
| 177 | NUM_QPS, | ||
| 178 | NUM_MWS, | ||
| 179 | MW0_SZ_HIGH, | ||
| 180 | MW0_SZ_LOW, | ||
| 181 | MW1_SZ_HIGH, | ||
| 182 | MW1_SZ_LOW, | ||
| 180 | MAX_SPAD, | 183 | MAX_SPAD, |
| 181 | }; | 184 | }; |
| 182 | 185 | ||
| @@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name) | |||
| 297 | { | 300 | { |
| 298 | struct ntb_transport_client_dev *client_dev; | 301 | struct ntb_transport_client_dev *client_dev; |
| 299 | struct ntb_transport *nt; | 302 | struct ntb_transport *nt; |
| 300 | int rc; | 303 | int rc, i = 0; |
| 301 | 304 | ||
| 302 | if (list_empty(&ntb_transport_list)) | 305 | if (list_empty(&ntb_transport_list)) |
| 303 | return -ENODEV; | 306 | return -ENODEV; |
| @@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name) | |||
| 315 | dev = &client_dev->dev; | 318 | dev = &client_dev->dev; |
| 316 | 319 | ||
| 317 | /* setup and register client devices */ | 320 | /* setup and register client devices */ |
| 318 | dev_set_name(dev, "%s", device_name); | 321 | dev_set_name(dev, "%s%d", device_name, i); |
| 319 | dev->bus = &ntb_bus_type; | 322 | dev->bus = &ntb_bus_type; |
| 320 | dev->release = ntb_client_release; | 323 | dev->release = ntb_client_release; |
| 321 | dev->parent = &ntb_query_pdev(nt->ndev)->dev; | 324 | dev->parent = &ntb_query_pdev(nt->ndev)->dev; |
| @@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name) | |||
| 327 | } | 330 | } |
| 328 | 331 | ||
| 329 | list_add_tail(&client_dev->entry, &nt->client_devs); | 332 | list_add_tail(&client_dev->entry, &nt->client_devs); |
| 333 | i++; | ||
| 330 | } | 334 | } |
| 331 | 335 | ||
| 332 | return 0; | 336 | return 0; |
| @@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, | |||
| 486 | (qp_num / NTB_NUM_MW * rx_size); | 490 | (qp_num / NTB_NUM_MW * rx_size); |
| 487 | rx_size -= sizeof(struct ntb_rx_info); | 491 | rx_size -= sizeof(struct ntb_rx_info); |
| 488 | 492 | ||
| 489 | qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); | 493 | qp->rx_buff = qp->remote_rx_info + 1; |
| 490 | qp->rx_max_frame = min(transport_mtu, rx_size); | 494 | /* Due to housekeeping, there must be at least 2 buffers */ |
| 495 | qp->rx_max_frame = min(transport_mtu, rx_size / 2); | ||
| 491 | qp->rx_max_entry = rx_size / qp->rx_max_frame; | 496 | qp->rx_max_entry = rx_size / qp->rx_max_frame; |
| 492 | qp->rx_index = 0; | 497 | qp->rx_index = 0; |
| 493 | 498 | ||
| 494 | qp->remote_rx_info->entry = qp->rx_max_entry; | 499 | qp->remote_rx_info->entry = qp->rx_max_entry - 1; |
| 495 | 500 | ||
| 496 | /* setup the hdr offsets with 0's */ | 501 | /* setup the hdr offsets with 0's */ |
| 497 | for (i = 0; i < qp->rx_max_entry; i++) { | 502 | for (i = 0; i < qp->rx_max_entry; i++) { |
| @@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, | |||
| 502 | 507 | ||
| 503 | qp->rx_pkts = 0; | 508 | qp->rx_pkts = 0; |
| 504 | qp->tx_pkts = 0; | 509 | qp->tx_pkts = 0; |
| 510 | qp->tx_index = 0; | ||
| 511 | } | ||
| 512 | |||
| 513 | static void ntb_free_mw(struct ntb_transport *nt, int num_mw) | ||
| 514 | { | ||
| 515 | struct ntb_transport_mw *mw = &nt->mw[num_mw]; | ||
| 516 | struct pci_dev *pdev = ntb_query_pdev(nt->ndev); | ||
| 517 | |||
| 518 | if (!mw->virt_addr) | ||
| 519 | return; | ||
| 520 | |||
| 521 | dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); | ||
| 522 | mw->virt_addr = NULL; | ||
| 505 | } | 523 | } |
| 506 | 524 | ||
| 507 | static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) | 525 | static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) |
| @@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) | |||
| 509 | struct ntb_transport_mw *mw = &nt->mw[num_mw]; | 527 | struct ntb_transport_mw *mw = &nt->mw[num_mw]; |
| 510 | struct pci_dev *pdev = ntb_query_pdev(nt->ndev); | 528 | struct pci_dev *pdev = ntb_query_pdev(nt->ndev); |
| 511 | 529 | ||
| 530 | /* No need to re-setup */ | ||
| 531 | if (mw->size == ALIGN(size, 4096)) | ||
| 532 | return 0; | ||
| 533 | |||
| 534 | if (mw->size != 0) | ||
| 535 | ntb_free_mw(nt, num_mw); | ||
| 536 | |||
| 512 | /* Alloc memory for receiving data. Must be 4k aligned */ | 537 | /* Alloc memory for receiving data. Must be 4k aligned */ |
| 513 | mw->size = ALIGN(size, 4096); | 538 | mw->size = ALIGN(size, 4096); |
| 514 | 539 | ||
| 515 | mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, | 540 | mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, |
| 516 | GFP_KERNEL); | 541 | GFP_KERNEL); |
| 517 | if (!mw->virt_addr) { | 542 | if (!mw->virt_addr) { |
| 543 | mw->size = 0; | ||
| 518 | dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", | 544 | dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", |
| 519 | (int) mw->size); | 545 | (int) mw->size); |
| 520 | return -ENOMEM; | 546 | return -ENOMEM; |
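Note: ntb_set_mw() now skips the work when the requested (4 KiB aligned) size matches the current buffer, frees any previously allocated buffer via the new ntb_free_mw() before allocating again, and zeroes mw->size on allocation failure so stale bookkeeping is never advertised. A userspace sketch of that reallocate-only-on-change guard, using malloc/free in place of dma_alloc_coherent()/dma_free_coherent():

#include <stdlib.h>

struct mw {
	void *virt;
	size_t size;
};

static int set_mw(struct mw *mw, size_t size)
{
	size = (size + 4095) & ~(size_t)4095;	/* 4 KiB align, as the driver does */

	if (mw->size == size)			/* already set up at this size */
		return 0;

	free(mw->virt);				/* drop any previous buffer */
	mw->virt = malloc(size);
	if (!mw->virt) {
		mw->size = 0;			/* never advertise a stale size */
		return -1;
	}
	mw->size = size;
	return 0;
}

int main(void)
{
	struct mw mw = { 0 };

	set_mw(&mw, 12345);	/* allocates a 16 KiB buffer */
	set_mw(&mw, 12345);	/* same aligned size: returns without reallocating */
	set_mw(&mw, 65536);	/* frees the old buffer, allocates a new one */
	free(mw.virt);
	return 0;
}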
| @@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work) | |||
| 604 | u32 val; | 630 | u32 val; |
| 605 | int rc, i; | 631 | int rc, i; |
| 606 | 632 | ||
| 607 | /* send the local info */ | 633 | /* send the local info, in the opposite order of the way we read it */ |
| 608 | rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); | 634 | for (i = 0; i < NTB_NUM_MW; i++) { |
| 609 | if (rc) { | 635 | rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), |
| 610 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", | 636 | ntb_get_mw_size(ndev, i) >> 32); |
| 611 | 0, VERSION); | 637 | if (rc) { |
| 612 | goto out; | 638 | dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", |
| 613 | } | 639 | (u32)(ntb_get_mw_size(ndev, i) >> 32), |
| 640 | MW0_SZ_HIGH + (i * 2)); | ||
| 641 | goto out; | ||
| 642 | } | ||
| 614 | 643 | ||
| 615 | rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); | 644 | rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), |
| 616 | if (rc) { | 645 | (u32) ntb_get_mw_size(ndev, i)); |
| 617 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", | 646 | if (rc) { |
| 618 | (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); | 647 | dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", |
| 619 | goto out; | 648 | (u32) ntb_get_mw_size(ndev, i), |
| 649 | MW0_SZ_LOW + (i * 2)); | ||
| 650 | goto out; | ||
| 651 | } | ||
| 620 | } | 652 | } |
| 621 | 653 | ||
| 622 | rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); | 654 | rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); |
| 623 | if (rc) { | 655 | if (rc) { |
| 624 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", | 656 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", |
| 625 | (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); | 657 | NTB_NUM_MW, NUM_MWS); |
| 626 | goto out; | 658 | goto out; |
| 627 | } | 659 | } |
| 628 | 660 | ||
| @@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work) | |||
| 633 | goto out; | 665 | goto out; |
| 634 | } | 666 | } |
| 635 | 667 | ||
| 636 | rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); | 668 | rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); |
| 637 | if (rc) { | ||
| 638 | dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); | ||
| 639 | goto out; | ||
| 640 | } | ||
| 641 | |||
| 642 | rc = ntb_write_remote_spad(ndev, QP_LINKS, val); | ||
| 643 | if (rc) { | 669 | if (rc) { |
| 644 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", | 670 | dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", |
| 645 | val, QP_LINKS); | 671 | NTB_TRANSPORT_VERSION, VERSION); |
| 646 | goto out; | 672 | goto out; |
| 647 | } | 673 | } |
| 648 | 674 | ||
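Note: with transport version 3 the scratchpad layout publishes each memory-window size as a high/low pair of 32-bit registers (MW0_SZ_HIGH/MW0_SZ_LOW and so on), written here and reassembled on the peer in the read path below. A standalone sketch of the split-and-reassemble arithmetic, with a plain array standing in for the scratchpad registers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t spad[2];	/* [0] = high word, [1] = low word */

static void publish_size(uint64_t size)
{
	spad[0] = (uint32_t)(size >> 32);
	spad[1] = (uint32_t)size;
}

static uint64_t read_size(void)
{
	return ((uint64_t)spad[0] << 32) | spad[1];
}

int main(void)
{
	publish_size(0x180000000ULL);	/* 6 GiB, does not fit in 32 bits */
	printf("peer reads %" PRIu64 " bytes\n", read_size());
	return 0;
}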
| @@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work) | |||
| 667 | goto out; | 693 | goto out; |
| 668 | dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); | 694 | dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); |
| 669 | 695 | ||
| 670 | rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); | 696 | rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); |
| 671 | if (rc) { | 697 | if (rc) { |
| 672 | dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); | 698 | dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); |
| 673 | goto out; | 699 | goto out; |
| 674 | } | 700 | } |
| 675 | 701 | ||
| 676 | if (!val) | 702 | if (val != NTB_NUM_MW) |
| 677 | goto out; | 703 | goto out; |
| 678 | dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); | 704 | dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); |
| 679 | 705 | ||
| 680 | rc = ntb_set_mw(nt, 0, val); | 706 | for (i = 0; i < NTB_NUM_MW; i++) { |
| 681 | if (rc) | 707 | u64 val64; |
| 682 | goto out; | ||
| 683 | 708 | ||
| 684 | rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); | 709 | rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); |
| 685 | if (rc) { | 710 | if (rc) { |
| 686 | dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); | 711 | dev_err(&pdev->dev, "Error reading remote spad %d\n", |
| 687 | goto out; | 712 | MW0_SZ_HIGH + (i * 2)); |
| 688 | } | 713 | goto out1; |
| 714 | } | ||
| 689 | 715 | ||
| 690 | if (!val) | 716 | val64 = (u64) val << 32; |
| 691 | goto out; | ||
| 692 | dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val); | ||
| 693 | 717 | ||
| 694 | rc = ntb_set_mw(nt, 1, val); | 718 | rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); |
| 695 | if (rc) | 719 | if (rc) { |
| 696 | goto out; | 720 | dev_err(&pdev->dev, "Error reading remote spad %d\n", |
| 721 | MW0_SZ_LOW + (i * 2)); | ||
| 722 | goto out1; | ||
| 723 | } | ||
| 724 | |||
| 725 | val64 |= val; | ||
| 726 | |||
| 727 | dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); | ||
| 728 | |||
| 729 | rc = ntb_set_mw(nt, i, val64); | ||
| 730 | if (rc) | ||
| 731 | goto out1; | ||
| 732 | } | ||
| 697 | 733 | ||
| 698 | nt->transport_link = NTB_LINK_UP; | 734 | nt->transport_link = NTB_LINK_UP; |
| 699 | 735 | ||
| @@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work) | |||
| 708 | 744 | ||
| 709 | return; | 745 | return; |
| 710 | 746 | ||
| 747 | out1: | ||
| 748 | for (i = 0; i < NTB_NUM_MW; i++) | ||
| 749 | ntb_free_mw(nt, i); | ||
| 711 | out: | 750 | out: |
| 712 | if (ntb_hw_link_status(ndev)) | 751 | if (ntb_hw_link_status(ndev)) |
| 713 | schedule_delayed_work(&nt->link_work, | 752 | schedule_delayed_work(&nt->link_work, |
| @@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt, | |||
| 780 | (qp_num / NTB_NUM_MW * tx_size); | 819 | (qp_num / NTB_NUM_MW * tx_size); |
| 781 | tx_size -= sizeof(struct ntb_rx_info); | 820 | tx_size -= sizeof(struct ntb_rx_info); |
| 782 | 821 | ||
| 783 | qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); | 822 | qp->tx_mw = qp->rx_info + 1; |
| 784 | qp->tx_max_frame = min(transport_mtu, tx_size); | 823 | /* Due to housekeeping, there must be at least 2 buffers */
| 824 | qp->tx_max_frame = min(transport_mtu, tx_size / 2); | ||
| 785 | qp->tx_max_entry = tx_size / qp->tx_max_frame; | 825 | qp->tx_max_entry = tx_size / qp->tx_max_frame; |
| 786 | qp->tx_index = 0; | ||
| 787 | 826 | ||
| 788 | if (nt->debugfs_dir) { | 827 | if (nt->debugfs_dir) { |
| 789 | char debugfs_name[4]; | 828 | char debugfs_name[4]; |
| @@ -897,10 +936,7 @@ void ntb_transport_free(void *transport) | |||
| 897 | pdev = ntb_query_pdev(nt->ndev); | 936 | pdev = ntb_query_pdev(nt->ndev); |
| 898 | 937 | ||
| 899 | for (i = 0; i < NTB_NUM_MW; i++) | 938 | for (i = 0; i < NTB_NUM_MW; i++) |
| 900 | if (nt->mw[i].virt_addr) | 939 | ntb_free_mw(nt, i); |
| 901 | dma_free_coherent(&pdev->dev, nt->mw[i].size, | ||
| 902 | nt->mw[i].virt_addr, | ||
| 903 | nt->mw[i].dma_addr); | ||
| 904 | 940 | ||
| 905 | kfree(nt->qps); | 941 | kfree(nt->qps); |
| 906 | ntb_unregister_transport(nt->ndev); | 942 | ntb_unregister_transport(nt->ndev); |
| @@ -999,11 +1035,16 @@ out: | |||
| 999 | static void ntb_transport_rx(unsigned long data) | 1035 | static void ntb_transport_rx(unsigned long data) |
| 1000 | { | 1036 | { |
| 1001 | struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; | 1037 | struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; |
| 1002 | int rc; | 1038 | int rc, i; |
| 1003 | 1039 | ||
| 1004 | do { | 1040 | /* Limit the number of packets processed in a single interrupt to |
| 1041 | * provide fairness to others | ||
| 1042 | */ | ||
| 1043 | for (i = 0; i < qp->rx_max_entry; i++) { | ||
| 1005 | rc = ntb_process_rxc(qp); | 1044 | rc = ntb_process_rxc(qp); |
| 1006 | } while (!rc); | 1045 | if (rc) |
| 1046 | break; | ||
| 1047 | } | ||
| 1007 | } | 1048 | } |
| 1008 | 1049 | ||
| 1009 | static void ntb_transport_rxc_db(void *data, int db_num) | 1050 | static void ntb_transport_rxc_db(void *data, int db_num) |
| @@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); | |||
| 1210 | */ | 1251 | */ |
| 1211 | void ntb_transport_free_queue(struct ntb_transport_qp *qp) | 1252 | void ntb_transport_free_queue(struct ntb_transport_qp *qp) |
| 1212 | { | 1253 | { |
| 1213 | struct pci_dev *pdev = ntb_query_pdev(qp->ndev); | 1254 | struct pci_dev *pdev; |
| 1214 | struct ntb_queue_entry *entry; | 1255 | struct ntb_queue_entry *entry; |
| 1215 | 1256 | ||
| 1216 | if (!qp) | 1257 | if (!qp) |
| 1217 | return; | 1258 | return; |
| 1218 | 1259 | ||
| 1260 | pdev = ntb_query_pdev(qp->ndev); | ||
| 1261 | |||
| 1219 | cancel_delayed_work_sync(&qp->link_work); | 1262 | cancel_delayed_work_sync(&qp->link_work); |
| 1220 | 1263 | ||
| 1221 | ntb_unregister_db_callback(qp->ndev, qp->qp_num); | 1264 | ntb_unregister_db_callback(qp->ndev, qp->qp_num); |
| @@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up); | |||
| 1371 | */ | 1414 | */ |
| 1372 | void ntb_transport_link_down(struct ntb_transport_qp *qp) | 1415 | void ntb_transport_link_down(struct ntb_transport_qp *qp) |
| 1373 | { | 1416 | { |
| 1374 | struct pci_dev *pdev = ntb_query_pdev(qp->ndev); | 1417 | struct pci_dev *pdev; |
| 1375 | int rc, val; | 1418 | int rc, val; |
| 1376 | 1419 | ||
| 1377 | if (!qp) | 1420 | if (!qp) |
| 1378 | return; | 1421 | return; |
| 1379 | 1422 | ||
| 1423 | pdev = ntb_query_pdev(qp->ndev); | ||
| 1380 | qp->client_ready = NTB_LINK_DOWN; | 1424 | qp->client_ready = NTB_LINK_DOWN; |
| 1381 | 1425 | ||
| 1382 | rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); | 1426 | rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); |
| @@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down); | |||
| 1408 | */ | 1452 | */ |
| 1409 | bool ntb_transport_link_query(struct ntb_transport_qp *qp) | 1453 | bool ntb_transport_link_query(struct ntb_transport_qp *qp) |
| 1410 | { | 1454 | { |
| 1455 | if (!qp) | ||
| 1456 | return false; | ||
| 1457 | |||
| 1411 | return qp->qp_link == NTB_LINK_UP; | 1458 | return qp->qp_link == NTB_LINK_UP; |
| 1412 | } | 1459 | } |
| 1413 | EXPORT_SYMBOL_GPL(ntb_transport_link_query); | 1460 | EXPORT_SYMBOL_GPL(ntb_transport_link_query); |
| @@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query); | |||
| 1422 | */ | 1469 | */ |
| 1423 | unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) | 1470 | unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) |
| 1424 | { | 1471 | { |
| 1472 | if (!qp) | ||
| 1473 | return 0; | ||
| 1474 | |||
| 1425 | return qp->qp_num; | 1475 | return qp->qp_num; |
| 1426 | } | 1476 | } |
| 1427 | EXPORT_SYMBOL_GPL(ntb_transport_qp_num); | 1477 | EXPORT_SYMBOL_GPL(ntb_transport_qp_num); |
| @@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num); | |||
| 1436 | */ | 1486 | */ |
| 1437 | unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) | 1487 | unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) |
| 1438 | { | 1488 | { |
| 1489 | if (!qp) | ||
| 1490 | return 0; | ||
| 1491 | |||
| 1439 | return qp->tx_max_frame - sizeof(struct ntb_payload_header); | 1492 | return qp->tx_max_frame - sizeof(struct ntb_payload_header); |
| 1440 | } | 1493 | } |
| 1441 | EXPORT_SYMBOL_GPL(ntb_transport_max_size); | 1494 | EXPORT_SYMBOL_GPL(ntb_transport_max_size); |
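The ntb_transport hunks above publish each memory-window size as two 32-bit scratchpad writes (a high word and a low word, stepped by the window index) and reassemble the 64-bit value on the peer before calling ntb_set_mw(). Below is a minimal standalone sketch of that split-and-reassemble arithmetic; the spad arrays only stand in for the hardware scratchpad registers and are not the real NTB API.

#include <stdint.h>
#include <stdio.h>

#define NUM_MW 2

/* Illustrative stand-ins for the remote scratchpad registers. */
static uint32_t spad_high[NUM_MW];
static uint32_t spad_low[NUM_MW];

/* Publisher side: split the 64-bit window size into two 32-bit writes. */
static void publish_mw_size(int idx, uint64_t size)
{
	spad_high[idx] = (uint32_t)(size >> 32);	/* MWn_SZ_HIGH */
	spad_low[idx]  = (uint32_t)size;		/* MWn_SZ_LOW  */
}

/* Peer side: reassemble the high and low words into the original value. */
static uint64_t read_mw_size(int idx)
{
	return ((uint64_t)spad_high[idx] << 32) | spad_low[idx];
}

int main(void)
{
	publish_mw_size(0, 0x100000000ULL);	/* a 4 GiB window */
	printf("MW0 size = %llu\n", (unsigned long long)read_mw_size(0));
	return 0;
}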
diff --git a/drivers/of/base.c b/drivers/of/base.c index c76d16c972cc..f53b992f060a 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -1208,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np, | |||
| 1208 | out_args->args_count = count; | 1208 | out_args->args_count = count; |
| 1209 | for (i = 0; i < count; i++) | 1209 | for (i = 0; i < count; i++) |
| 1210 | out_args->args[i] = be32_to_cpup(list++); | 1210 | out_args->args[i] = be32_to_cpup(list++); |
| 1211 | } else { | ||
| 1212 | of_node_put(node); | ||
| 1211 | } | 1213 | } |
| 1212 | 1214 | ||
| 1213 | /* Found it! return success */ | 1215 | /* Found it! return success */ |
| 1214 | if (node) | ||
| 1215 | of_node_put(node); | ||
| 1216 | return 0; | 1216 | return 0; |
| 1217 | } | 1217 | } |
| 1218 | 1218 | ||
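The of/base.c change above keeps the node reference when the parsed phandle is handed back through out_args and drops it only on the path that returns no node. A short sketch of the caller-side contract this restores; the "clocks"/"#clock-cells" names are only illustrative bindings, not taken from this patch.

#include <linux/of.h>

/* Sketch only: on success the node in args.np comes back with its
 * refcount held, so the consumer is the one that drops it. */
static int example_lookup_provider(struct device_node *np)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
					 0, &args);
	if (ret)
		return ret;

	/* ... use args.np and args.args[0..args.args_count - 1] ... */

	of_node_put(args.np);	/* release the reference taken for us */
	return 0;
}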
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c index aa17f7580f61..6d4532702f80 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ b/drivers/pinctrl/pinctrl-abx500.c | |||
| @@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev) | |||
| 851 | 851 | ||
| 852 | if (abx500_pdata) | 852 | if (abx500_pdata) |
| 853 | pdata = abx500_pdata->gpio; | 853 | pdata = abx500_pdata->gpio; |
| 854 | if (!pdata) { | ||
| 855 | if (np) { | ||
| 856 | const struct of_device_id *match; | ||
| 857 | 854 | ||
| 858 | match = of_match_device(abx500_gpio_match, &pdev->dev); | 855 | if (!(pdata || np)) { |
| 859 | if (!match) | 856 | dev_err(&pdev->dev, "gpio dt and platform data missing\n"); |
| 860 | return -ENODEV; | 857 | return -ENODEV; |
| 861 | id = (unsigned long)match->data; | ||
| 862 | } else { | ||
| 863 | dev_err(&pdev->dev, "gpio dt and platform data missing\n"); | ||
| 864 | return -ENODEV; | ||
| 865 | } | ||
| 866 | } | 858 | } |
| 867 | 859 | ||
| 868 | if (platid) | ||
| 869 | id = platid->driver_data; | ||
| 870 | |||
| 871 | pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), | 860 | pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), |
| 872 | GFP_KERNEL); | 861 | GFP_KERNEL); |
| 873 | if (pct == NULL) { | 862 | if (pct == NULL) { |
| @@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev) | |||
| 882 | pct->chip.dev = &pdev->dev; | 871 | pct->chip.dev = &pdev->dev; |
| 883 | pct->chip.base = (np) ? -1 : pdata->gpio_base; | 872 | pct->chip.base = (np) ? -1 : pdata->gpio_base; |
| 884 | 873 | ||
| 874 | if (platid) | ||
| 875 | id = platid->driver_data; | ||
| 876 | else if (np) { | ||
| 877 | const struct of_device_id *match; | ||
| 878 | |||
| 879 | match = of_match_device(abx500_gpio_match, &pdev->dev); | ||
| 880 | if (match) | ||
| 881 | id = (unsigned long)match->data; | ||
| 882 | } | ||
| 883 | |||
| 885 | /* initialize the lock */ | 884 | /* initialize the lock */ |
| 886 | mutex_init(&pct->lock); | 885 | mutex_init(&pct->lock); |
| 887 | 886 | ||
| @@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev) | |||
| 900 | abx500_pinctrl_ab8505_init(&pct->soc); | 899 | abx500_pinctrl_ab8505_init(&pct->soc); |
| 901 | break; | 900 | break; |
| 902 | default: | 901 | default: |
| 903 | dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", | 902 | dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id); |
| 904 | (int) platid->driver_data); | ||
| 905 | mutex_destroy(&pct->lock); | 903 | mutex_destroy(&pct->lock); |
| 906 | return -EINVAL; | 904 | return -EINVAL; |
| 907 | } | 905 | } |
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index edde3acc4186..a67af419f531 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c | |||
| @@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev) | |||
| 713 | gpio->dev = &pdev->dev; | 713 | gpio->dev = &pdev->dev; |
| 714 | 714 | ||
| 715 | memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 715 | memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 716 | if (!memres) { | ||
| 717 | dev_err(gpio->dev, "could not get GPIO memory resource\n"); | ||
| 718 | return -ENODEV; | ||
| 719 | } | ||
| 720 | |||
| 721 | gpio->base = devm_ioremap_resource(&pdev->dev, memres); | 716 | gpio->base = devm_ioremap_resource(&pdev->dev, memres); |
| 722 | if (IS_ERR(gpio->base)) | 717 | if (IS_ERR(gpio->base)) |
| 723 | return PTR_ERR(gpio->base); | 718 | return PTR_ERR(gpio->base); |
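This hunk, and the similar pinctrl, pwm, rtc, spi and staging hunks that follow, all drop the explicit NULL check after platform_get_resource() because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR with its own diagnostics. A minimal probe() sketch of the resulting pattern; the names are illustrative rather than taken from any one of these drivers.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* No "if (!res)" block needed: devm_ioremap_resource() rejects a
	 * NULL resource (and a busy or unmappable one) with an ERR_PTR. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... continue probing with the mapped registers at base ... */
	return 0;
}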
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c index 6038503ed929..32a48f44f574 100644 --- a/drivers/pinctrl/pinctrl-exynos5440.c +++ b/drivers/pinctrl/pinctrl-exynos5440.c | |||
| @@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev) | |||
| 1000 | } | 1000 | } |
| 1001 | 1001 | ||
| 1002 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1002 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1003 | if (!res) { | ||
| 1004 | dev_err(dev, "cannot find IO resource\n"); | ||
| 1005 | return -ENOENT; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | priv->reg_base = devm_ioremap_resource(&pdev->dev, res); | 1003 | priv->reg_base = devm_ioremap_resource(&pdev->dev, res); |
| 1009 | if (IS_ERR(priv->reg_base)) | 1004 | if (IS_ERR(priv->reg_base)) |
| 1010 | return PTR_ERR(priv->reg_base); | 1005 | return PTR_ERR(priv->reg_base); |
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c index 615c5002b757..d22ca252b80d 100644 --- a/drivers/pinctrl/pinctrl-lantiq.c +++ b/drivers/pinctrl/pinctrl-lantiq.c | |||
| @@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, | |||
| 52 | int i; | 52 | int i; |
| 53 | 53 | ||
| 54 | for (i = 0; i < num_maps; i++) | 54 | for (i = 0; i < num_maps; i++) |
| 55 | if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) | 55 | if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || |
| 56 | map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) | ||
| 56 | kfree(map[i].data.configs.configs); | 57 | kfree(map[i].data.configs.configs); |
| 57 | kfree(map); | 58 | kfree(map); |
| 58 | } | 59 | } |
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c index 976366899f68..055d0162098b 100644 --- a/drivers/pinctrl/pinctrl-samsung.c +++ b/drivers/pinctrl/pinctrl-samsung.c | |||
| @@ -932,11 +932,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) | |||
| 932 | drvdata->dev = dev; | 932 | drvdata->dev = dev; |
| 933 | 933 | ||
| 934 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 934 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 935 | if (!res) { | ||
| 936 | dev_err(dev, "cannot find IO resource\n"); | ||
| 937 | return -ENOENT; | ||
| 938 | } | ||
| 939 | |||
| 940 | drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); | 935 | drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); |
| 941 | if (IS_ERR(drvdata->virt_base)) | 936 | if (IS_ERR(drvdata->virt_base)) |
| 942 | return PTR_ERR(drvdata->virt_base); | 937 | return PTR_ERR(drvdata->virt_base); |
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 5f2d2bfd356e..b9fa04618601 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
| @@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, | |||
| 1166 | (*map)->data.mux.function = np->name; | 1166 | (*map)->data.mux.function = np->name; |
| 1167 | 1167 | ||
| 1168 | if (pcs->is_pinconf) { | 1168 | if (pcs->is_pinconf) { |
| 1169 | if (pcs_parse_pinconf(pcs, np, function, map)) | 1169 | res = pcs_parse_pinconf(pcs, np, function, map); |
| 1170 | if (res) | ||
| 1170 | goto free_pingroups; | 1171 | goto free_pingroups; |
| 1171 | *num_maps = 2; | 1172 | *num_maps = 2; |
| 1172 | } else { | 1173 | } else { |
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index f2977cff8366..e92132c76a6b 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c | |||
| @@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev) | |||
| 716 | 716 | ||
| 717 | /* get and remap our register range */ | 717 | /* get and remap our register range */ |
| 718 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 718 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 719 | if (!res) { | ||
| 720 | dev_err(&pdev->dev, "Failed to get resource\n"); | ||
| 721 | return -ENOENT; | ||
| 722 | } | ||
| 723 | xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); | 719 | xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); |
| 724 | if (IS_ERR(xway_info.membase[0])) | 720 | if (IS_ERR(xway_info.membase[0])) |
| 725 | return PTR_ERR(xway_info.membase[0]); | 721 | return PTR_ERR(xway_info.membase[0]); |
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c index b964cc550568..de43262398db 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c | |||
| @@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = { | |||
| 53 | #define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) | 53 | #define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) |
| 54 | #define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) | 54 | #define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) |
| 55 | #define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) | 55 | #define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) |
| 56 | #define WMT_PIN_WAKEUP1 WMT_PIN(0, 16) | 56 | #define WMT_PIN_WAKEUP1 WMT_PIN(0, 17) |
| 57 | #define WMT_PIN_SD0CD WMT_PIN(0, 28) | 57 | #define WMT_PIN_SD0CD WMT_PIN(0, 28) |
| 58 | #define WMT_PIN_VDOUT0 WMT_PIN(1, 0) | 58 | #define WMT_PIN_VDOUT0 WMT_PIN(1, 0) |
| 59 | #define WMT_PIN_VDOUT1 WMT_PIN(1, 1) | 59 | #define WMT_PIN_VDOUT1 WMT_PIN(1, 1) |
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index ec287989eafc..c938bae18812 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c | |||
| @@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev) | |||
| 265 | imx->chip.npwm = 1; | 265 | imx->chip.npwm = 1; |
| 266 | 266 | ||
| 267 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 267 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 268 | if (r == NULL) { | ||
| 269 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 270 | return -ENODEV; | ||
| 271 | } | ||
| 272 | |||
| 273 | imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 268 | imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 274 | if (IS_ERR(imx->mmio_base)) | 269 | if (IS_ERR(imx->mmio_base)) |
| 275 | return PTR_ERR(imx->mmio_base); | 270 | return PTR_ERR(imx->mmio_base); |
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c index d1eb499fb15d..ed6007b27585 100644 --- a/drivers/pwm/pwm-puv3.c +++ b/drivers/pwm/pwm-puv3.c | |||
| @@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev) | |||
| 117 | return PTR_ERR(puv3->clk); | 117 | return PTR_ERR(puv3->clk); |
| 118 | 118 | ||
| 119 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 119 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 120 | if (r == NULL) { | ||
| 121 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 122 | return -ENODEV; | ||
| 123 | } | ||
| 124 | |||
| 125 | puv3->base = devm_ioremap_resource(&pdev->dev, r); | 120 | puv3->base = devm_ioremap_resource(&pdev->dev, r); |
| 126 | if (IS_ERR(puv3->base)) | 121 | if (IS_ERR(puv3->base)) |
| 127 | return PTR_ERR(puv3->base); | 122 | return PTR_ERR(puv3->base); |
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index dee6ab552a0a..dc9717551d39 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c | |||
| @@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev) | |||
| 147 | pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; | 147 | pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; |
| 148 | 148 | ||
| 149 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 149 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 150 | if (r == NULL) { | ||
| 151 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 152 | return -ENODEV; | ||
| 153 | } | ||
| 154 | |||
| 155 | pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 150 | pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 156 | if (IS_ERR(pwm->mmio_base)) | 151 | if (IS_ERR(pwm->mmio_base)) |
| 157 | return PTR_ERR(pwm->mmio_base); | 152 | return PTR_ERR(pwm->mmio_base); |
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 3d75f4a88f98..a5402933001f 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c | |||
| @@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev) | |||
| 181 | pwm->dev = &pdev->dev; | 181 | pwm->dev = &pdev->dev; |
| 182 | 182 | ||
| 183 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 183 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 184 | if (!r) { | ||
| 185 | dev_err(&pdev->dev, "no memory resources defined\n"); | ||
| 186 | return -ENODEV; | ||
| 187 | } | ||
| 188 | |||
| 189 | pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 184 | pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 190 | if (IS_ERR(pwm->mmio_base)) | 185 | if (IS_ERR(pwm->mmio_base)) |
| 191 | return PTR_ERR(pwm->mmio_base); | 186 | return PTR_ERR(pwm->mmio_base); |
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index 0d65fb2e02c7..72ca42dfa733 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c | |||
| @@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev) | |||
| 240 | pc->chip.npwm = 1; | 240 | pc->chip.npwm = 1; |
| 241 | 241 | ||
| 242 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 242 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 243 | if (!r) { | ||
| 244 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 245 | return -ENODEV; | ||
| 246 | } | ||
| 247 | |||
| 248 | pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 243 | pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 249 | if (IS_ERR(pc->mmio_base)) | 244 | if (IS_ERR(pc->mmio_base)) |
| 250 | return PTR_ERR(pc->mmio_base); | 245 | return PTR_ERR(pc->mmio_base); |
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 6a217596942f..48a485c2e422 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c | |||
| @@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) | |||
| 471 | pc->chip.npwm = NUM_PWM_CHANNEL; | 471 | pc->chip.npwm = NUM_PWM_CHANNEL; |
| 472 | 472 | ||
| 473 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 473 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 474 | if (!r) { | ||
| 475 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 476 | return -ENODEV; | ||
| 477 | } | ||
| 478 | |||
| 479 | pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 474 | pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 480 | if (IS_ERR(pc->mmio_base)) | 475 | if (IS_ERR(pc->mmio_base)) |
| 481 | return PTR_ERR(pc->mmio_base); | 476 | return PTR_ERR(pc->mmio_base); |
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c index c9c3d3a1e0eb..3b119bc2c3c6 100644 --- a/drivers/pwm/pwm-tipwmss.c +++ b/drivers/pwm/pwm-tipwmss.c | |||
| @@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev) | |||
| 70 | mutex_init(&info->pwmss_lock); | 70 | mutex_init(&info->pwmss_lock); |
| 71 | 71 | ||
| 72 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 72 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 73 | if (!r) { | ||
| 74 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 75 | return -ENODEV; | ||
| 76 | } | ||
| 77 | |||
| 78 | info->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 73 | info->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 79 | if (IS_ERR(info->mmio_base)) | 74 | if (IS_ERR(info->mmio_base)) |
| 80 | return PTR_ERR(info->mmio_base); | 75 | return PTR_ERR(info->mmio_base); |
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 69effd19afc7..323125abf3f4 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c | |||
| @@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev) | |||
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 232 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 233 | if (r == NULL) { | ||
| 234 | dev_err(&pdev->dev, "no memory resource defined\n"); | ||
| 235 | return -ENODEV; | ||
| 236 | } | ||
| 237 | |||
| 238 | chip->base = devm_ioremap_resource(&pdev->dev, r); | 233 | chip->base = devm_ioremap_resource(&pdev->dev, r); |
| 239 | if (IS_ERR(chip->base)) | 234 | if (IS_ERR(chip->base)) |
| 240 | return PTR_ERR(chip->base); | 235 | return PTR_ERR(chip->base); |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 0c81915b1997..b9838130a7b0 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
| @@ -20,7 +20,6 @@ if RTC_CLASS | |||
| 20 | config RTC_HCTOSYS | 20 | config RTC_HCTOSYS |
| 21 | bool "Set system time from RTC on startup and resume" | 21 | bool "Set system time from RTC on startup and resume" |
| 22 | default y | 22 | default y |
| 23 | depends on !ALWAYS_USE_PERSISTENT_CLOCK | ||
| 24 | help | 23 | help |
| 25 | If you say yes here, the system time (wall clock) will be set using | 24 | If you say yes here, the system time (wall clock) will be set using |
| 26 | the value read from a specified RTC device. This is useful to avoid | 25 | the value read from a specified RTC device. This is useful to avoid |
| @@ -29,7 +28,6 @@ config RTC_HCTOSYS | |||
| 29 | config RTC_SYSTOHC | 28 | config RTC_SYSTOHC |
| 30 | bool "Set the RTC time based on NTP synchronization" | 29 | bool "Set the RTC time based on NTP synchronization" |
| 31 | default y | 30 | default y |
| 32 | depends on !ALWAYS_USE_PERSISTENT_CLOCK | ||
| 33 | help | 31 | help |
| 34 | If you say yes here, the system time (wall clock) will be stored | 32 | If you say yes here, the system time (wall clock) will be stored |
| 35 | in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 | 33 | in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 |
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index f5dfb6e5e7d9..d592e2fe43f7 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c | |||
| @@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) | |||
| 234 | return -ENOMEM; | 234 | return -ENOMEM; |
| 235 | } | 235 | } |
| 236 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 236 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 237 | if (!res) { | ||
| 238 | dev_err(&pdev->dev, "platform_get_resource failed\n"); | ||
| 239 | return -ENXIO; | ||
| 240 | } | ||
| 241 | |||
| 242 | nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); | 237 | nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); |
| 243 | if (IS_ERR(nuc900_rtc->rtc_reg)) | 238 | if (IS_ERR(nuc900_rtc->rtc_reg)) |
| 244 | return PTR_ERR(nuc900_rtc->rtc_reg); | 239 | return PTR_ERR(nuc900_rtc->rtc_reg); |
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 4e1bdb832e37..b0ba3fc991ea 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
| @@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev) | |||
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 349 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 350 | if (!res) { | ||
| 351 | pr_debug("%s: RTC resource data missing\n", pdev->name); | ||
| 352 | return -ENOENT; | ||
| 353 | } | ||
| 354 | |||
| 355 | rtc_base = devm_ioremap_resource(&pdev->dev, res); | 350 | rtc_base = devm_ioremap_resource(&pdev->dev, res); |
| 356 | if (IS_ERR(rtc_base)) | 351 | if (IS_ERR(rtc_base)) |
| 357 | return PTR_ERR(rtc_base); | 352 | return PTR_ERR(rtc_base); |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 14040b22888d..0b495e8b8e66 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev) | |||
| 477 | /* get the memory region */ | 477 | /* get the memory region */ |
| 478 | 478 | ||
| 479 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 479 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 480 | if (res == NULL) { | ||
| 481 | dev_err(&pdev->dev, "failed to get memory region resource\n"); | ||
| 482 | return -ENOENT; | ||
| 483 | } | ||
| 484 | |||
| 485 | s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); | 480 | s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); |
| 486 | if (IS_ERR(s3c_rtc_base)) | 481 | if (IS_ERR(s3c_rtc_base)) |
| 487 | return PTR_ERR(s3c_rtc_base); | 482 | return PTR_ERR(s3c_rtc_base); |
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index a34315d25478..76af92ad5a8a 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c | |||
| @@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) | |||
| 322 | return -ENOMEM; | 322 | return -ENOMEM; |
| 323 | 323 | ||
| 324 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 324 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 325 | if (!res) { | ||
| 326 | dev_err(&pdev->dev, | ||
| 327 | "Unable to allocate resources for device.\n"); | ||
| 328 | return -EBUSY; | ||
| 329 | } | ||
| 330 | |||
| 331 | info->rtc_base = devm_ioremap_resource(&pdev->dev, res); | 325 | info->rtc_base = devm_ioremap_resource(&pdev->dev, res); |
| 332 | if (IS_ERR(info->rtc_base)) | 326 | if (IS_ERR(info->rtc_base)) |
| 333 | return PTR_ERR(info->rtc_base); | 327 | return PTR_ERR(info->rtc_base); |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 787bd2c22bca..380387a47b1d 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
| @@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master, | |||
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | if (xfer->tx_buf) | 528 | if (xfer->tx_buf) |
| 529 | spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); | 529 | if (xfer->bits_per_word > 8) |
| 530 | spi_writel(as, TDR, *(u16 *)(xfer->tx_buf)); | ||
| 531 | else | ||
| 532 | spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); | ||
| 530 | else | 533 | else |
| 531 | spi_writel(as, TDR, 0); | 534 | spi_writel(as, TDR, 0); |
| 532 | 535 | ||
| 533 | dev_dbg(master->dev.parent, | 536 | dev_dbg(master->dev.parent, |
| 534 | " start pio xfer %p: len %u tx %p rx %p\n", | 537 | " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", |
| 535 | xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); | 538 | xfer, xfer->len, xfer->tx_buf, xfer->rx_buf, |
| 539 | xfer->bits_per_word); | ||
| 536 | 540 | ||
| 537 | /* Enable relevant interrupts */ | 541 | /* Enable relevant interrupts */ |
| 538 | spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); | 542 | spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); |
| @@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) | |||
| 950 | { | 954 | { |
| 951 | u8 *txp; | 955 | u8 *txp; |
| 952 | u8 *rxp; | 956 | u8 *rxp; |
| 957 | u16 *txp16; | ||
| 958 | u16 *rxp16; | ||
| 953 | unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; | 959 | unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; |
| 954 | 960 | ||
| 955 | if (xfer->rx_buf) { | 961 | if (xfer->rx_buf) { |
| 956 | rxp = ((u8 *)xfer->rx_buf) + xfer_pos; | 962 | if (xfer->bits_per_word > 8) { |
| 957 | *rxp = spi_readl(as, RDR); | 963 | rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); |
| 964 | *rxp16 = spi_readl(as, RDR); | ||
| 965 | } else { | ||
| 966 | rxp = ((u8 *)xfer->rx_buf) + xfer_pos; | ||
| 967 | *rxp = spi_readl(as, RDR); | ||
| 968 | } | ||
| 958 | } else { | 969 | } else { |
| 959 | spi_readl(as, RDR); | 970 | spi_readl(as, RDR); |
| 960 | } | 971 | } |
| 961 | 972 | if (xfer->bits_per_word > 8) { | |
| 962 | as->current_remaining_bytes--; | 973 | as->current_remaining_bytes -= 2; |
| 974 | if (as->current_remaining_bytes < 0) | ||
| 975 | as->current_remaining_bytes = 0; | ||
| 976 | } else { | ||
| 977 | as->current_remaining_bytes--; | ||
| 978 | } | ||
| 963 | 979 | ||
| 964 | if (as->current_remaining_bytes) { | 980 | if (as->current_remaining_bytes) { |
| 965 | if (xfer->tx_buf) { | 981 | if (xfer->tx_buf) { |
| 966 | txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; | 982 | if (xfer->bits_per_word > 8) { |
| 967 | spi_writel(as, TDR, *txp); | 983 | txp16 = (u16 *)(((u8 *)xfer->tx_buf) |
| 984 | + xfer_pos + 2); | ||
| 985 | spi_writel(as, TDR, *txp16); | ||
| 986 | } else { | ||
| 987 | txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; | ||
| 988 | spi_writel(as, TDR, *txp); | ||
| 989 | } | ||
| 968 | } else { | 990 | } else { |
| 969 | spi_writel(as, TDR, 0); | 991 | spi_writel(as, TDR, 0); |
| 970 | } | 992 | } |
| @@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
| 1378 | } | 1400 | } |
| 1379 | } | 1401 | } |
| 1380 | 1402 | ||
| 1403 | if (xfer->bits_per_word > 8) { | ||
| 1404 | if (xfer->len % 2) { | ||
| 1405 | dev_dbg(&spi->dev, "buffer length must be 16-bit aligned\n"); | ||
| 1406 | return -EINVAL; | ||
| 1407 | } | ||
| 1408 | } | ||
| 1409 | |||
| 1381 | /* FIXME implement these protocol options!! */ | 1410 | /* FIXME implement these protocol options!! */ |
| 1382 | if (xfer->speed_hz) { | 1411 | if (xfer->speed_hz < spi->max_speed_hz) { |
| 1383 | dev_dbg(&spi->dev, "no protocol options yet\n"); | 1412 | dev_dbg(&spi->dev, "can't change speed in transfer\n"); |
| 1384 | return -ENOPROTOOPT; | 1413 | return -ENOPROTOOPT; |
| 1385 | } | 1414 | } |
| 1386 | 1415 | ||
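The spi-atmel changes above make PIO transfers step the buffers one 16-bit word at a time when bits_per_word is above 8 and reject odd-length buffers up front. A standalone sketch of that width handling outside the driver; write_fifo() merely stands in for the spi_writel(as, TDR, ...) register write.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void write_fifo(uint16_t v) { (void)v; }	/* stand-in for the TDR write */

static int pio_send(const uint8_t *buf, size_t len, unsigned int bits_per_word)
{
	size_t pos = 0;
	uint16_t word;

	if (bits_per_word > 8 && (len % 2))
		return -1;		/* buffer must be 16-bit aligned */

	while (pos < len) {
		if (bits_per_word > 8) {
			memcpy(&word, buf + pos, 2);	/* one 16-bit word per FIFO write */
			write_fifo(word);
			pos += 2;
		} else {
			write_fifo(buf[pos]);		/* one byte per FIFO write */
			pos += 1;
		}
	}
	return 0;
}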
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 2e8f24a1fb95..50b13c9b1ab6 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
| @@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = { | |||
| 784 | }, | 784 | }, |
| 785 | { }, | 785 | { }, |
| 786 | }; | 786 | }; |
| 787 | MODULE_DEVICE_TABLE(of, davini_spi_of_match); | 787 | MODULE_DEVICE_TABLE(of, davinci_spi_of_match); |
| 788 | 788 | ||
| 789 | /** | 789 | /** |
| 790 | * spi_davinci_get_pdata - Get platform data from DTS binding | 790 | * spi_davinci_get_pdata - Get platform data from DTS binding |
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index d65c000efe35..09df8e22dba0 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c | |||
| @@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev) | |||
| 489 | tegra_sflash_parse_dt(tsd); | 489 | tegra_sflash_parse_dt(tsd); |
| 490 | 490 | ||
| 491 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 491 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 492 | if (!r) { | ||
| 493 | dev_err(&pdev->dev, "No IO memory resource\n"); | ||
| 494 | ret = -ENODEV; | ||
| 495 | goto exit_free_master; | ||
| 496 | } | ||
| 497 | tsd->base = devm_ioremap_resource(&pdev->dev, r); | 492 | tsd->base = devm_ioremap_resource(&pdev->dev, r); |
| 498 | if (IS_ERR(tsd->base)) { | 493 | if (IS_ERR(tsd->base)) { |
| 499 | ret = PTR_ERR(tsd->base); | 494 | ret = PTR_ERR(tsd->base); |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 163fd802b7ac..32b7bb111eb6 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master) | |||
| 334 | spi->dev.parent = &master->dev; | 334 | spi->dev.parent = &master->dev; |
| 335 | spi->dev.bus = &spi_bus_type; | 335 | spi->dev.bus = &spi_bus_type; |
| 336 | spi->dev.release = spidev_release; | 336 | spi->dev.release = spidev_release; |
| 337 | spi->cs_gpio = -EINVAL; | 337 | spi->cs_gpio = -ENOENT; |
| 338 | device_initialize(&spi->dev); | 338 | device_initialize(&spi->dev); |
| 339 | return spi; | 339 | return spi; |
| 340 | } | 340 | } |
| @@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master) | |||
| 1067 | nb = of_gpio_named_count(np, "cs-gpios"); | 1067 | nb = of_gpio_named_count(np, "cs-gpios"); |
| 1068 | master->num_chipselect = max(nb, (int)master->num_chipselect); | 1068 | master->num_chipselect = max(nb, (int)master->num_chipselect); |
| 1069 | 1069 | ||
| 1070 | if (nb < 1) | 1070 | /* Return error only for an incorrectly formed cs-gpios property */ |
| 1071 | if (nb == 0 || nb == -ENOENT) | ||
| 1071 | return 0; | 1072 | return 0; |
| 1073 | else if (nb < 0) | ||
| 1074 | return nb; | ||
| 1072 | 1075 | ||
| 1073 | cs = devm_kzalloc(&master->dev, | 1076 | cs = devm_kzalloc(&master->dev, |
| 1074 | sizeof(int) * master->num_chipselect, | 1077 | sizeof(int) * master->num_chipselect, |
| @@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master) | |||
| 1079 | return -ENOMEM; | 1082 | return -ENOMEM; |
| 1080 | 1083 | ||
| 1081 | for (i = 0; i < master->num_chipselect; i++) | 1084 | for (i = 0; i < master->num_chipselect; i++) |
| 1082 | cs[i] = -EINVAL; | 1085 | cs[i] = -ENOENT; |
| 1083 | 1086 | ||
| 1084 | for (i = 0; i < nb; i++) | 1087 | for (i = 0; i < nb; i++) |
| 1085 | cs[i] = of_get_named_gpio(np, "cs-gpios", i); | 1088 | cs[i] = of_get_named_gpio(np, "cs-gpios", i); |
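With the spi core change above, a missing "cs-gpios" property (-ENOENT) now means "no GPIO chip selects" rather than an error, and every unset slot is seeded with -ENOENT instead of -EINVAL. A short sketch of how a controller driver can then consume spi->cs_gpio; the function is illustrative and not part of the core.

#include <linux/gpio.h>
#include <linux/spi/spi.h>

static void example_activate_cs(struct spi_device *spi, bool enable)
{
	/* gpio_is_valid() is false for -ENOENT, so unset slots cleanly
	 * fall through to the controller's native chip-select logic. */
	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, enable ? 1 : 0);
	/* else: program the native chip select in hardware */
}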
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c index 1f3d581a1078..b610960e93d3 100644 --- a/drivers/staging/dwc2/platform.c +++ b/drivers/staging/dwc2/platform.c | |||
| @@ -102,11 +102,6 @@ static int dwc2_driver_probe(struct platform_device *dev) | |||
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); | 104 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); |
| 105 | if (!res) { | ||
| 106 | dev_err(&dev->dev, "missing memory base resource\n"); | ||
| 107 | return -EINVAL; | ||
| 108 | } | ||
| 109 | |||
| 110 | hsotg->regs = devm_ioremap_resource(&dev->dev, res); | 105 | hsotg->regs = devm_ioremap_resource(&dev->dev, res); |
| 111 | if (IS_ERR(hsotg->regs)) | 106 | if (IS_ERR(hsotg->regs)) |
| 112 | return PTR_ERR(hsotg->regs); | 107 | return PTR_ERR(hsotg->regs); |
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index a88959f9a07a..863b22e51b45 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c | |||
| @@ -800,11 +800,6 @@ static int tegra_nvec_probe(struct platform_device *pdev) | |||
| 800 | } | 800 | } |
| 801 | 801 | ||
| 802 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 802 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 803 | if (!res) { | ||
| 804 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
| 805 | return -ENODEV; | ||
| 806 | } | ||
| 807 | |||
| 808 | base = devm_ioremap_resource(&pdev->dev, res); | 803 | base = devm_ioremap_resource(&pdev->dev, res); |
| 809 | if (IS_ERR(base)) | 804 | if (IS_ERR(base)) |
| 810 | return PTR_ERR(base); | 805 | return PTR_ERR(base); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index ffbc6a94be52..262ef1f23b38 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg( | |||
| 1250 | 1250 | ||
| 1251 | static void iscsit_do_crypto_hash_buf( | 1251 | static void iscsit_do_crypto_hash_buf( |
| 1252 | struct hash_desc *hash, | 1252 | struct hash_desc *hash, |
| 1253 | unsigned char *buf, | 1253 | const void *buf, |
| 1254 | u32 payload_length, | 1254 | u32 payload_length, |
| 1255 | u32 padding, | 1255 | u32 padding, |
| 1256 | u8 *pad_bytes, | 1256 | u8 *pad_bytes, |
| @@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message( | |||
| 2524 | if (conn->conn_ops->HeaderDigest) { | 2524 | if (conn->conn_ops->HeaderDigest) { |
| 2525 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 2525 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 2526 | 2526 | ||
| 2527 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 2527 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 2528 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 2528 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 2529 | 0, NULL, (u8 *)header_digest); | ||
| 2530 | 2529 | ||
| 2531 | cmd->tx_size += ISCSI_CRC_LEN; | 2530 | cmd->tx_size += ISCSI_CRC_LEN; |
| 2532 | pr_debug("Attaching CRC32C HeaderDigest to" | 2531 | pr_debug("Attaching CRC32C HeaderDigest to" |
| @@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 2662 | if (conn->conn_ops->HeaderDigest) { | 2661 | if (conn->conn_ops->HeaderDigest) { |
| 2663 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 2662 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 2664 | 2663 | ||
| 2665 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 2664 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, |
| 2666 | (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, | 2665 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 2667 | 0, NULL, (u8 *)header_digest); | ||
| 2668 | 2666 | ||
| 2669 | iov[0].iov_len += ISCSI_CRC_LEN; | 2667 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 2670 | tx_size += ISCSI_CRC_LEN; | 2668 | tx_size += ISCSI_CRC_LEN; |
| @@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 2841 | if (conn->conn_ops->HeaderDigest) { | 2839 | if (conn->conn_ops->HeaderDigest) { |
| 2842 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 2840 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 2843 | 2841 | ||
| 2844 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 2842 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], |
| 2845 | (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, | 2843 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 2846 | 0, NULL, (u8 *)header_digest); | ||
| 2847 | 2844 | ||
| 2848 | iov[0].iov_len += ISCSI_CRC_LEN; | 2845 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 2849 | tx_size += ISCSI_CRC_LEN; | 2846 | tx_size += ISCSI_CRC_LEN; |
| @@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin( | |||
| 2900 | if (conn->conn_ops->HeaderDigest) { | 2897 | if (conn->conn_ops->HeaderDigest) { |
| 2901 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 2898 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 2902 | 2899 | ||
| 2903 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 2900 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 2904 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 2901 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 2905 | 0, NULL, (u8 *)header_digest); | ||
| 2906 | 2902 | ||
| 2907 | tx_size += ISCSI_CRC_LEN; | 2903 | tx_size += ISCSI_CRC_LEN; |
| 2908 | pr_debug("Attaching CRC32C HeaderDigest to" | 2904 | pr_debug("Attaching CRC32C HeaderDigest to" |
| @@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 2949 | if (conn->conn_ops->HeaderDigest) { | 2945 | if (conn->conn_ops->HeaderDigest) { |
| 2950 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 2946 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 2951 | 2947 | ||
| 2952 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 2948 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 2953 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 2949 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 2954 | 0, NULL, (u8 *)header_digest); | ||
| 2955 | 2950 | ||
| 2956 | iov[0].iov_len += ISCSI_CRC_LEN; | 2951 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 2957 | tx_size += ISCSI_CRC_LEN; | 2952 | tx_size += ISCSI_CRC_LEN; |
| @@ -3040,9 +3035,8 @@ static int iscsit_send_r2t( | |||
| 3040 | if (conn->conn_ops->HeaderDigest) { | 3035 | if (conn->conn_ops->HeaderDigest) { |
| 3041 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3036 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 3042 | 3037 | ||
| 3043 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3038 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 3044 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 3039 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 3045 | 0, NULL, (u8 *)header_digest); | ||
| 3046 | 3040 | ||
| 3047 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; | 3041 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; |
| 3048 | tx_size += ISCSI_CRC_LEN; | 3042 | tx_size += ISCSI_CRC_LEN; |
| @@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 3256 | if (conn->conn_ops->HeaderDigest) { | 3250 | if (conn->conn_ops->HeaderDigest) { |
| 3257 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3251 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 3258 | 3252 | ||
| 3259 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3253 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, |
| 3260 | (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, | 3254 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 3261 | 0, NULL, (u8 *)header_digest); | ||
| 3262 | 3255 | ||
| 3263 | iov[0].iov_len += ISCSI_CRC_LEN; | 3256 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 3264 | tx_size += ISCSI_CRC_LEN; | 3257 | tx_size += ISCSI_CRC_LEN; |
| @@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 3329 | if (conn->conn_ops->HeaderDigest) { | 3322 | if (conn->conn_ops->HeaderDigest) { |
| 3330 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3323 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 3331 | 3324 | ||
| 3332 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3325 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 3333 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 3326 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 3334 | 0, NULL, (u8 *)header_digest); | ||
| 3335 | 3327 | ||
| 3336 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; | 3328 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; |
| 3337 | tx_size += ISCSI_CRC_LEN; | 3329 | tx_size += ISCSI_CRC_LEN; |
| @@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp( | |||
| 3504 | if (conn->conn_ops->HeaderDigest) { | 3496 | if (conn->conn_ops->HeaderDigest) { |
| 3505 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3497 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 3506 | 3498 | ||
| 3507 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3499 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 3508 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 3500 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 3509 | 0, NULL, (u8 *)header_digest); | ||
| 3510 | 3501 | ||
| 3511 | iov[0].iov_len += ISCSI_CRC_LEN; | 3502 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 3512 | tx_size += ISCSI_CRC_LEN; | 3503 | tx_size += ISCSI_CRC_LEN; |
| @@ -3557,11 +3548,11 @@ static int iscsit_send_reject( | |||
| 3557 | struct iscsi_cmd *cmd, | 3548 | struct iscsi_cmd *cmd, |
| 3558 | struct iscsi_conn *conn) | 3549 | struct iscsi_conn *conn) |
| 3559 | { | 3550 | { |
| 3560 | u32 iov_count = 0, tx_size = 0; | 3551 | struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; |
| 3561 | struct iscsi_reject *hdr; | ||
| 3562 | struct kvec *iov; | 3552 | struct kvec *iov; |
| 3553 | u32 iov_count = 0, tx_size; | ||
| 3563 | 3554 | ||
| 3564 | iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); | 3555 | iscsit_build_reject(cmd, conn, hdr); |
| 3565 | 3556 | ||
| 3566 | iov = &cmd->iov_misc[0]; | 3557 | iov = &cmd->iov_misc[0]; |
| 3567 | iov[iov_count].iov_base = cmd->pdu; | 3558 | iov[iov_count].iov_base = cmd->pdu; |
| @@ -3574,9 +3565,8 @@ static int iscsit_send_reject( | |||
| 3574 | if (conn->conn_ops->HeaderDigest) { | 3565 | if (conn->conn_ops->HeaderDigest) { |
| 3575 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3566 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; |
| 3576 | 3567 | ||
| 3577 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3568 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, |
| 3578 | (unsigned char *)hdr, ISCSI_HDR_LEN, | 3569 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); |
| 3579 | 0, NULL, (u8 *)header_digest); | ||
| 3580 | 3570 | ||
| 3581 | iov[0].iov_len += ISCSI_CRC_LEN; | 3571 | iov[0].iov_len += ISCSI_CRC_LEN; |
| 3582 | tx_size += ISCSI_CRC_LEN; | 3572 | tx_size += ISCSI_CRC_LEN; |
| @@ -3585,9 +3575,8 @@ static int iscsit_send_reject( | |||
| 3585 | } | 3575 | } |
| 3586 | 3576 | ||
| 3587 | if (conn->conn_ops->DataDigest) { | 3577 | if (conn->conn_ops->DataDigest) { |
| 3588 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, | 3578 | iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, |
| 3589 | (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, | 3579 | ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); |
| 3590 | 0, NULL, (u8 *)&cmd->data_crc); | ||
| 3591 | 3580 | ||
| 3592 | iov[iov_count].iov_base = &cmd->data_crc; | 3581 | iov[iov_count].iov_base = &cmd->data_crc; |
| 3593 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | 3582 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; |
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 7816af6cdd12..40d9dbca987b 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c | |||
| @@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn( | |||
| 823 | /* | 823 | /* |
| 824 | * CmdSN is greater than the tail of the list. | 824 | * CmdSN is greater than the tail of the list. |
| 825 | */ | 825 | */ |
| 826 | if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) | 826 | if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) |
| 827 | list_add_tail(&ooo_cmdsn->ooo_list, | 827 | list_add_tail(&ooo_cmdsn->ooo_list, |
| 828 | &sess->sess_ooo_cmdsn_list); | 828 | &sess->sess_ooo_cmdsn_list); |
| 829 | else { | 829 | else { |
| @@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn( | |||
| 833 | */ | 833 | */ |
| 834 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, | 834 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, |
| 835 | ooo_list) { | 835 | ooo_list) { |
| 836 | if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) | 836 | if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) |
| 837 | continue; | 837 | continue; |
| 838 | 838 | ||
| 839 | /* Insert before this entry */ | ||
| 839 | list_add(&ooo_cmdsn->ooo_list, | 840 | list_add(&ooo_cmdsn->ooo_list, |
| 840 | &ooo_tmp->ooo_list); | 841 | ooo_tmp->ooo_list.prev); |
| 841 | break; | 842 | break; |
| 842 | } | 843 | } |
| 843 | } | 844 | } |
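The erl1 hunks above swap raw '<' comparisons of CmdSN values for iscsi_sna_lt(), so out-of-order command queueing keeps working when the 32-bit sequence number wraps. A self-contained sketch of the serial-number-arithmetic ordering this relies on; this shows the general idea, not the driver's exact helper.

#include <stdint.h>
#include <stdio.h>

/* "a comes before b" in 32-bit serial number arithmetic: true when the
 * forward distance from a to b is non-zero and less than half the
 * sequence space, which keeps ordering correct across wraparound. */
static int sna_lt(uint32_t a, uint32_t b)
{
	return a != b && (int32_t)(a - b) < 0;
}

int main(void)
{
	/* 0x00000010 is "after" 0xfffffff0 even though it is numerically smaller. */
	printf("%d\n", sna_lt(0xfffffff0u, 0x00000010u));	/* prints 1 */
	printf("%d\n", sna_lt(0x00000010u, 0xfffffff0u));	/* prints 0 */
	return 0;
}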
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index f690be9e5293..c2185fc31136 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
| @@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) | |||
| 436 | /* | 436 | /* |
| 437 | * Extra parameters for ISER from RFC-5046 | 437 | * Extra parameters for ISER from RFC-5046 |
| 438 | */ | 438 | */ |
| 439 | param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, | 439 | param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, |
| 440 | PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, | 440 | PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, |
| 441 | TYPERANGE_BOOL_AND, USE_LEADING_ONLY); | 441 | TYPERANGE_BOOL_AND, USE_LEADING_ONLY); |
| 442 | if (!param) | 442 | if (!param) |
| @@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate( | |||
| 529 | SET_PSTATE_NEGOTIATE(param); | 529 | SET_PSTATE_NEGOTIATE(param); |
| 530 | } else if (!strcmp(param->name, OFMARKINT)) { | 530 | } else if (!strcmp(param->name, OFMARKINT)) { |
| 531 | SET_PSTATE_NEGOTIATE(param); | 531 | SET_PSTATE_NEGOTIATE(param); |
| 532 | } else if (!strcmp(param->name, RDMAEXTENTIONS)) { | 532 | } else if (!strcmp(param->name, RDMAEXTENSIONS)) { |
| 533 | if (iser == true) | 533 | if (iser == true) |
| 534 | SET_PSTATE_NEGOTIATE(param); | 534 | SET_PSTATE_NEGOTIATE(param); |
| 535 | } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { | 535 | } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { |
| @@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery( | |||
| 580 | param->state &= ~PSTATE_NEGOTIATE; | 580 | param->state &= ~PSTATE_NEGOTIATE; |
| 581 | else if (!strcmp(param->name, OFMARKINT)) | 581 | else if (!strcmp(param->name, OFMARKINT)) |
| 582 | param->state &= ~PSTATE_NEGOTIATE; | 582 | param->state &= ~PSTATE_NEGOTIATE; |
| 583 | else if (!strcmp(param->name, RDMAEXTENTIONS)) | 583 | else if (!strcmp(param->name, RDMAEXTENSIONS)) |
| 584 | param->state &= ~PSTATE_NEGOTIATE; | 584 | param->state &= ~PSTATE_NEGOTIATE; |
| 585 | else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) | 585 | else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) |
| 586 | param->state &= ~PSTATE_NEGOTIATE; | 586 | param->state &= ~PSTATE_NEGOTIATE; |
| @@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters( | |||
| 1977 | ops->SessionType = !strcmp(param->value, DISCOVERY); | 1977 | ops->SessionType = !strcmp(param->value, DISCOVERY); |
| 1978 | pr_debug("SessionType: %s\n", | 1978 | pr_debug("SessionType: %s\n", |
| 1979 | param->value); | 1979 | param->value); |
| 1980 | } else if (!strcmp(param->name, RDMAEXTENTIONS)) { | 1980 | } else if (!strcmp(param->name, RDMAEXTENSIONS)) { |
| 1981 | ops->RDMAExtensions = !strcmp(param->value, YES); | 1981 | ops->RDMAExtensions = !strcmp(param->value, YES); |
| 1982 | pr_debug("RDMAExtensions: %s\n", | 1982 | pr_debug("RDMAExtensions: %s\n", |
| 1983 | param->value); | 1983 | param->value); |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index f31b9c4b83f2..915b06798505 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h | |||
| @@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, | |||
| 91 | /* | 91 | /* |
| 92 | * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046 | 92 | * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
| 93 | */ | 93 | */ |
| 94 | #define RDMAEXTENTIONS "RDMAExtensions" | 94 | #define RDMAEXTENSIONS "RDMAExtensions" |
| 95 | #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" | 95 | #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" |
| 96 | #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" | 96 | #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" |
| 97 | 97 | ||
| @@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, | |||
| 142 | /* | 142 | /* |
| 143 | * Initial values for iSER parameters following RFC-5046 Section 6 | 143 | * Initial values for iSER parameters following RFC-5046 Section 6 |
| 144 | */ | 144 | */ |
| 145 | #define INITIAL_RDMAEXTENTIONS NO | 145 | #define INITIAL_RDMAEXTENSIONS NO |
| 146 | #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" | 146 | #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" |
| 147 | #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" | 147 | #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" |
| 148 | 148 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 43b7ac6c5b1c..4a8bd36d3958 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
| @@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { | |||
| 1584 | .store = target_core_store_dev_udev_path, | 1584 | .store = target_core_store_dev_udev_path, |
| 1585 | }; | 1585 | }; |
| 1586 | 1586 | ||
| 1587 | static ssize_t target_core_show_dev_enable(void *p, char *page) | ||
| 1588 | { | ||
| 1589 | struct se_device *dev = p; | ||
| 1590 | |||
| 1591 | return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); | ||
| 1592 | } | ||
| 1593 | |||
| 1587 | static ssize_t target_core_store_dev_enable( | 1594 | static ssize_t target_core_store_dev_enable( |
| 1588 | void *p, | 1595 | void *p, |
| 1589 | const char *page, | 1596 | const char *page, |
| @@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable( | |||
| 1609 | static struct target_core_configfs_attribute target_core_attr_dev_enable = { | 1616 | static struct target_core_configfs_attribute target_core_attr_dev_enable = { |
| 1610 | .attr = { .ca_owner = THIS_MODULE, | 1617 | .attr = { .ca_owner = THIS_MODULE, |
| 1611 | .ca_name = "enable", | 1618 | .ca_name = "enable", |
| 1612 | .ca_mode = S_IWUSR }, | 1619 | .ca_mode = S_IRUGO | S_IWUSR }, |
| 1613 | .show = NULL, | 1620 | .show = target_core_show_dev_enable, |
| 1614 | .store = target_core_store_dev_enable, | 1621 | .store = target_core_store_dev_enable, |
| 1615 | }; | 1622 | }; |
| 1616 | 1623 | ||
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 2e4d655471bc..4630481b6043 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
| @@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
| 68 | struct se_dev_entry *deve = se_cmd->se_deve; | 68 | struct se_dev_entry *deve = se_cmd->se_deve; |
| 69 | 69 | ||
| 70 | deve->total_cmds++; | 70 | deve->total_cmds++; |
| 71 | deve->total_bytes += se_cmd->data_length; | ||
| 72 | 71 | ||
| 73 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && | 72 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && |
| 74 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { | 73 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { |
| @@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
| 85 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 84 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
| 86 | deve->read_bytes += se_cmd->data_length; | 85 | deve->read_bytes += se_cmd->data_length; |
| 87 | 86 | ||
| 88 | deve->deve_cmds++; | ||
| 89 | |||
| 90 | se_lun = deve->se_lun; | 87 | se_lun = deve->se_lun; |
| 91 | se_cmd->se_lun = deve->se_lun; | 88 | se_cmd->se_lun = deve->se_lun; |
| 92 | se_cmd->pr_res_key = deve->pr_res_key; | 89 | se_cmd->pr_res_key = deve->pr_res_key; |
| @@ -275,17 +272,6 @@ int core_free_device_list_for_node( | |||
| 275 | return 0; | 272 | return 0; |
| 276 | } | 273 | } |
| 277 | 274 | ||
| 278 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | ||
| 279 | { | ||
| 280 | struct se_dev_entry *deve; | ||
| 281 | unsigned long flags; | ||
| 282 | |||
| 283 | spin_lock_irqsave(&se_nacl->device_list_lock, flags); | ||
| 284 | deve = se_nacl->device_list[se_cmd->orig_fe_lun]; | ||
| 285 | deve->deve_cmds--; | ||
| 286 | spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); | ||
| 287 | } | ||
| 288 | |||
| 289 | void core_update_device_list_access( | 275 | void core_update_device_list_access( |
| 290 | u32 mapped_lun, | 276 | u32 mapped_lun, |
| 291 | u32 lun_access, | 277 | u32 lun_access, |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 58ed683e04ae..1b1d544e927a 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
| @@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev) | |||
| 153 | struct request_queue *q = bdev_get_queue(inode->i_bdev); | 153 | struct request_queue *q = bdev_get_queue(inode->i_bdev); |
| 154 | unsigned long long dev_size; | 154 | unsigned long long dev_size; |
| 155 | 155 | ||
| 156 | dev->dev_attrib.hw_block_size = | ||
| 157 | bdev_logical_block_size(inode->i_bdev); | ||
| 158 | dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); | ||
| 159 | |||
| 160 | /* | 156 | /* |
| 161 | * Determine the number of bytes from i_size_read() minus | 157 | * Determine the number of bytes from i_size_read() minus |
| 162 | * one (1) logical sector from underlying struct block_device | 158 | * one (1) logical sector from underlying struct block_device |
| @@ -203,9 +199,6 @@ static int fd_configure_device(struct se_device *dev) | |||
| 203 | goto fail; | 199 | goto fail; |
| 204 | } | 200 | } |
| 205 | 201 | ||
| 206 | dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; | ||
| 207 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; | ||
| 208 | |||
| 209 | /* | 202 | /* |
| 210 | * Limit UNMAP emulation to 8k Number of LBAs (NoLB) | 203 | * Limit UNMAP emulation to 8k Number of LBAs (NoLB) |
| 211 | */ | 204 | */ |
| @@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev) | |||
| 226 | 219 | ||
| 227 | fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; | 220 | fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; |
| 228 | 221 | ||
| 222 | dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; | ||
| 223 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; | ||
| 229 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; | 224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; |
| 230 | 225 | ||
| 231 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { | 226 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 07f5f94634bb..aa1620abec6d 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
| @@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd) | |||
| 615 | rw = WRITE_FUA; | 615 | rw = WRITE_FUA; |
| 616 | else if (!(q->flush_flags & REQ_FLUSH)) | 616 | else if (!(q->flush_flags & REQ_FLUSH)) |
| 617 | rw = WRITE_FUA; | 617 | rw = WRITE_FUA; |
| 618 | else | ||
| 619 | rw = WRITE; | ||
| 618 | } else { | 620 | } else { |
| 619 | rw = WRITE; | 621 | rw = WRITE; |
| 620 | } | 622 | } |
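Editor's note on the target_core_iblock.c hunk above: without the added else branch, rw could be read uninitialized when the backing queue advertises FUA, the command does not set SCF_FUA, and the queue also supports an explicit flush (REQ_FLUSH); the fix pins that case to a plain WRITE. A single-expression restatement of the decision, assuming the enclosing check is on the queue's REQ_FUA capability as in the surrounding function (sketch only, not part of the patch):

	/* WRITE_FUA only when the device honours FUA and either the command
	 * requested it or the queue offers no explicit flush; otherwise a
	 * plain WRITE. */
	rw = ((q->flush_flags & REQ_FUA) &&
	      ((cmd->se_cmd_flags & SCF_FUA) || !(q->flush_flags & REQ_FLUSH)))
			? WRITE_FUA : WRITE;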
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 853bab60e362..18d49df4d0ac 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
| @@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp; | |||
| 8 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); | 8 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); |
| 9 | int core_free_device_list_for_node(struct se_node_acl *, | 9 | int core_free_device_list_for_node(struct se_node_acl *, |
| 10 | struct se_portal_group *); | 10 | struct se_portal_group *); |
| 11 | void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); | ||
| 12 | void core_update_device_list_access(u32, u32, struct se_node_acl *); | 11 | void core_update_device_list_access(u32, u32, struct se_node_acl *); |
| 13 | int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, | 12 | int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, |
| 14 | u32, u32, struct se_node_acl *, struct se_portal_group *); | 13 | u32, u32, struct se_node_acl *, struct se_portal_group *); |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e0b3c379aa14..0921a64b5550 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
| @@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd) | |||
| 291 | u32 src_len; | 291 | u32 src_len; |
| 292 | u64 tmp; | 292 | u64 tmp; |
| 293 | 293 | ||
| 294 | if (dev->rd_flags & RDF_NULLIO) { | ||
| 295 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | |||
| 294 | tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; | 299 | tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; |
| 295 | rd_offset = do_div(tmp, PAGE_SIZE); | 300 | rd_offset = do_div(tmp, PAGE_SIZE); |
| 296 | rd_page = tmp; | 301 | rd_page = tmp; |
| @@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd) | |||
| 373 | } | 378 | } |
| 374 | 379 | ||
| 375 | enum { | 380 | enum { |
| 376 | Opt_rd_pages, Opt_err | 381 | Opt_rd_pages, Opt_rd_nullio, Opt_err |
| 377 | }; | 382 | }; |
| 378 | 383 | ||
| 379 | static match_table_t tokens = { | 384 | static match_table_t tokens = { |
| 380 | {Opt_rd_pages, "rd_pages=%d"}, | 385 | {Opt_rd_pages, "rd_pages=%d"}, |
| 386 | {Opt_rd_nullio, "rd_nullio=%d"}, | ||
| 381 | {Opt_err, NULL} | 387 | {Opt_err, NULL} |
| 382 | }; | 388 | }; |
| 383 | 389 | ||
| @@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, | |||
| 408 | " Count: %u\n", rd_dev->rd_page_count); | 414 | " Count: %u\n", rd_dev->rd_page_count); |
| 409 | rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; | 415 | rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; |
| 410 | break; | 416 | break; |
| 417 | case Opt_rd_nullio: | ||
| 418 | match_int(args, &arg); | ||
| 419 | if (arg != 1) | ||
| 420 | break; | ||
| 421 | |||
| 422 | pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); | ||
| 423 | rd_dev->rd_flags |= RDF_NULLIO; | ||
| 424 | break; | ||
| 411 | default: | 425 | default: |
| 412 | break; | 426 | break; |
| 413 | } | 427 | } |
| @@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) | |||
| 424 | ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", | 438 | ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", |
| 425 | rd_dev->rd_dev_id); | 439 | rd_dev->rd_dev_id); |
| 426 | bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" | 440 | bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" |
| 427 | " SG_table_count: %u\n", rd_dev->rd_page_count, | 441 | " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, |
| 428 | PAGE_SIZE, rd_dev->sg_table_count); | 442 | PAGE_SIZE, rd_dev->sg_table_count, |
| 443 | !!(rd_dev->rd_flags & RDF_NULLIO)); | ||
| 429 | return bl; | 444 | return bl; |
| 430 | } | 445 | } |
| 431 | 446 | ||
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 933b38b6e563..1789d1e14395 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
| @@ -22,6 +22,7 @@ struct rd_dev_sg_table { | |||
| 22 | } ____cacheline_aligned; | 22 | } ____cacheline_aligned; |
| 23 | 23 | ||
| 24 | #define RDF_HAS_PAGE_COUNT 0x01 | 24 | #define RDF_HAS_PAGE_COUNT 0x01 |
| 25 | #define RDF_NULLIO 0x02 | ||
| 25 | 26 | ||
| 26 | struct rd_dev { | 27 | struct rd_dev { |
| 27 | struct se_device dev; | 28 | struct se_device dev; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f8388b4024aa..4a793362309d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | |||
| 2163 | if (wait_for_tasks) | 2163 | if (wait_for_tasks) |
| 2164 | transport_wait_for_tasks(cmd); | 2164 | transport_wait_for_tasks(cmd); |
| 2165 | 2165 | ||
| 2166 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | ||
| 2167 | |||
| 2168 | if (cmd->se_lun) | 2166 | if (cmd->se_lun) |
| 2169 | transport_lun_remove_cmd(cmd); | 2167 | transport_lun_remove_cmd(cmd); |
| 2170 | 2168 | ||
| @@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref) | |||
| 2213 | { | 2211 | { |
| 2214 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2212 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
| 2215 | struct se_session *se_sess = se_cmd->se_sess; | 2213 | struct se_session *se_sess = se_cmd->se_sess; |
| 2216 | unsigned long flags; | ||
| 2217 | 2214 | ||
| 2218 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | ||
| 2219 | if (list_empty(&se_cmd->se_cmd_list)) { | 2215 | if (list_empty(&se_cmd->se_cmd_list)) { |
| 2220 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2216 | spin_unlock(&se_sess->sess_cmd_lock); |
| 2221 | se_cmd->se_tfo->release_cmd(se_cmd); | 2217 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2222 | return; | 2218 | return; |
| 2223 | } | 2219 | } |
| 2224 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | 2220 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { |
| 2225 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2221 | spin_unlock(&se_sess->sess_cmd_lock); |
| 2226 | complete(&se_cmd->cmd_wait_comp); | 2222 | complete(&se_cmd->cmd_wait_comp); |
| 2227 | return; | 2223 | return; |
| 2228 | } | 2224 | } |
| 2229 | list_del(&se_cmd->se_cmd_list); | 2225 | list_del(&se_cmd->se_cmd_list); |
| 2230 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2226 | spin_unlock(&se_sess->sess_cmd_lock); |
| 2231 | 2227 | ||
| 2232 | se_cmd->se_tfo->release_cmd(se_cmd); | 2228 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2233 | } | 2229 | } |
| @@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref) | |||
| 2238 | */ | 2234 | */ |
| 2239 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | 2235 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) |
| 2240 | { | 2236 | { |
| 2241 | return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); | 2237 | return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, |
| 2238 | &se_sess->sess_cmd_lock); | ||
| 2242 | } | 2239 | } |
| 2243 | EXPORT_SYMBOL(target_put_sess_cmd); | 2240 | EXPORT_SYMBOL(target_put_sess_cmd); |
| 2244 | 2241 | ||
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 5b4d75fd7b49..54ffd64ca3f7 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c | |||
| @@ -169,21 +169,11 @@ static int armada_thermal_probe(struct platform_device *pdev) | |||
| 169 | return -ENOMEM; | 169 | return -ENOMEM; |
| 170 | 170 | ||
| 171 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 171 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 172 | if (!res) { | ||
| 173 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
| 174 | return -ENODEV; | ||
| 175 | } | ||
| 176 | |||
| 177 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); | 172 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); |
| 178 | if (IS_ERR(priv->sensor)) | 173 | if (IS_ERR(priv->sensor)) |
| 179 | return PTR_ERR(priv->sensor); | 174 | return PTR_ERR(priv->sensor); |
| 180 | 175 | ||
| 181 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 176 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 182 | if (!res) { | ||
| 183 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
| 184 | return -ENODEV; | ||
| 185 | } | ||
| 186 | |||
| 187 | priv->control = devm_ioremap_resource(&pdev->dev, res); | 177 | priv->control = devm_ioremap_resource(&pdev->dev, res); |
| 188 | if (IS_ERR(priv->control)) | 178 | if (IS_ERR(priv->control)) |
| 189 | return PTR_ERR(priv->control); | 179 | return PTR_ERR(priv->control); |
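Editor's note on the armada_thermal.c hunk above, and on the matching cleanups below in dove_thermal, exynos_thermal, chipidea, bcm63xx_udc, ohci-nxp, the USB PHY drivers, OMAP HDMI/VRFB, omap_hdq and the watchdog drivers: devm_ioremap_resource() already validates its resource argument, logging an error and returning ERR_PTR(-EINVAL) when it is NULL, so the explicit "Failed to get platform resource" checks after platform_get_resource() are redundant. The resulting probe pattern looks like this (sketch; foo_probe and the resource index are illustrative, not taken from any of these drivers):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* devm_ioremap_resource() does the NULL/validity checking and the
	 * error reporting itself, so the result of platform_get_resource()
	 * can be handed to it directly. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... device-specific setup using 'base' ... */
	return 0;
}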
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 4b15a5f270dc..a088d1365ca5 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c | |||
| @@ -149,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev) | |||
| 149 | return PTR_ERR(priv->sensor); | 149 | return PTR_ERR(priv->sensor); |
| 150 | 150 | ||
| 151 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 151 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 152 | if (!res) { | ||
| 153 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
| 154 | return -ENODEV; | ||
| 155 | } | ||
| 156 | priv->control = devm_ioremap_resource(&pdev->dev, res); | 152 | priv->control = devm_ioremap_resource(&pdev->dev, res); |
| 157 | if (IS_ERR(priv->control)) | 153 | if (IS_ERR(priv->control)) |
| 158 | return PTR_ERR(priv->control); | 154 | return PTR_ERR(priv->control); |
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index d20ce9e61403..788b1ddcac6c 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c | |||
| @@ -925,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) | |||
| 925 | INIT_WORK(&data->irq_work, exynos_tmu_work); | 925 | INIT_WORK(&data->irq_work, exynos_tmu_work); |
| 926 | 926 | ||
| 927 | data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 927 | data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 928 | if (!data->mem) { | ||
| 929 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
| 930 | return -ENOENT; | ||
| 931 | } | ||
| 932 | |||
| 933 | data->base = devm_ioremap_resource(&pdev->dev, data->mem); | 928 | data->base = devm_ioremap_resource(&pdev->dev, data->mem); |
| 934 | if (IS_ERR(data->base)) | 929 | if (IS_ERR(data->base)) |
| 935 | return PTR_ERR(data->base); | 930 | return PTR_ERR(data->base); |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 30d4f7a783cd..f0b9f6b52b32 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
| @@ -202,26 +202,6 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up) | |||
| 202 | return pdata->get_context_loss_count(up->dev); | 202 | return pdata->get_context_loss_count(up->dev); |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static void serial_omap_set_forceidle(struct uart_omap_port *up) | ||
| 206 | { | ||
| 207 | struct omap_uart_port_info *pdata = up->dev->platform_data; | ||
| 208 | |||
| 209 | if (!pdata || !pdata->set_forceidle) | ||
| 210 | return; | ||
| 211 | |||
| 212 | pdata->set_forceidle(up->dev); | ||
| 213 | } | ||
| 214 | |||
| 215 | static void serial_omap_set_noidle(struct uart_omap_port *up) | ||
| 216 | { | ||
| 217 | struct omap_uart_port_info *pdata = up->dev->platform_data; | ||
| 218 | |||
| 219 | if (!pdata || !pdata->set_noidle) | ||
| 220 | return; | ||
| 221 | |||
| 222 | pdata->set_noidle(up->dev); | ||
| 223 | } | ||
| 224 | |||
| 225 | static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) | 205 | static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) |
| 226 | { | 206 | { |
| 227 | struct omap_uart_port_info *pdata = up->dev->platform_data; | 207 | struct omap_uart_port_info *pdata = up->dev->platform_data; |
| @@ -298,8 +278,6 @@ static void serial_omap_stop_tx(struct uart_port *port) | |||
| 298 | serial_out(up, UART_IER, up->ier); | 278 | serial_out(up, UART_IER, up->ier); |
| 299 | } | 279 | } |
| 300 | 280 | ||
| 301 | serial_omap_set_forceidle(up); | ||
| 302 | |||
| 303 | pm_runtime_mark_last_busy(up->dev); | 281 | pm_runtime_mark_last_busy(up->dev); |
| 304 | pm_runtime_put_autosuspend(up->dev); | 282 | pm_runtime_put_autosuspend(up->dev); |
| 305 | } | 283 | } |
| @@ -364,7 +342,6 @@ static void serial_omap_start_tx(struct uart_port *port) | |||
| 364 | 342 | ||
| 365 | pm_runtime_get_sync(up->dev); | 343 | pm_runtime_get_sync(up->dev); |
| 366 | serial_omap_enable_ier_thri(up); | 344 | serial_omap_enable_ier_thri(up); |
| 367 | serial_omap_set_noidle(up); | ||
| 368 | pm_runtime_mark_last_busy(up->dev); | 345 | pm_runtime_mark_last_busy(up->dev); |
| 369 | pm_runtime_put_autosuspend(up->dev); | 346 | pm_runtime_put_autosuspend(up->dev); |
| 370 | } | 347 | } |
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 450107e5f657..49b098bedf9b 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
| @@ -370,11 +370,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) | |||
| 370 | } | 370 | } |
| 371 | 371 | ||
| 372 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 372 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 373 | if (!res) { | ||
| 374 | dev_err(dev, "missing resource\n"); | ||
| 375 | return -ENODEV; | ||
| 376 | } | ||
| 377 | |||
| 378 | base = devm_ioremap_resource(dev, res); | 373 | base = devm_ioremap_resource(dev, res); |
| 379 | if (IS_ERR(base)) | 374 | if (IS_ERR(base)) |
| 380 | return PTR_ERR(base); | 375 | return PTR_ERR(base); |
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c index 6e6518264c42..792297798147 100644 --- a/drivers/usb/gadget/bcm63xx_udc.c +++ b/drivers/usb/gadget/bcm63xx_udc.c | |||
| @@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev) | |||
| 2334 | } | 2334 | } |
| 2335 | 2335 | ||
| 2336 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2336 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2337 | if (!res) { | ||
| 2338 | dev_err(dev, "error finding USBD resource\n"); | ||
| 2339 | return -ENXIO; | ||
| 2340 | } | ||
| 2341 | |||
| 2342 | udc->usbd_regs = devm_ioremap_resource(dev, res); | 2337 | udc->usbd_regs = devm_ioremap_resource(dev, res); |
| 2343 | if (IS_ERR(udc->usbd_regs)) | 2338 | if (IS_ERR(udc->usbd_regs)) |
| 2344 | return PTR_ERR(udc->usbd_regs); | 2339 | return PTR_ERR(udc->usbd_regs); |
| 2345 | 2340 | ||
| 2346 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 2341 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 2347 | if (!res) { | ||
| 2348 | dev_err(dev, "error finding IUDMA resource\n"); | ||
| 2349 | return -ENXIO; | ||
| 2350 | } | ||
| 2351 | |||
| 2352 | udc->iudma_regs = devm_ioremap_resource(dev, res); | 2342 | udc->iudma_regs = devm_ioremap_resource(dev, res); |
| 2353 | if (IS_ERR(udc->iudma_regs)) | 2343 | if (IS_ERR(udc->iudma_regs)) |
| 2354 | return PTR_ERR(udc->iudma_regs); | 2344 | return PTR_ERR(udc->iudma_regs); |
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index f4988fbe78e7..f303cb04c2dd 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c | |||
| @@ -300,12 +300,6 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) | |||
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 302 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 303 | if (!res) { | ||
| 304 | dev_err(&pdev->dev, "Failed to get MEM resource\n"); | ||
| 305 | ret = -ENOMEM; | ||
| 306 | goto out8; | ||
| 307 | } | ||
| 308 | |||
| 309 | hcd->regs = devm_ioremap_resource(&pdev->dev, res); | 303 | hcd->regs = devm_ioremap_resource(&pdev->dev, res); |
| 310 | if (IS_ERR(hcd->regs)) { | 304 | if (IS_ERR(hcd->regs)) { |
| 311 | ret = PTR_ERR(hcd->regs); | 305 | ret = PTR_ERR(hcd->regs); |
diff --git a/drivers/usb/phy/phy-mv-u3d-usb.c b/drivers/usb/phy/phy-mv-u3d-usb.c index f7838a43347c..1568ea63e338 100644 --- a/drivers/usb/phy/phy-mv-u3d-usb.c +++ b/drivers/usb/phy/phy-mv-u3d-usb.c | |||
| @@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev) | |||
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 280 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 281 | if (!res) { | ||
| 282 | dev_err(dev, "missing mem resource\n"); | ||
| 283 | return -ENODEV; | ||
| 284 | } | ||
| 285 | |||
| 286 | phy_base = devm_ioremap_resource(dev, res); | 281 | phy_base = devm_ioremap_resource(dev, res); |
| 287 | if (IS_ERR(phy_base)) | 282 | if (IS_ERR(phy_base)) |
| 288 | return PTR_ERR(phy_base); | 283 | return PTR_ERR(phy_base); |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 9d4381e64d51..eb25dd2a1429 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
| @@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev) | |||
| 130 | int ret; | 130 | int ret; |
| 131 | 131 | ||
| 132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 133 | if (!res) { | ||
| 134 | dev_err(&pdev->dev, "can't get device resources\n"); | ||
| 135 | return -ENOENT; | ||
| 136 | } | ||
| 137 | |||
| 138 | base = devm_ioremap_resource(&pdev->dev, res); | 133 | base = devm_ioremap_resource(&pdev->dev, res); |
| 139 | if (IS_ERR(base)) | 134 | if (IS_ERR(base)) |
| 140 | return PTR_ERR(base); | 135 | return PTR_ERR(base); |
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c index 45ffe036dacc..9d5e273abcc7 100644 --- a/drivers/usb/phy/phy-samsung-usb2.c +++ b/drivers/usb/phy/phy-samsung-usb2.c | |||
| @@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev) | |||
| 363 | int ret; | 363 | int ret; |
| 364 | 364 | ||
| 365 | phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 365 | phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 366 | if (!phy_mem) { | ||
| 367 | dev_err(dev, "%s: missing mem resource\n", __func__); | ||
| 368 | return -ENODEV; | ||
| 369 | } | ||
| 370 | |||
| 371 | phy_base = devm_ioremap_resource(dev, phy_mem); | 366 | phy_base = devm_ioremap_resource(dev, phy_mem); |
| 372 | if (IS_ERR(phy_base)) | 367 | if (IS_ERR(phy_base)) |
| 373 | return PTR_ERR(phy_base); | 368 | return PTR_ERR(phy_base); |
diff --git a/drivers/usb/phy/phy-samsung-usb3.c b/drivers/usb/phy/phy-samsung-usb3.c index 133f3d0c554f..5a9efcbcb532 100644 --- a/drivers/usb/phy/phy-samsung-usb3.c +++ b/drivers/usb/phy/phy-samsung-usb3.c | |||
| @@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev) | |||
| 239 | int ret; | 239 | int ret; |
| 240 | 240 | ||
| 241 | phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 241 | phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 242 | if (!phy_mem) { | ||
| 243 | dev_err(dev, "%s: missing mem resource\n", __func__); | ||
| 244 | return -ENODEV; | ||
| 245 | } | ||
| 246 | |||
| 247 | phy_base = devm_ioremap_resource(dev, phy_mem); | 242 | phy_base = devm_ioremap_resource(dev, phy_mem); |
| 248 | if (IS_ERR(phy_base)) | 243 | if (IS_ERR(phy_base)) |
| 249 | return PTR_ERR(phy_base); | 244 | return PTR_ERR(phy_base); |
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index bff0775e258c..5174ebac288d 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Since these may be in userspace, we use (inline) accessors. | 4 | * Since these may be in userspace, we use (inline) accessors. |
| 5 | */ | 5 | */ |
| 6 | #include <linux/module.h> | ||
| 6 | #include <linux/vringh.h> | 7 | #include <linux/vringh.h> |
| 7 | #include <linux/virtio_ring.h> | 8 | #include <linux/virtio_ring.h> |
| 8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| @@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh) | |||
| 1005 | return __vringh_need_notify(vrh, getu16_kern); | 1006 | return __vringh_need_notify(vrh, getu16_kern); |
| 1006 | } | 1007 | } |
| 1007 | EXPORT_SYMBOL(vringh_need_notify_kern); | 1008 | EXPORT_SYMBOL(vringh_need_notify_kern); |
| 1009 | |||
| 1010 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 17f4d55c621c..a109934c0478 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c | |||
| @@ -1065,10 +1065,6 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) | |||
| 1065 | mutex_init(&hdmi.ip_data.lock); | 1065 | mutex_init(&hdmi.ip_data.lock); |
| 1066 | 1066 | ||
| 1067 | res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0); | 1067 | res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0); |
| 1068 | if (!res) { | ||
| 1069 | DSSERR("can't get IORESOURCE_MEM HDMI\n"); | ||
| 1070 | return -EINVAL; | ||
| 1071 | } | ||
| 1072 | 1068 | ||
| 1073 | /* Base address taken from platform */ | 1069 | /* Base address taken from platform */ |
| 1074 | hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res); | 1070 | hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res); |
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c index 5261229c79af..f346b02eee1d 100644 --- a/drivers/video/omap2/vrfb.c +++ b/drivers/video/omap2/vrfb.c | |||
| @@ -353,11 +353,6 @@ static int __init vrfb_probe(struct platform_device *pdev) | |||
| 353 | /* first resource is the register res, the rest are vrfb contexts */ | 353 | /* first resource is the register res, the rest are vrfb contexts */ |
| 354 | 354 | ||
| 355 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 355 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 356 | if (!mem) { | ||
| 357 | dev_err(&pdev->dev, "can't get vrfb base address\n"); | ||
| 358 | return -EINVAL; | ||
| 359 | } | ||
| 360 | |||
| 361 | vrfb_base = devm_ioremap_resource(&pdev->dev, mem); | 356 | vrfb_base = devm_ioremap_resource(&pdev->dev, mem); |
| 362 | if (IS_ERR(vrfb_base)) | 357 | if (IS_ERR(vrfb_base)) |
| 363 | return PTR_ERR(vrfb_base); | 358 | return PTR_ERR(vrfb_base); |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index db2390aed387..6e94d8dd3d00 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
| @@ -555,11 +555,6 @@ static int omap_hdq_probe(struct platform_device *pdev) | |||
| 555 | platform_set_drvdata(pdev, hdq_data); | 555 | platform_set_drvdata(pdev, hdq_data); |
| 556 | 556 | ||
| 557 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 557 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 558 | if (!res) { | ||
| 559 | dev_dbg(&pdev->dev, "unable to get resource\n"); | ||
| 560 | return -ENXIO; | ||
| 561 | } | ||
| 562 | |||
| 563 | hdq_data->hdq_base = devm_ioremap_resource(dev, res); | 558 | hdq_data->hdq_base = devm_ioremap_resource(dev, res); |
| 564 | if (IS_ERR(hdq_data->hdq_base)) | 559 | if (IS_ERR(hdq_data->hdq_base)) |
| 565 | return PTR_ERR(hdq_data->hdq_base); | 560 | return PTR_ERR(hdq_data->hdq_base); |
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index d184c48a0482..37cb09b27b63 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c | |||
| @@ -248,11 +248,6 @@ static int ath79_wdt_probe(struct platform_device *pdev) | |||
| 248 | return -EBUSY; | 248 | return -EBUSY; |
| 249 | 249 | ||
| 250 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 250 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 251 | if (!res) { | ||
| 252 | dev_err(&pdev->dev, "no memory resource found\n"); | ||
| 253 | return -EINVAL; | ||
| 254 | } | ||
| 255 | |||
| 256 | wdt_base = devm_ioremap_resource(&pdev->dev, res); | 251 | wdt_base = devm_ioremap_resource(&pdev->dev, res); |
| 257 | if (IS_ERR(wdt_base)) | 252 | if (IS_ERR(wdt_base)) |
| 258 | return PTR_ERR(wdt_base); | 253 | return PTR_ERR(wdt_base); |
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 100d4fbfde2a..bead7740c86a 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c | |||
| @@ -217,11 +217,6 @@ static int davinci_wdt_probe(struct platform_device *pdev) | |||
| 217 | dev_info(dev, "heartbeat %d sec\n", heartbeat); | 217 | dev_info(dev, "heartbeat %d sec\n", heartbeat); |
| 218 | 218 | ||
| 219 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 219 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 220 | if (wdt_mem == NULL) { | ||
| 221 | dev_err(dev, "failed to get memory region resource\n"); | ||
| 222 | return -ENOENT; | ||
| 223 | } | ||
| 224 | |||
| 225 | wdt_base = devm_ioremap_resource(dev, wdt_mem); | 220 | wdt_base = devm_ioremap_resource(dev, wdt_mem); |
| 226 | if (IS_ERR(wdt_base)) | 221 | if (IS_ERR(wdt_base)) |
| 227 | return PTR_ERR(wdt_base); | 222 | return PTR_ERR(wdt_base); |
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index ff908823688c..62946c2cb4f8 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c | |||
| @@ -257,11 +257,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) | |||
| 257 | struct resource *res; | 257 | struct resource *res; |
| 258 | 258 | ||
| 259 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 259 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 260 | if (!res) { | ||
| 261 | dev_err(&pdev->dev, "can't get device resources\n"); | ||
| 262 | return -ENODEV; | ||
| 263 | } | ||
| 264 | |||
| 265 | imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res); | 260 | imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res); |
| 266 | if (IS_ERR(imx2_wdt.base)) | 261 | if (IS_ERR(imx2_wdt.base)) |
| 267 | return PTR_ERR(imx2_wdt.base); | 262 | return PTR_ERR(imx2_wdt.base); |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index f03bf501527f..9e02d60a364b 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
| @@ -19,11 +19,10 @@ config XEN_SELFBALLOONING | |||
| 19 | by the current usage of anonymous memory ("committed AS") and | 19 | by the current usage of anonymous memory ("committed AS") and |
| 20 | controlled by various sysfs-settable parameters. Configuring | 20 | controlled by various sysfs-settable parameters. Configuring |
| 21 | FRONTSWAP is highly recommended; if it is not configured, self- | 21 | FRONTSWAP is highly recommended; if it is not configured, self- |
| 22 | ballooning is disabled by default but can be enabled with the | 22 | ballooning is disabled by default. If FRONTSWAP is configured, |
| 23 | 'selfballooning' kernel boot parameter. If FRONTSWAP is configured, | ||
| 24 | frontswap-selfshrinking is enabled by default but can be disabled | 23 | frontswap-selfshrinking is enabled by default but can be disabled |
| 25 | with the 'noselfshrink' kernel boot parameter; and self-ballooning | 24 | with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning |
| 26 | is enabled by default but can be disabled with the 'noselfballooning' | 25 | is enabled by default but can be disabled with the 'tmem.selfballooning=0' |
| 27 | kernel boot parameter. Note that systems without a sufficiently | 26 | kernel boot parameter. Note that systems without a sufficiently |
| 28 | large swap device should not enable self-ballooning. | 27 | large swap device should not enable self-ballooning. |
| 29 | 28 | ||
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index a56776dbe095..930fb6817901 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
| 407 | nr_pages = ARRAY_SIZE(frame_list); | 407 | nr_pages = ARRAY_SIZE(frame_list); |
| 408 | 408 | ||
| 409 | for (i = 0; i < nr_pages; i++) { | 409 | for (i = 0; i < nr_pages; i++) { |
| 410 | if ((page = alloc_page(gfp)) == NULL) { | 410 | page = alloc_page(gfp); |
| 411 | if (page == NULL) { | ||
| 411 | nr_pages = i; | 412 | nr_pages = i; |
| 412 | state = BP_EAGAIN; | 413 | state = BP_EAGAIN; |
| 413 | break; | 414 | break; |
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index ca2b00e9d558..2cfc24d76fc5 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
| @@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma) | |||
| 504 | struct page **pages = vma->vm_private_data; | 504 | struct page **pages = vma->vm_private_data; |
| 505 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 505 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
| 506 | 506 | ||
| 507 | if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages)) | 507 | if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) |
| 508 | return; | 508 | return; |
| 509 | 509 | ||
| 510 | xen_unmap_domain_mfn_range(vma, numpgs, pages); | 510 | xen_unmap_domain_mfn_range(vma, numpgs, pages); |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index e3600be4e7fa..18e8bd8fa947 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
| @@ -11,11 +11,7 @@ | |||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
| 13 | #include <linux/cleancache.h> | 13 | #include <linux/cleancache.h> |
| 14 | |||
| 15 | /* temporary ifdef until include/linux/frontswap.h is upstream */ | ||
| 16 | #ifdef CONFIG_FRONTSWAP | ||
| 17 | #include <linux/frontswap.h> | 14 | #include <linux/frontswap.h> |
| 18 | #endif | ||
| 19 | 15 | ||
| 20 | #include <xen/xen.h> | 16 | #include <xen/xen.h> |
| 21 | #include <xen/interface/xen.h> | 17 | #include <xen/interface/xen.h> |
| @@ -24,6 +20,34 @@ | |||
| 24 | #include <asm/xen/hypervisor.h> | 20 | #include <asm/xen/hypervisor.h> |
| 25 | #include <xen/tmem.h> | 21 | #include <xen/tmem.h> |
| 26 | 22 | ||
| 23 | #ifndef CONFIG_XEN_TMEM_MODULE | ||
| 24 | bool __read_mostly tmem_enabled = false; | ||
| 25 | |||
| 26 | static int __init enable_tmem(char *s) | ||
| 27 | { | ||
| 28 | tmem_enabled = true; | ||
| 29 | return 1; | ||
| 30 | } | ||
| 31 | __setup("tmem", enable_tmem); | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #ifdef CONFIG_CLEANCACHE | ||
| 35 | static bool cleancache __read_mostly = true; | ||
| 36 | module_param(cleancache, bool, S_IRUGO); | ||
| 37 | static bool selfballooning __read_mostly = true; | ||
| 38 | module_param(selfballooning, bool, S_IRUGO); | ||
| 39 | #endif /* CONFIG_CLEANCACHE */ | ||
| 40 | |||
| 41 | #ifdef CONFIG_FRONTSWAP | ||
| 42 | static bool frontswap __read_mostly = true; | ||
| 43 | module_param(frontswap, bool, S_IRUGO); | ||
| 44 | #endif /* CONFIG_FRONTSWAP */ | ||
| 45 | |||
| 46 | #ifdef CONFIG_XEN_SELFBALLOONING | ||
| 47 | static bool selfshrinking __read_mostly = true; | ||
| 48 | module_param(selfshrinking, bool, S_IRUGO); | ||
| 49 | #endif /* CONFIG_XEN_SELFBALLOONING */ | ||
| 50 | |||
| 27 | #define TMEM_CONTROL 0 | 51 | #define TMEM_CONTROL 0 |
| 28 | #define TMEM_NEW_POOL 1 | 52 | #define TMEM_NEW_POOL 1 |
| 29 | #define TMEM_DESTROY_POOL 2 | 53 | #define TMEM_DESTROY_POOL 2 |
| @@ -129,16 +153,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid) | |||
| 129 | return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); | 153 | return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); |
| 130 | } | 154 | } |
| 131 | 155 | ||
| 132 | #ifndef CONFIG_XEN_TMEM_MODULE | ||
| 133 | bool __read_mostly tmem_enabled = false; | ||
| 134 | |||
| 135 | static int __init enable_tmem(char *s) | ||
| 136 | { | ||
| 137 | tmem_enabled = true; | ||
| 138 | return 1; | ||
| 139 | } | ||
| 140 | __setup("tmem", enable_tmem); | ||
| 141 | #endif | ||
| 142 | 156 | ||
| 143 | #ifdef CONFIG_CLEANCACHE | 157 | #ifdef CONFIG_CLEANCACHE |
| 144 | static int xen_tmem_destroy_pool(u32 pool_id) | 158 | static int xen_tmem_destroy_pool(u32 pool_id) |
| @@ -230,20 +244,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) | |||
| 230 | return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); | 244 | return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); |
| 231 | } | 245 | } |
| 232 | 246 | ||
| 233 | static bool disable_cleancache __read_mostly; | ||
| 234 | static bool disable_selfballooning __read_mostly; | ||
| 235 | #ifdef CONFIG_XEN_TMEM_MODULE | ||
| 236 | module_param(disable_cleancache, bool, S_IRUGO); | ||
| 237 | module_param(disable_selfballooning, bool, S_IRUGO); | ||
| 238 | #else | ||
| 239 | static int __init no_cleancache(char *s) | ||
| 240 | { | ||
| 241 | disable_cleancache = true; | ||
| 242 | return 1; | ||
| 243 | } | ||
| 244 | __setup("nocleancache", no_cleancache); | ||
| 245 | #endif | ||
| 246 | |||
| 247 | static struct cleancache_ops tmem_cleancache_ops = { | 247 | static struct cleancache_ops tmem_cleancache_ops = { |
| 248 | .put_page = tmem_cleancache_put_page, | 248 | .put_page = tmem_cleancache_put_page, |
| 249 | .get_page = tmem_cleancache_get_page, | 249 | .get_page = tmem_cleancache_get_page, |
| @@ -361,20 +361,6 @@ static void tmem_frontswap_init(unsigned ignored) | |||
| 361 | xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); | 361 | xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); |
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | static bool disable_frontswap __read_mostly; | ||
| 365 | static bool disable_frontswap_selfshrinking __read_mostly; | ||
| 366 | #ifdef CONFIG_XEN_TMEM_MODULE | ||
| 367 | module_param(disable_frontswap, bool, S_IRUGO); | ||
| 368 | module_param(disable_frontswap_selfshrinking, bool, S_IRUGO); | ||
| 369 | #else | ||
| 370 | static int __init no_frontswap(char *s) | ||
| 371 | { | ||
| 372 | disable_frontswap = true; | ||
| 373 | return 1; | ||
| 374 | } | ||
| 375 | __setup("nofrontswap", no_frontswap); | ||
| 376 | #endif | ||
| 377 | |||
| 378 | static struct frontswap_ops tmem_frontswap_ops = { | 364 | static struct frontswap_ops tmem_frontswap_ops = { |
| 379 | .store = tmem_frontswap_store, | 365 | .store = tmem_frontswap_store, |
| 380 | .load = tmem_frontswap_load, | 366 | .load = tmem_frontswap_load, |
| @@ -382,8 +368,6 @@ static struct frontswap_ops tmem_frontswap_ops = { | |||
| 382 | .invalidate_area = tmem_frontswap_flush_area, | 368 | .invalidate_area = tmem_frontswap_flush_area, |
| 383 | .init = tmem_frontswap_init | 369 | .init = tmem_frontswap_init |
| 384 | }; | 370 | }; |
| 385 | #else /* CONFIG_FRONTSWAP */ | ||
| 386 | #define disable_frontswap_selfshrinking 1 | ||
| 387 | #endif | 371 | #endif |
| 388 | 372 | ||
| 389 | static int xen_tmem_init(void) | 373 | static int xen_tmem_init(void) |
| @@ -391,7 +375,7 @@ static int xen_tmem_init(void) | |||
| 391 | if (!xen_domain()) | 375 | if (!xen_domain()) |
| 392 | return 0; | 376 | return 0; |
| 393 | #ifdef CONFIG_FRONTSWAP | 377 | #ifdef CONFIG_FRONTSWAP |
| 394 | if (tmem_enabled && !disable_frontswap) { | 378 | if (tmem_enabled && frontswap) { |
| 395 | char *s = ""; | 379 | char *s = ""; |
| 396 | struct frontswap_ops *old_ops = | 380 | struct frontswap_ops *old_ops = |
| 397 | frontswap_register_ops(&tmem_frontswap_ops); | 381 | frontswap_register_ops(&tmem_frontswap_ops); |
| @@ -408,7 +392,7 @@ static int xen_tmem_init(void) | |||
| 408 | #endif | 392 | #endif |
| 409 | #ifdef CONFIG_CLEANCACHE | 393 | #ifdef CONFIG_CLEANCACHE |
| 410 | BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); | 394 | BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); |
| 411 | if (tmem_enabled && !disable_cleancache) { | 395 | if (tmem_enabled && cleancache) { |
| 412 | char *s = ""; | 396 | char *s = ""; |
| 413 | struct cleancache_ops *old_ops = | 397 | struct cleancache_ops *old_ops = |
| 414 | cleancache_register_ops(&tmem_cleancache_ops); | 398 | cleancache_register_ops(&tmem_cleancache_ops); |
| @@ -419,8 +403,15 @@ static int xen_tmem_init(void) | |||
| 419 | } | 403 | } |
| 420 | #endif | 404 | #endif |
| 421 | #ifdef CONFIG_XEN_SELFBALLOONING | 405 | #ifdef CONFIG_XEN_SELFBALLOONING |
| 422 | xen_selfballoon_init(!disable_selfballooning, | 406 | /* |
| 423 | !disable_frontswap_selfshrinking); | 407 | * There is no point of driving pages to the swap system if they |
| 408 | * aren't going anywhere in tmem universe. | ||
| 409 | */ | ||
| 410 | if (!frontswap) { | ||
| 411 | selfshrinking = false; | ||
| 412 | selfballooning = false; | ||
| 413 | } | ||
| 414 | xen_selfballoon_init(selfballooning, selfshrinking); | ||
| 424 | #endif | 415 | #endif |
| 425 | return 0; | 416 | return 0; |
| 426 | } | 417 | } |
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index f2ef569c7cc1..f70984a892aa 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
| @@ -53,15 +53,12 @@ | |||
| 53 | * System configuration note: Selfballooning should not be enabled on | 53 | * System configuration note: Selfballooning should not be enabled on |
| 54 | * systems without a sufficiently large swap device configured; for best | 54 | * systems without a sufficiently large swap device configured; for best |
| 55 | * results, it is recommended that total swap be increased by the size | 55 | * results, it is recommended that total swap be increased by the size |
| 56 | * of the guest memory. Also, while technically not required to be | 56 | * of the guest memory. Note, that selfballooning should be disabled by default |
| 57 | * configured, it is highly recommended that frontswap also be configured | 57 | * if frontswap is not configured. Similarly selfballooning should be enabled |
| 58 | * and enabled when selfballooning is running. So, selfballooning | 58 | * by default if frontswap is configured and can be disabled with the |
| 59 | * is disabled by default if frontswap is not configured and can only | 59 | * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is |
| 60 | * be enabled with the "selfballooning" kernel boot option; similarly | 60 | * configured, frontswap-selfshrinking can be disabled with the |
| 61 | * selfballooning is enabled by default if frontswap is configured and | 61 | * "tmem.selfshrink=0" kernel boot option. |
| 62 | * can be disabled with the "noselfballooning" kernel boot option. Finally, | ||
| 63 | * when frontswap is configured, frontswap-selfshrinking can be disabled | ||
| 64 | * with the "noselfshrink" kernel boot option. | ||
| 65 | * | 62 | * |
| 66 | * Selfballooning is disallowed in domain0 and force-disabled. | 63 | * Selfballooning is disallowed in domain0 and force-disabled. |
| 67 | * | 64 | * |
| @@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); | |||
| 120 | /* Enable/disable with sysfs. */ | 117 | /* Enable/disable with sysfs. */ |
| 121 | static bool frontswap_selfshrinking __read_mostly; | 118 | static bool frontswap_selfshrinking __read_mostly; |
| 122 | 119 | ||
| 123 | /* Enable/disable with kernel boot option. */ | ||
| 124 | static bool use_frontswap_selfshrink = true; | ||
| 125 | |||
| 126 | /* | 120 | /* |
| 127 | * The default values for the following parameters were deemed reasonable | 121 | * The default values for the following parameters were deemed reasonable |
| 128 | * by experimentation, may be workload-dependent, and can all be | 122 | * by experimentation, may be workload-dependent, and can all be |
| @@ -176,35 +170,6 @@ static void frontswap_selfshrink(void) | |||
| 176 | frontswap_shrink(tgt_frontswap_pages); | 170 | frontswap_shrink(tgt_frontswap_pages); |
| 177 | } | 171 | } |
| 178 | 172 | ||
| 179 | static int __init xen_nofrontswap_selfshrink_setup(char *s) | ||
| 180 | { | ||
| 181 | use_frontswap_selfshrink = false; | ||
| 182 | return 1; | ||
| 183 | } | ||
| 184 | |||
| 185 | __setup("noselfshrink", xen_nofrontswap_selfshrink_setup); | ||
| 186 | |||
| 187 | /* Disable with kernel boot option. */ | ||
| 188 | static bool use_selfballooning = true; | ||
| 189 | |||
| 190 | static int __init xen_noselfballooning_setup(char *s) | ||
| 191 | { | ||
| 192 | use_selfballooning = false; | ||
| 193 | return 1; | ||
| 194 | } | ||
| 195 | |||
| 196 | __setup("noselfballooning", xen_noselfballooning_setup); | ||
| 197 | #else /* !CONFIG_FRONTSWAP */ | ||
| 198 | /* Enable with kernel boot option. */ | ||
| 199 | static bool use_selfballooning; | ||
| 200 | |||
| 201 | static int __init xen_selfballooning_setup(char *s) | ||
| 202 | { | ||
| 203 | use_selfballooning = true; | ||
| 204 | return 1; | ||
| 205 | } | ||
| 206 | |||
| 207 | __setup("selfballooning", xen_selfballooning_setup); | ||
| 208 | #endif /* CONFIG_FRONTSWAP */ | 173 | #endif /* CONFIG_FRONTSWAP */ |
| 209 | 174 | ||
| 210 | #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) | 175 | #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) |
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c index d73000800762..a6f42fc01407 100644 --- a/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/drivers/xen/xenbus/xenbus_dev_backend.c | |||
| @@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid) | |||
| 70 | return err; | 70 | return err; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) | 73 | static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, |
| 74 | unsigned long data) | ||
| 74 | { | 75 | { |
| 75 | if (!capable(CAP_SYS_ADMIN)) | 76 | if (!capable(CAP_SYS_ADMIN)) |
| 76 | return -EPERM; | 77 | return -EPERM; |
| 77 | 78 | ||
| 78 | switch (cmd) { | 79 | switch (cmd) { |
| 79 | case IOCTL_XENBUS_BACKEND_EVTCHN: | 80 | case IOCTL_XENBUS_BACKEND_EVTCHN: |
| 80 | if (xen_store_evtchn > 0) | 81 | if (xen_store_evtchn > 0) |
| 81 | return xen_store_evtchn; | 82 | return xen_store_evtchn; |
| 82 | return -ENODEV; | 83 | return -ENODEV; |
| 83 | 84 | case IOCTL_XENBUS_BACKEND_SETUP: | |
| 84 | case IOCTL_XENBUS_BACKEND_SETUP: | 85 | return xenbus_alloc(data); |
| 85 | return xenbus_alloc(data); | 86 | default: |
| 86 | 87 | return -ENOTTY; | |
| 87 | default: | ||
| 88 | return -ENOTTY; | ||
| 89 | } | 88 | } |
| 90 | } | 89 | } |
| 91 | 90 | ||
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index b4fb41558111..290e347b6db3 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
| @@ -918,7 +918,8 @@ again: | |||
| 918 | ref->parent, bsz, 0); | 918 | ref->parent, bsz, 0); |
| 919 | if (!eb || !extent_buffer_uptodate(eb)) { | 919 | if (!eb || !extent_buffer_uptodate(eb)) { |
| 920 | free_extent_buffer(eb); | 920 | free_extent_buffer(eb); |
| 921 | return -EIO; | 921 | ret = -EIO; |
| 922 | goto out; | ||
| 922 | } | 923 | } |
| 923 | ret = find_extent_in_eb(eb, bytenr, | 924 | ret = find_extent_in_eb(eb, bytenr, |
| 924 | *extent_item_pos, &eie); | 925 | *extent_item_pos, &eie); |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 18af6f48781a..1431a6965017 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
| @@ -1700,7 +1700,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, | |||
| 1700 | unsigned int j; | 1700 | unsigned int j; |
| 1701 | DECLARE_COMPLETION_ONSTACK(complete); | 1701 | DECLARE_COMPLETION_ONSTACK(complete); |
| 1702 | 1702 | ||
| 1703 | bio = bio_alloc(GFP_NOFS, num_pages - i); | 1703 | bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); |
| 1704 | if (!bio) { | 1704 | if (!bio) { |
| 1705 | printk(KERN_INFO | 1705 | printk(KERN_INFO |
| 1706 | "btrfsic: bio_alloc() for %u pages failed!\n", | 1706 | "btrfsic: bio_alloc() for %u pages failed!\n", |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index de6de8e60b46..02fae7f7e42c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -951,10 +951,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, | |||
| 951 | BUG_ON(ret); /* -ENOMEM */ | 951 | BUG_ON(ret); /* -ENOMEM */ |
| 952 | } | 952 | } |
| 953 | if (new_flags != 0) { | 953 | if (new_flags != 0) { |
| 954 | int level = btrfs_header_level(buf); | ||
| 955 | |||
| 954 | ret = btrfs_set_disk_extent_flags(trans, root, | 956 | ret = btrfs_set_disk_extent_flags(trans, root, |
| 955 | buf->start, | 957 | buf->start, |
| 956 | buf->len, | 958 | buf->len, |
| 957 | new_flags, 0); | 959 | new_flags, level, 0); |
| 958 | if (ret) | 960 | if (ret) |
| 959 | return ret; | 961 | return ret; |
| 960 | } | 962 | } |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 63c328a9ce95..d6dd49b51ba8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -88,12 +88,12 @@ struct btrfs_ordered_sum; | |||
| 88 | /* holds checksums of all the data extents */ | 88 | /* holds checksums of all the data extents */ |
| 89 | #define BTRFS_CSUM_TREE_OBJECTID 7ULL | 89 | #define BTRFS_CSUM_TREE_OBJECTID 7ULL |
| 90 | 90 | ||
| 91 | /* for storing balance parameters in the root tree */ | ||
| 92 | #define BTRFS_BALANCE_OBJECTID -4ULL | ||
| 93 | |||
| 94 | /* holds quota configuration and tracking */ | 91 | /* holds quota configuration and tracking */ |
| 95 | #define BTRFS_QUOTA_TREE_OBJECTID 8ULL | 92 | #define BTRFS_QUOTA_TREE_OBJECTID 8ULL |
| 96 | 93 | ||
| 94 | /* for storing balance parameters in the root tree */ | ||
| 95 | #define BTRFS_BALANCE_OBJECTID -4ULL | ||
| 96 | |||
| 97 | /* orhpan objectid for tracking unlinked/truncated files */ | 97 | /* orhpan objectid for tracking unlinked/truncated files */ |
| 98 | #define BTRFS_ORPHAN_OBJECTID -5ULL | 98 | #define BTRFS_ORPHAN_OBJECTID -5ULL |
| 99 | 99 | ||
| @@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
| 3075 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, | 3075 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, |
| 3076 | struct btrfs_root *root, | 3076 | struct btrfs_root *root, |
| 3077 | u64 bytenr, u64 num_bytes, u64 flags, | 3077 | u64 bytenr, u64 num_bytes, u64 flags, |
| 3078 | int is_data); | 3078 | int level, int is_data); |
| 3079 | int btrfs_free_extent(struct btrfs_trans_handle *trans, | 3079 | int btrfs_free_extent(struct btrfs_trans_handle *trans, |
| 3080 | struct btrfs_root *root, | 3080 | struct btrfs_root *root, |
| 3081 | u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, | 3081 | u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, |
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index f75fcaf79aeb..70b962cc177d 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
| @@ -60,6 +60,7 @@ struct btrfs_delayed_ref_node { | |||
| 60 | struct btrfs_delayed_extent_op { | 60 | struct btrfs_delayed_extent_op { |
| 61 | struct btrfs_disk_key key; | 61 | struct btrfs_disk_key key; |
| 62 | u64 flags_to_set; | 62 | u64 flags_to_set; |
| 63 | int level; | ||
| 63 | unsigned int update_key:1; | 64 | unsigned int update_key:1; |
| 64 | unsigned int update_flags:1; | 65 | unsigned int update_flags:1; |
| 65 | unsigned int is_data:1; | 66 | unsigned int is_data:1; |
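Editor's note on the ctree.c/ctree.h/delayed-ref.h hunks above: the new level field (and the extra int level argument to btrfs_set_disk_extent_flags()) is most likely needed for the skinny-metadata extent format, where a metadata extent's key stores the tree level in key.offset instead of the byte length, so a delayed extent op can no longer rebuild its search key from the ref node alone. A sketch of how such a key would be formed when the op is run (assumption: follows the run_delayed_extent_op() pattern whose beginning is visible at the end of this section; build_extent_op_key is an illustrative helper, not a kernel function):

static void build_extent_op_key(struct btrfs_key *key, u64 bytenr,
				u64 num_bytes, int level, bool metadata)
{
	key->objectid = bytenr;
	if (metadata) {
		/* Skinny metadata: keyed by tree level, not by length. */
		key->type = BTRFS_METADATA_ITEM_KEY;
		key->offset = level;
	} else {
		key->type = BTRFS_EXTENT_ITEM_KEY;
		key->offset = num_bytes;
	}
}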
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 7ba7b3900cb8..65241f32d3f8 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c | |||
| @@ -313,6 +313,11 @@ int btrfs_dev_replace_start(struct btrfs_root *root, | |||
| 313 | struct btrfs_device *tgt_device = NULL; | 313 | struct btrfs_device *tgt_device = NULL; |
| 314 | struct btrfs_device *src_device = NULL; | 314 | struct btrfs_device *src_device = NULL; |
| 315 | 315 | ||
| 316 | if (btrfs_fs_incompat(fs_info, RAID56)) { | ||
| 317 | pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n"); | ||
| 318 | return -EINVAL; | ||
| 319 | } | ||
| 320 | |||
| 316 | switch (args->start.cont_reading_from_srcdev_mode) { | 321 | switch (args->start.cont_reading_from_srcdev_mode) { |
| 317 | case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: | 322 | case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: |
| 318 | case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: | 323 | case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4e9ebe1f1827..e7b3cb5286a5 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset { | |||
| 152 | { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, | 152 | { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, |
| 153 | { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, | 153 | { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, |
| 154 | { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, | 154 | { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, |
| 155 | { .id = BTRFS_ORPHAN_OBJECTID, .name_stem = "orphan" }, | 155 | { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, |
| 156 | { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, | 156 | { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, |
| 157 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, | 157 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, |
| 158 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, | 158 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, |
| @@ -1513,7 +1513,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, | |||
| 1513 | } | 1513 | } |
| 1514 | 1514 | ||
| 1515 | root->commit_root = btrfs_root_node(root); | 1515 | root->commit_root = btrfs_root_node(root); |
| 1516 | BUG_ON(!root->node); /* -ENOMEM */ | ||
| 1517 | out: | 1516 | out: |
| 1518 | if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { | 1517 | if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { |
| 1519 | root->ref_cows = 1; | 1518 | root->ref_cows = 1; |
| @@ -1988,30 +1987,33 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) | |||
| 1988 | { | 1987 | { |
| 1989 | free_extent_buffer(info->tree_root->node); | 1988 | free_extent_buffer(info->tree_root->node); |
| 1990 | free_extent_buffer(info->tree_root->commit_root); | 1989 | free_extent_buffer(info->tree_root->commit_root); |
| 1991 | free_extent_buffer(info->dev_root->node); | ||
| 1992 | free_extent_buffer(info->dev_root->commit_root); | ||
| 1993 | free_extent_buffer(info->extent_root->node); | ||
| 1994 | free_extent_buffer(info->extent_root->commit_root); | ||
| 1995 | free_extent_buffer(info->csum_root->node); | ||
| 1996 | free_extent_buffer(info->csum_root->commit_root); | ||
| 1997 | if (info->quota_root) { | ||
| 1998 | free_extent_buffer(info->quota_root->node); | ||
| 1999 | free_extent_buffer(info->quota_root->commit_root); | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | info->tree_root->node = NULL; | 1990 | info->tree_root->node = NULL; |
| 2003 | info->tree_root->commit_root = NULL; | 1991 | info->tree_root->commit_root = NULL; |
| 2004 | info->dev_root->node = NULL; | 1992 | |
| 2005 | info->dev_root->commit_root = NULL; | 1993 | if (info->dev_root) { |
| 2006 | info->extent_root->node = NULL; | 1994 | free_extent_buffer(info->dev_root->node); |
| 2007 | info->extent_root->commit_root = NULL; | 1995 | free_extent_buffer(info->dev_root->commit_root); |
| 2008 | info->csum_root->node = NULL; | 1996 | info->dev_root->node = NULL; |
| 2009 | info->csum_root->commit_root = NULL; | 1997 | info->dev_root->commit_root = NULL; |
| 1998 | } | ||
| 1999 | if (info->extent_root) { | ||
| 2000 | free_extent_buffer(info->extent_root->node); | ||
| 2001 | free_extent_buffer(info->extent_root->commit_root); | ||
| 2002 | info->extent_root->node = NULL; | ||
| 2003 | info->extent_root->commit_root = NULL; | ||
| 2004 | } | ||
| 2005 | if (info->csum_root) { | ||
| 2006 | free_extent_buffer(info->csum_root->node); | ||
| 2007 | free_extent_buffer(info->csum_root->commit_root); | ||
| 2008 | info->csum_root->node = NULL; | ||
| 2009 | info->csum_root->commit_root = NULL; | ||
| 2010 | } | ||
| 2010 | if (info->quota_root) { | 2011 | if (info->quota_root) { |
| 2012 | free_extent_buffer(info->quota_root->node); | ||
| 2013 | free_extent_buffer(info->quota_root->commit_root); | ||
| 2011 | info->quota_root->node = NULL; | 2014 | info->quota_root->node = NULL; |
| 2012 | info->quota_root->commit_root = NULL; | 2015 | info->quota_root->commit_root = NULL; |
| 2013 | } | 2016 | } |
| 2014 | |||
| 2015 | if (chunk_root) { | 2017 | if (chunk_root) { |
| 2016 | free_extent_buffer(info->chunk_root->node); | 2018 | free_extent_buffer(info->chunk_root->node); |
| 2017 | free_extent_buffer(info->chunk_root->commit_root); | 2019 | free_extent_buffer(info->chunk_root->commit_root); |
| @@ -3128,7 +3130,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
| 3128 | * caller | 3130 | * caller |
| 3129 | */ | 3131 | */ |
| 3130 | device->flush_bio = NULL; | 3132 | device->flush_bio = NULL; |
| 3131 | bio = bio_alloc(GFP_NOFS, 0); | 3133 | bio = btrfs_io_bio_alloc(GFP_NOFS, 0); |
| 3132 | if (!bio) | 3134 | if (!bio) |
| 3133 | return -ENOMEM; | 3135 | return -ENOMEM; |
| 3134 | 3136 | ||
| @@ -3659,8 +3661,11 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, | |||
| 3659 | ordered_operations); | 3661 | ordered_operations); |
| 3660 | 3662 | ||
| 3661 | list_del_init(&btrfs_inode->ordered_operations); | 3663 | list_del_init(&btrfs_inode->ordered_operations); |
| 3664 | spin_unlock(&root->fs_info->ordered_extent_lock); | ||
| 3662 | 3665 | ||
| 3663 | btrfs_invalidate_inodes(btrfs_inode->root); | 3666 | btrfs_invalidate_inodes(btrfs_inode->root); |
| 3667 | |||
| 3668 | spin_lock(&root->fs_info->ordered_extent_lock); | ||
| 3664 | } | 3669 | } |
| 3665 | 3670 | ||
| 3666 | spin_unlock(&root->fs_info->ordered_extent_lock); | 3671 | spin_unlock(&root->fs_info->ordered_extent_lock); |
| @@ -3782,8 +3787,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) | |||
| 3782 | list_del_init(&btrfs_inode->delalloc_inodes); | 3787 | list_del_init(&btrfs_inode->delalloc_inodes); |
| 3783 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, | 3788 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| 3784 | &btrfs_inode->runtime_flags); | 3789 | &btrfs_inode->runtime_flags); |
| 3790 | spin_unlock(&root->fs_info->delalloc_lock); | ||
| 3785 | 3791 | ||
| 3786 | btrfs_invalidate_inodes(btrfs_inode->root); | 3792 | btrfs_invalidate_inodes(btrfs_inode->root); |
| 3793 | |||
| 3794 | spin_lock(&root->fs_info->delalloc_lock); | ||
| 3787 | } | 3795 | } |
| 3788 | 3796 | ||
| 3789 | spin_unlock(&root->fs_info->delalloc_lock); | 3797 | spin_unlock(&root->fs_info->delalloc_lock); |
| @@ -3808,7 +3816,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
| 3808 | while (start <= end) { | 3816 | while (start <= end) { |
| 3809 | eb = btrfs_find_tree_block(root, start, | 3817 | eb = btrfs_find_tree_block(root, start, |
| 3810 | root->leafsize); | 3818 | root->leafsize); |
| 3811 | start += eb->len; | 3819 | start += root->leafsize; |
| 3812 | if (!eb) | 3820 | if (!eb) |
| 3813 | continue; | 3821 | continue; |
| 3814 | wait_on_extent_buffer_writeback(eb); | 3822 | wait_on_extent_buffer_writeback(eb); |
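The reworked free_root_pointers() repeats one guarded free-and-clear block per tree root, so a partially initialized fs_info (where dev_root, extent_root or csum_root may still be NULL) can be torn down safely. A minimal standalone sketch of that pattern, using stand-in types rather than the real btrfs structures and a hypothetical free_root_buffers() helper that is not part of this patch, looks like:

    #include <stdlib.h>

    /* Stand-in for the parts of struct btrfs_root used here. */
    struct root_stub {
            void *node;
            void *commit_root;
    };

    static void free_buffer(void *buf)
    {
            free(buf);                      /* stands in for free_extent_buffer() */
    }

    /* Hypothetical helper: free and clear both buffers, tolerating NULL roots. */
    static void free_root_buffers(struct root_stub *root)
    {
            if (!root)
                    return;
            free_buffer(root->node);
            free_buffer(root->commit_root);
            root->node = NULL;
            root->commit_root = NULL;
    }

    int main(void)
    {
            struct root_stub dev_root = { malloc(16), malloc(16) };

            free_root_buffers(&dev_root);   /* safe even if called twice */
            free_root_buffers(&dev_root);
            free_root_buffers(NULL);        /* and with roots never set up */
            return 0;
    }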
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2305b5c5cf00..df472ab1b5ac 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2070,8 +2070,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, | |||
| 2070 | u32 item_size; | 2070 | u32 item_size; |
| 2071 | int ret; | 2071 | int ret; |
| 2072 | int err = 0; | 2072 | int err = 0; |
| 2073 | int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY || | 2073 | int metadata = !extent_op->is_data; |
| 2074 | node->type == BTRFS_SHARED_BLOCK_REF_KEY); | ||
| 2075 | 2074 | ||
| 2076 | if (trans->aborted) | 2075 | if (trans->aborted) |
| 2077 | return 0; | 2076 | return 0; |
| @@ -2086,11 +2085,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, | |||
| 2086 | key.objectid = node->bytenr; | 2085 | key.objectid = node->bytenr; |
| 2087 | 2086 | ||
| 2088 | if (metadata) { | 2087 | if (metadata) { |
| 2089 | struct btrfs_delayed_tree_ref *tree_ref; | ||
| 2090 | |||
| 2091 | tree_ref = btrfs_delayed_node_to_tree_ref(node); | ||
| 2092 | key.type = BTRFS_METADATA_ITEM_KEY; | 2088 | key.type = BTRFS_METADATA_ITEM_KEY; |
| 2093 | key.offset = tree_ref->level; | 2089 | key.offset = extent_op->level; |
| 2094 | } else { | 2090 | } else { |
| 2095 | key.type = BTRFS_EXTENT_ITEM_KEY; | 2091 | key.type = BTRFS_EXTENT_ITEM_KEY; |
| 2096 | key.offset = node->num_bytes; | 2092 | key.offset = node->num_bytes; |
| @@ -2719,7 +2715,7 @@ out: | |||
| 2719 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, | 2715 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, |
| 2720 | struct btrfs_root *root, | 2716 | struct btrfs_root *root, |
| 2721 | u64 bytenr, u64 num_bytes, u64 flags, | 2717 | u64 bytenr, u64 num_bytes, u64 flags, |
| 2722 | int is_data) | 2718 | int level, int is_data) |
| 2723 | { | 2719 | { |
| 2724 | struct btrfs_delayed_extent_op *extent_op; | 2720 | struct btrfs_delayed_extent_op *extent_op; |
| 2725 | int ret; | 2721 | int ret; |
| @@ -2732,6 +2728,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, | |||
| 2732 | extent_op->update_flags = 1; | 2728 | extent_op->update_flags = 1; |
| 2733 | extent_op->update_key = 0; | 2729 | extent_op->update_key = 0; |
| 2734 | extent_op->is_data = is_data ? 1 : 0; | 2730 | extent_op->is_data = is_data ? 1 : 0; |
| 2731 | extent_op->level = level; | ||
| 2735 | 2732 | ||
| 2736 | ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, | 2733 | ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, |
| 2737 | num_bytes, extent_op); | 2734 | num_bytes, extent_op); |
| @@ -3109,6 +3106,11 @@ again: | |||
| 3109 | WARN_ON(ret); | 3106 | WARN_ON(ret); |
| 3110 | 3107 | ||
| 3111 | if (i_size_read(inode) > 0) { | 3108 | if (i_size_read(inode) > 0) { |
| 3109 | ret = btrfs_check_trunc_cache_free_space(root, | ||
| 3110 | &root->fs_info->global_block_rsv); | ||
| 3111 | if (ret) | ||
| 3112 | goto out_put; | ||
| 3113 | |||
| 3112 | ret = btrfs_truncate_free_space_cache(root, trans, path, | 3114 | ret = btrfs_truncate_free_space_cache(root, trans, path, |
| 3113 | inode); | 3115 | inode); |
| 3114 | if (ret) | 3116 | if (ret) |
| @@ -4562,6 +4564,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
| 4562 | fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; | 4564 | fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; |
| 4563 | fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; | 4565 | fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; |
| 4564 | fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; | 4566 | fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; |
| 4567 | if (fs_info->quota_root) | ||
| 4568 | fs_info->quota_root->block_rsv = &fs_info->global_block_rsv; | ||
| 4565 | fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; | 4569 | fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; |
| 4566 | 4570 | ||
| 4567 | update_global_block_rsv(fs_info); | 4571 | update_global_block_rsv(fs_info); |
| @@ -6651,51 +6655,51 @@ use_block_rsv(struct btrfs_trans_handle *trans, | |||
| 6651 | struct btrfs_block_rsv *block_rsv; | 6655 | struct btrfs_block_rsv *block_rsv; |
| 6652 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | 6656 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; |
| 6653 | int ret; | 6657 | int ret; |
| 6658 | bool global_updated = false; | ||
| 6654 | 6659 | ||
| 6655 | block_rsv = get_block_rsv(trans, root); | 6660 | block_rsv = get_block_rsv(trans, root); |
| 6656 | 6661 | ||
| 6657 | if (block_rsv->size == 0) { | 6662 | if (unlikely(block_rsv->size == 0)) |
| 6658 | ret = reserve_metadata_bytes(root, block_rsv, blocksize, | 6663 | goto try_reserve; |
| 6659 | BTRFS_RESERVE_NO_FLUSH); | 6664 | again: |
| 6660 | /* | 6665 | ret = block_rsv_use_bytes(block_rsv, blocksize); |
| 6661 | * If we couldn't reserve metadata bytes try and use some from | 6666 | if (!ret) |
| 6662 | * the global reserve. | ||
| 6663 | */ | ||
| 6664 | if (ret && block_rsv != global_rsv) { | ||
| 6665 | ret = block_rsv_use_bytes(global_rsv, blocksize); | ||
| 6666 | if (!ret) | ||
| 6667 | return global_rsv; | ||
| 6668 | return ERR_PTR(ret); | ||
| 6669 | } else if (ret) { | ||
| 6670 | return ERR_PTR(ret); | ||
| 6671 | } | ||
| 6672 | return block_rsv; | 6667 | return block_rsv; |
| 6668 | |||
| 6669 | if (block_rsv->failfast) | ||
| 6670 | return ERR_PTR(ret); | ||
| 6671 | |||
| 6672 | if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) { | ||
| 6673 | global_updated = true; | ||
| 6674 | update_global_block_rsv(root->fs_info); | ||
| 6675 | goto again; | ||
| 6673 | } | 6676 | } |
| 6674 | 6677 | ||
| 6675 | ret = block_rsv_use_bytes(block_rsv, blocksize); | 6678 | if (btrfs_test_opt(root, ENOSPC_DEBUG)) { |
| 6679 | static DEFINE_RATELIMIT_STATE(_rs, | ||
| 6680 | DEFAULT_RATELIMIT_INTERVAL * 10, | ||
| 6681 | /*DEFAULT_RATELIMIT_BURST*/ 1); | ||
| 6682 | if (__ratelimit(&_rs)) | ||
| 6683 | WARN(1, KERN_DEBUG | ||
| 6684 | "btrfs: block rsv returned %d\n", ret); | ||
| 6685 | } | ||
| 6686 | try_reserve: | ||
| 6687 | ret = reserve_metadata_bytes(root, block_rsv, blocksize, | ||
| 6688 | BTRFS_RESERVE_NO_FLUSH); | ||
| 6676 | if (!ret) | 6689 | if (!ret) |
| 6677 | return block_rsv; | 6690 | return block_rsv; |
| 6678 | if (ret && !block_rsv->failfast) { | 6691 | /* |
| 6679 | if (btrfs_test_opt(root, ENOSPC_DEBUG)) { | 6692 | * If we couldn't reserve metadata bytes try and use some from |
| 6680 | static DEFINE_RATELIMIT_STATE(_rs, | 6693 | * the global reserve if its space type is the same as the global |
| 6681 | DEFAULT_RATELIMIT_INTERVAL * 10, | 6694 | * reservation. |
| 6682 | /*DEFAULT_RATELIMIT_BURST*/ 1); | 6695 | */ |
| 6683 | if (__ratelimit(&_rs)) | 6696 | if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL && |
| 6684 | WARN(1, KERN_DEBUG | 6697 | block_rsv->space_info == global_rsv->space_info) { |
| 6685 | "btrfs: block rsv returned %d\n", ret); | 6698 | ret = block_rsv_use_bytes(global_rsv, blocksize); |
| 6686 | } | 6699 | if (!ret) |
| 6687 | ret = reserve_metadata_bytes(root, block_rsv, blocksize, | 6700 | return global_rsv; |
| 6688 | BTRFS_RESERVE_NO_FLUSH); | ||
| 6689 | if (!ret) { | ||
| 6690 | return block_rsv; | ||
| 6691 | } else if (ret && block_rsv != global_rsv) { | ||
| 6692 | ret = block_rsv_use_bytes(global_rsv, blocksize); | ||
| 6693 | if (!ret) | ||
| 6694 | return global_rsv; | ||
| 6695 | } | ||
| 6696 | } | 6701 | } |
| 6697 | 6702 | return ERR_PTR(ret); | |
| 6698 | return ERR_PTR(-ENOSPC); | ||
| 6699 | } | 6703 | } |
| 6700 | 6704 | ||
| 6701 | static void unuse_block_rsv(struct btrfs_fs_info *fs_info, | 6705 | static void unuse_block_rsv(struct btrfs_fs_info *fs_info, |
| @@ -6763,6 +6767,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |||
| 6763 | extent_op->update_key = 1; | 6767 | extent_op->update_key = 1; |
| 6764 | extent_op->update_flags = 1; | 6768 | extent_op->update_flags = 1; |
| 6765 | extent_op->is_data = 0; | 6769 | extent_op->is_data = 0; |
| 6770 | extent_op->level = level; | ||
| 6766 | 6771 | ||
| 6767 | ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, | 6772 | ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, |
| 6768 | ins.objectid, | 6773 | ins.objectid, |
| @@ -6934,7 +6939,8 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, | |||
| 6934 | ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); | 6939 | ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); |
| 6935 | BUG_ON(ret); /* -ENOMEM */ | 6940 | BUG_ON(ret); /* -ENOMEM */ |
| 6936 | ret = btrfs_set_disk_extent_flags(trans, root, eb->start, | 6941 | ret = btrfs_set_disk_extent_flags(trans, root, eb->start, |
| 6937 | eb->len, flag, 0); | 6942 | eb->len, flag, |
| 6943 | btrfs_header_level(eb), 0); | ||
| 6938 | BUG_ON(ret); /* -ENOMEM */ | 6944 | BUG_ON(ret); /* -ENOMEM */ |
| 6939 | wc->flags[level] |= flag; | 6945 | wc->flags[level] |= flag; |
| 6940 | } | 6946 | } |
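The rewritten use_block_rsv() tries its reservation sources in a fixed order: take bytes from the root's own block_rsv, refresh the global reserve once and retry if that rsv is the global one, then attempt a fresh reservation, and only then borrow from the global reserve when the space_info matches. A compact userspace sketch of that control flow is below; the reservation calls are stubs standing in for block_rsv_use_bytes()/reserve_metadata_bytes(), and the size == 0 and failfast fast paths are omitted.

    #include <stdbool.h>
    #include <stdio.h>

    enum rsv_type { RSV_NORMAL, RSV_GLOBAL };

    struct rsv { enum rsv_type type; long reserved; int space_info; };

    /* Stubs standing in for the kernel reservation helpers. */
    static int use_bytes(struct rsv *r, long n)
    { if (r->reserved < n) return -28 /* -ENOSPC */; r->reserved -= n; return 0; }
    static int reserve_bytes(struct rsv *r, long n) { (void)r; (void)n; return -28; }
    static void refresh_global(struct rsv *g) { g->reserved += 4096; }

    static struct rsv *pick_rsv(struct rsv *rsv, struct rsv *global, long n)
    {
            bool refreshed = false;
    again:
            if (!use_bytes(rsv, n))
                    return rsv;                     /* 1. the root's own reserve */
            if (rsv->type == RSV_GLOBAL && !refreshed) {
                    refreshed = true;               /* 2. refresh the global rsv once */
                    refresh_global(global);
                    goto again;
            }
            if (!reserve_bytes(rsv, n))
                    return rsv;                     /* 3. try a fresh reservation */
            if (rsv->type != RSV_GLOBAL && rsv->space_info == global->space_info &&
                !use_bytes(global, n))
                    return global;                  /* 4. borrow from the global rsv */
            return NULL;                            /* out of space */
    }

    int main(void)
    {
            struct rsv global = { RSV_GLOBAL, 8192, 0 }, local = { RSV_NORMAL, 0, 0 };

            printf("picked %s\n",
                   pick_rsv(&local, &global, 4096) == &global ? "global" : "other");
            return 0;
    }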
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 32d67a822e93..e7e7afb4a872 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | static struct kmem_cache *extent_state_cache; | 24 | static struct kmem_cache *extent_state_cache; |
| 25 | static struct kmem_cache *extent_buffer_cache; | 25 | static struct kmem_cache *extent_buffer_cache; |
| 26 | static struct bio_set *btrfs_bioset; | ||
| 26 | 27 | ||
| 27 | #ifdef CONFIG_BTRFS_DEBUG | 28 | #ifdef CONFIG_BTRFS_DEBUG |
| 28 | static LIST_HEAD(buffers); | 29 | static LIST_HEAD(buffers); |
| @@ -125,10 +126,20 @@ int __init extent_io_init(void) | |||
| 125 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | 126 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); |
| 126 | if (!extent_buffer_cache) | 127 | if (!extent_buffer_cache) |
| 127 | goto free_state_cache; | 128 | goto free_state_cache; |
| 129 | |||
| 130 | btrfs_bioset = bioset_create(BIO_POOL_SIZE, | ||
| 131 | offsetof(struct btrfs_io_bio, bio)); | ||
| 132 | if (!btrfs_bioset) | ||
| 133 | goto free_buffer_cache; | ||
| 128 | return 0; | 134 | return 0; |
| 129 | 135 | ||
| 136 | free_buffer_cache: | ||
| 137 | kmem_cache_destroy(extent_buffer_cache); | ||
| 138 | extent_buffer_cache = NULL; | ||
| 139 | |||
| 130 | free_state_cache: | 140 | free_state_cache: |
| 131 | kmem_cache_destroy(extent_state_cache); | 141 | kmem_cache_destroy(extent_state_cache); |
| 142 | extent_state_cache = NULL; | ||
| 132 | return -ENOMEM; | 143 | return -ENOMEM; |
| 133 | } | 144 | } |
| 134 | 145 | ||
| @@ -145,6 +156,8 @@ void extent_io_exit(void) | |||
| 145 | kmem_cache_destroy(extent_state_cache); | 156 | kmem_cache_destroy(extent_state_cache); |
| 146 | if (extent_buffer_cache) | 157 | if (extent_buffer_cache) |
| 147 | kmem_cache_destroy(extent_buffer_cache); | 158 | kmem_cache_destroy(extent_buffer_cache); |
| 159 | if (btrfs_bioset) | ||
| 160 | bioset_free(btrfs_bioset); | ||
| 148 | } | 161 | } |
| 149 | 162 | ||
| 150 | void extent_io_tree_init(struct extent_io_tree *tree, | 163 | void extent_io_tree_init(struct extent_io_tree *tree, |
| @@ -1948,28 +1961,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) | |||
| 1948 | } | 1961 | } |
| 1949 | 1962 | ||
| 1950 | /* | 1963 | /* |
| 1951 | * helper function to unlock a page if all the extents in the tree | ||
| 1952 | * for that page are unlocked | ||
| 1953 | */ | ||
| 1954 | static void check_page_locked(struct extent_io_tree *tree, struct page *page) | ||
| 1955 | { | ||
| 1956 | u64 start = page_offset(page); | ||
| 1957 | u64 end = start + PAGE_CACHE_SIZE - 1; | ||
| 1958 | if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) | ||
| 1959 | unlock_page(page); | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | /* | ||
| 1963 | * helper function to end page writeback if all the extents | ||
| 1964 | * in the tree for that page are done with writeback | ||
| 1965 | */ | ||
| 1966 | static void check_page_writeback(struct extent_io_tree *tree, | ||
| 1967 | struct page *page) | ||
| 1968 | { | ||
| 1969 | end_page_writeback(page); | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | /* | ||
| 1973 | * When IO fails, either with EIO or csum verification fails, we | 1964 | * When IO fails, either with EIO or csum verification fails, we |
| 1974 | * try other mirrors that might have a good copy of the data. This | 1965 | * try other mirrors that might have a good copy of the data. This |
| 1975 | * io_failure_record is used to record state as we go through all the | 1966 | * io_failure_record is used to record state as we go through all the |
| @@ -2046,7 +2037,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, | |||
| 2046 | if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num)) | 2037 | if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num)) |
| 2047 | return 0; | 2038 | return 0; |
| 2048 | 2039 | ||
| 2049 | bio = bio_alloc(GFP_NOFS, 1); | 2040 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
| 2050 | if (!bio) | 2041 | if (!bio) |
| 2051 | return -EIO; | 2042 | return -EIO; |
| 2052 | bio->bi_private = &compl; | 2043 | bio->bi_private = &compl; |
| @@ -2336,7 +2327,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, | |||
| 2336 | return -EIO; | 2327 | return -EIO; |
| 2337 | } | 2328 | } |
| 2338 | 2329 | ||
| 2339 | bio = bio_alloc(GFP_NOFS, 1); | 2330 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
| 2340 | if (!bio) { | 2331 | if (!bio) { |
| 2341 | free_io_failure(inode, failrec, 0); | 2332 | free_io_failure(inode, failrec, 0); |
| 2342 | return -EIO; | 2333 | return -EIO; |
| @@ -2398,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err) | |||
| 2398 | struct extent_io_tree *tree; | 2389 | struct extent_io_tree *tree; |
| 2399 | u64 start; | 2390 | u64 start; |
| 2400 | u64 end; | 2391 | u64 end; |
| 2401 | int whole_page; | ||
| 2402 | 2392 | ||
| 2403 | do { | 2393 | do { |
| 2404 | struct page *page = bvec->bv_page; | 2394 | struct page *page = bvec->bv_page; |
| 2405 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 2395 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
| 2406 | 2396 | ||
| 2407 | start = page_offset(page) + bvec->bv_offset; | 2397 | /* We always issue full-page reads, but if some block |
| 2408 | end = start + bvec->bv_len - 1; | 2398 | * in a page fails to read, blk_update_request() will |
| 2399 | * advance bv_offset and adjust bv_len to compensate. | ||
| 2400 | * Print a warning for nonzero offsets, and an error | ||
| 2401 | * if they don't add up to a full page. */ | ||
| 2402 | if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) | ||
| 2403 | printk("%s page write in btrfs with offset %u and length %u\n", | ||
| 2404 | bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE | ||
| 2405 | ? KERN_ERR "partial" : KERN_INFO "incomplete", | ||
| 2406 | bvec->bv_offset, bvec->bv_len); | ||
| 2409 | 2407 | ||
| 2410 | if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) | 2408 | start = page_offset(page); |
| 2411 | whole_page = 1; | 2409 | end = start + bvec->bv_offset + bvec->bv_len - 1; |
| 2412 | else | ||
| 2413 | whole_page = 0; | ||
| 2414 | 2410 | ||
| 2415 | if (--bvec >= bio->bi_io_vec) | 2411 | if (--bvec >= bio->bi_io_vec) |
| 2416 | prefetchw(&bvec->bv_page->flags); | 2412 | prefetchw(&bvec->bv_page->flags); |
| @@ -2418,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err) | |||
| 2418 | if (end_extent_writepage(page, err, start, end)) | 2414 | if (end_extent_writepage(page, err, start, end)) |
| 2419 | continue; | 2415 | continue; |
| 2420 | 2416 | ||
| 2421 | if (whole_page) | 2417 | end_page_writeback(page); |
| 2422 | end_page_writeback(page); | ||
| 2423 | else | ||
| 2424 | check_page_writeback(tree, page); | ||
| 2425 | } while (bvec >= bio->bi_io_vec); | 2418 | } while (bvec >= bio->bi_io_vec); |
| 2426 | 2419 | ||
| 2427 | bio_put(bio); | 2420 | bio_put(bio); |
| @@ -2446,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2446 | struct extent_io_tree *tree; | 2439 | struct extent_io_tree *tree; |
| 2447 | u64 start; | 2440 | u64 start; |
| 2448 | u64 end; | 2441 | u64 end; |
| 2449 | int whole_page; | ||
| 2450 | int mirror; | 2442 | int mirror; |
| 2451 | int ret; | 2443 | int ret; |
| 2452 | 2444 | ||
| @@ -2457,19 +2449,26 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2457 | struct page *page = bvec->bv_page; | 2449 | struct page *page = bvec->bv_page; |
| 2458 | struct extent_state *cached = NULL; | 2450 | struct extent_state *cached = NULL; |
| 2459 | struct extent_state *state; | 2451 | struct extent_state *state; |
| 2452 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | ||
| 2460 | 2453 | ||
| 2461 | pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " | 2454 | pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " |
| 2462 | "mirror=%ld\n", (u64)bio->bi_sector, err, | 2455 | "mirror=%lu\n", (u64)bio->bi_sector, err, |
| 2463 | (long int)bio->bi_bdev); | 2456 | io_bio->mirror_num); |
| 2464 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 2457 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
| 2465 | 2458 | ||
| 2466 | start = page_offset(page) + bvec->bv_offset; | 2459 | /* We always issue full-page reads, but if some block |
| 2467 | end = start + bvec->bv_len - 1; | 2460 | * in a page fails to read, blk_update_request() will |
| 2461 | * advance bv_offset and adjust bv_len to compensate. | ||
| 2462 | * Print a warning for nonzero offsets, and an error | ||
| 2463 | * if they don't add up to a full page. */ | ||
| 2464 | if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) | ||
| 2465 | printk("%s page read in btrfs with offset %u and length %u\n", | ||
| 2466 | bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE | ||
| 2467 | ? KERN_ERR "partial" : KERN_INFO "incomplete", | ||
| 2468 | bvec->bv_offset, bvec->bv_len); | ||
| 2468 | 2469 | ||
| 2469 | if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) | 2470 | start = page_offset(page); |
| 2470 | whole_page = 1; | 2471 | end = start + bvec->bv_offset + bvec->bv_len - 1; |
| 2471 | else | ||
| 2472 | whole_page = 0; | ||
| 2473 | 2472 | ||
| 2474 | if (++bvec <= bvec_end) | 2473 | if (++bvec <= bvec_end) |
| 2475 | prefetchw(&bvec->bv_page->flags); | 2474 | prefetchw(&bvec->bv_page->flags); |
| @@ -2485,7 +2484,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2485 | } | 2484 | } |
| 2486 | spin_unlock(&tree->lock); | 2485 | spin_unlock(&tree->lock); |
| 2487 | 2486 | ||
| 2488 | mirror = (int)(unsigned long)bio->bi_bdev; | 2487 | mirror = io_bio->mirror_num; |
| 2489 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { | 2488 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { |
| 2490 | ret = tree->ops->readpage_end_io_hook(page, start, end, | 2489 | ret = tree->ops->readpage_end_io_hook(page, start, end, |
| 2491 | state, mirror); | 2490 | state, mirror); |
| @@ -2528,39 +2527,35 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2528 | } | 2527 | } |
| 2529 | unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); | 2528 | unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); |
| 2530 | 2529 | ||
| 2531 | if (whole_page) { | 2530 | if (uptodate) { |
| 2532 | if (uptodate) { | 2531 | SetPageUptodate(page); |
| 2533 | SetPageUptodate(page); | ||
| 2534 | } else { | ||
| 2535 | ClearPageUptodate(page); | ||
| 2536 | SetPageError(page); | ||
| 2537 | } | ||
| 2538 | unlock_page(page); | ||
| 2539 | } else { | 2532 | } else { |
| 2540 | if (uptodate) { | 2533 | ClearPageUptodate(page); |
| 2541 | check_page_uptodate(tree, page); | 2534 | SetPageError(page); |
| 2542 | } else { | ||
| 2543 | ClearPageUptodate(page); | ||
| 2544 | SetPageError(page); | ||
| 2545 | } | ||
| 2546 | check_page_locked(tree, page); | ||
| 2547 | } | 2535 | } |
| 2536 | unlock_page(page); | ||
| 2548 | } while (bvec <= bvec_end); | 2537 | } while (bvec <= bvec_end); |
| 2549 | 2538 | ||
| 2550 | bio_put(bio); | 2539 | bio_put(bio); |
| 2551 | } | 2540 | } |
| 2552 | 2541 | ||
| 2542 | /* | ||
| 2543 | * this allocates from the btrfs_bioset. We're returning a bio right now | ||
| 2544 | * but you can call btrfs_io_bio for the appropriate container_of magic | ||
| 2545 | */ | ||
| 2553 | struct bio * | 2546 | struct bio * |
| 2554 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, | 2547 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, |
| 2555 | gfp_t gfp_flags) | 2548 | gfp_t gfp_flags) |
| 2556 | { | 2549 | { |
| 2557 | struct bio *bio; | 2550 | struct bio *bio; |
| 2558 | 2551 | ||
| 2559 | bio = bio_alloc(gfp_flags, nr_vecs); | 2552 | bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset); |
| 2560 | 2553 | ||
| 2561 | if (bio == NULL && (current->flags & PF_MEMALLOC)) { | 2554 | if (bio == NULL && (current->flags & PF_MEMALLOC)) { |
| 2562 | while (!bio && (nr_vecs /= 2)) | 2555 | while (!bio && (nr_vecs /= 2)) { |
| 2563 | bio = bio_alloc(gfp_flags, nr_vecs); | 2556 | bio = bio_alloc_bioset(gfp_flags, |
| 2557 | nr_vecs, btrfs_bioset); | ||
| 2558 | } | ||
| 2564 | } | 2559 | } |
| 2565 | 2560 | ||
| 2566 | if (bio) { | 2561 | if (bio) { |
| @@ -2571,6 +2566,19 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, | |||
| 2571 | return bio; | 2566 | return bio; |
| 2572 | } | 2567 | } |
| 2573 | 2568 | ||
| 2569 | struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) | ||
| 2570 | { | ||
| 2571 | return bio_clone_bioset(bio, gfp_mask, btrfs_bioset); | ||
| 2572 | } | ||
| 2573 | |||
| 2574 | |||
| 2575 | /* this also allocates from the btrfs_bioset */ | ||
| 2576 | struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) | ||
| 2577 | { | ||
| 2578 | return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); | ||
| 2579 | } | ||
| 2580 | |||
| 2581 | |||
| 2574 | static int __must_check submit_one_bio(int rw, struct bio *bio, | 2582 | static int __must_check submit_one_bio(int rw, struct bio *bio, |
| 2575 | int mirror_num, unsigned long bio_flags) | 2583 | int mirror_num, unsigned long bio_flags) |
| 2576 | { | 2584 | { |
| @@ -3988,7 +3996,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 3988 | last_for_get_extent = isize; | 3996 | last_for_get_extent = isize; |
| 3989 | } | 3997 | } |
| 3990 | 3998 | ||
| 3991 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3999 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0, |
| 3992 | &cached_state); | 4000 | &cached_state); |
| 3993 | 4001 | ||
| 3994 | em = get_extent_skip_holes(inode, start, last_for_get_extent, | 4002 | em = get_extent_skip_holes(inode, start, last_for_get_extent, |
| @@ -4075,7 +4083,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 4075 | out_free: | 4083 | out_free: |
| 4076 | free_extent_map(em); | 4084 | free_extent_map(em); |
| 4077 | out: | 4085 | out: |
| 4078 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, | 4086 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, |
| 4079 | &cached_state, GFP_NOFS); | 4087 | &cached_state, GFP_NOFS); |
| 4080 | return ret; | 4088 | return ret; |
| 4081 | } | 4089 | } |
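extent_io_init() now unwinds in reverse order when bioset_create() fails: the new free_buffer_cache label destroys the buffer cache before falling through to free_state_cache. The same goto-unwind shape, reduced to a self-contained example with plain malloc() standing in for the two caches and the bioset, is:

    #include <stdlib.h>

    static void *state_cache, *buffer_cache, *bioset;

    static int demo_init(void)
    {
            state_cache = malloc(32);
            if (!state_cache)
                    return -12;                     /* -ENOMEM */

            buffer_cache = malloc(32);
            if (!buffer_cache)
                    goto free_state_cache;

            bioset = malloc(32);
            if (!bioset)
                    goto free_buffer_cache;
            return 0;

    free_buffer_cache:                              /* unwind in reverse order */
            free(buffer_cache);
            buffer_cache = NULL;
    free_state_cache:
            free(state_cache);
            state_cache = NULL;
            return -12;
    }

    int main(void) { return demo_init() ? 1 : 0; }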
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index a2c03a175009..41fb81e7ec53 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
| @@ -336,6 +336,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, | |||
| 336 | struct bio * | 336 | struct bio * |
| 337 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, | 337 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, |
| 338 | gfp_t gfp_flags); | 338 | gfp_t gfp_flags); |
| 339 | struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); | ||
| 340 | struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask); | ||
| 339 | 341 | ||
| 340 | struct btrfs_fs_info; | 342 | struct btrfs_fs_info; |
| 341 | 343 | ||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ecca6c7375a6..e53009657f0e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -197,30 +197,32 @@ int create_free_space_inode(struct btrfs_root *root, | |||
| 197 | block_group->key.objectid); | 197 | block_group->key.objectid); |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | 200 | int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, |
| 201 | struct btrfs_trans_handle *trans, | 201 | struct btrfs_block_rsv *rsv) |
| 202 | struct btrfs_path *path, | ||
| 203 | struct inode *inode) | ||
| 204 | { | 202 | { |
| 205 | struct btrfs_block_rsv *rsv; | ||
| 206 | u64 needed_bytes; | 203 | u64 needed_bytes; |
| 207 | loff_t oldsize; | 204 | int ret; |
| 208 | int ret = 0; | ||
| 209 | |||
| 210 | rsv = trans->block_rsv; | ||
| 211 | trans->block_rsv = &root->fs_info->global_block_rsv; | ||
| 212 | 205 | ||
| 213 | /* 1 for slack space, 1 for updating the inode */ | 206 | /* 1 for slack space, 1 for updating the inode */ |
| 214 | needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + | 207 | needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + |
| 215 | btrfs_calc_trans_metadata_size(root, 1); | 208 | btrfs_calc_trans_metadata_size(root, 1); |
| 216 | 209 | ||
| 217 | spin_lock(&trans->block_rsv->lock); | 210 | spin_lock(&rsv->lock); |
| 218 | if (trans->block_rsv->reserved < needed_bytes) { | 211 | if (rsv->reserved < needed_bytes) |
| 219 | spin_unlock(&trans->block_rsv->lock); | 212 | ret = -ENOSPC; |
| 220 | trans->block_rsv = rsv; | 213 | else |
| 221 | return -ENOSPC; | 214 | ret = 0; |
| 222 | } | 215 | spin_unlock(&rsv->lock); |
| 223 | spin_unlock(&trans->block_rsv->lock); | 216 | return ret; |
| 217 | } | ||
| 218 | |||
| 219 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | ||
| 220 | struct btrfs_trans_handle *trans, | ||
| 221 | struct btrfs_path *path, | ||
| 222 | struct inode *inode) | ||
| 223 | { | ||
| 224 | loff_t oldsize; | ||
| 225 | int ret = 0; | ||
| 224 | 226 | ||
| 225 | oldsize = i_size_read(inode); | 227 | oldsize = i_size_read(inode); |
| 226 | btrfs_i_size_write(inode, 0); | 228 | btrfs_i_size_write(inode, 0); |
| @@ -232,9 +234,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, | |||
| 232 | */ | 234 | */ |
| 233 | ret = btrfs_truncate_inode_items(trans, root, inode, | 235 | ret = btrfs_truncate_inode_items(trans, root, inode, |
| 234 | 0, BTRFS_EXTENT_DATA_KEY); | 236 | 0, BTRFS_EXTENT_DATA_KEY); |
| 235 | |||
| 236 | if (ret) { | 237 | if (ret) { |
| 237 | trans->block_rsv = rsv; | ||
| 238 | btrfs_abort_transaction(trans, root, ret); | 238 | btrfs_abort_transaction(trans, root, ret); |
| 239 | return ret; | 239 | return ret; |
| 240 | } | 240 | } |
| @@ -242,7 +242,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, | |||
| 242 | ret = btrfs_update_inode(trans, root, inode); | 242 | ret = btrfs_update_inode(trans, root, inode); |
| 243 | if (ret) | 243 | if (ret) |
| 244 | btrfs_abort_transaction(trans, root, ret); | 244 | btrfs_abort_transaction(trans, root, ret); |
| 245 | trans->block_rsv = rsv; | ||
| 246 | 245 | ||
| 247 | return ret; | 246 | return ret; |
| 248 | } | 247 | } |
| @@ -920,10 +919,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
| 920 | 919 | ||
| 921 | /* Make sure we can fit our crcs into the first page */ | 920 | /* Make sure we can fit our crcs into the first page */ |
| 922 | if (io_ctl.check_crcs && | 921 | if (io_ctl.check_crcs && |
| 923 | (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) { | 922 | (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) |
| 924 | WARN_ON(1); | ||
| 925 | goto out_nospc; | 923 | goto out_nospc; |
| 926 | } | ||
| 927 | 924 | ||
| 928 | io_ctl_set_generation(&io_ctl, trans->transid); | 925 | io_ctl_set_generation(&io_ctl, trans->transid); |
| 929 | 926 | ||
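btrfs_truncate_free_space_cache() no longer swaps trans->block_rsv to the global reserve; instead the new btrfs_check_trunc_cache_free_space() lets callers verify up front that the chosen reserve can cover the truncation. A standalone sketch of that check-then-act split follows; a mutex stands in for the rsv spinlock, and the byte counts are made up for illustration.

    #include <pthread.h>
    #include <stdio.h>

    struct reserve {
            pthread_mutex_t lock;
            long reserved;
    };

    /* Mirrors btrfs_check_trunc_cache_free_space(): peek at the reserve under
     * its lock and report -ENOSPC if the truncation could not be covered. */
    static int check_trunc_space(struct reserve *rsv, long needed)
    {
            int ret;

            pthread_mutex_lock(&rsv->lock);
            ret = (rsv->reserved < needed) ? -28 : 0;
            pthread_mutex_unlock(&rsv->lock);
            return ret;
    }

    static int truncate_cache(void)
    {
            /* ... the actual truncation would run here ... */
            return 0;
    }

    int main(void)
    {
            struct reserve global = { PTHREAD_MUTEX_INITIALIZER, 1 << 20 };
            long needed = 2 * 4096;                 /* slack space + inode update */

            if (check_trunc_space(&global, needed)) /* bail before touching i_size */
                    return 1;
            return truncate_cache();
    }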
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 4dc17d8809c7..8b7f19f44961 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
| @@ -54,6 +54,8 @@ int create_free_space_inode(struct btrfs_root *root, | |||
| 54 | struct btrfs_block_group_cache *block_group, | 54 | struct btrfs_block_group_cache *block_group, |
| 55 | struct btrfs_path *path); | 55 | struct btrfs_path *path); |
| 56 | 56 | ||
| 57 | int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, | ||
| 58 | struct btrfs_block_rsv *rsv); | ||
| 57 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | 59 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, |
| 58 | struct btrfs_trans_handle *trans, | 60 | struct btrfs_trans_handle *trans, |
| 59 | struct btrfs_path *path, | 61 | struct btrfs_path *path, |
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index d26f67a59e36..2c66ddbbe670 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c | |||
| @@ -429,11 +429,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root, | |||
| 429 | num_bytes = trans->bytes_reserved; | 429 | num_bytes = trans->bytes_reserved; |
| 430 | /* | 430 | /* |
| 431 | * 1 item for inode item insertion if need | 431 | * 1 item for inode item insertion if need |
| 432 | * 3 items for inode item update (in the worst case) | 432 | * 4 items for inode item update (in the worst case) |
| 433 | * 1 items for slack space if we need do truncation | ||
| 433 | * 1 item for free space object | 434 | * 1 item for free space object |
| 434 | * 3 items for pre-allocation | 435 | * 3 items for pre-allocation |
| 435 | */ | 436 | */ |
| 436 | trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8); | 437 | trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10); |
| 437 | ret = btrfs_block_rsv_add(root, trans->block_rsv, | 438 | ret = btrfs_block_rsv_add(root, trans->block_rsv, |
| 438 | trans->bytes_reserved, | 439 | trans->bytes_reserved, |
| 439 | BTRFS_RESERVE_NO_FLUSH); | 440 | BTRFS_RESERVE_NO_FLUSH); |
| @@ -468,7 +469,8 @@ again: | |||
| 468 | if (i_size_read(inode) > 0) { | 469 | if (i_size_read(inode) > 0) { |
| 469 | ret = btrfs_truncate_free_space_cache(root, trans, path, inode); | 470 | ret = btrfs_truncate_free_space_cache(root, trans, path, inode); |
| 470 | if (ret) { | 471 | if (ret) { |
| 471 | btrfs_abort_transaction(trans, root, ret); | 472 | if (ret != -ENOSPC) |
| 473 | btrfs_abort_transaction(trans, root, ret); | ||
| 472 | goto out_put; | 474 | goto out_put; |
| 473 | } | 475 | } |
| 474 | } | 476 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9b31b3b091fc..af978f7682b3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -715,8 +715,10 @@ retry: | |||
| 715 | async_extent->ram_size - 1, 0); | 715 | async_extent->ram_size - 1, 0); |
| 716 | 716 | ||
| 717 | em = alloc_extent_map(); | 717 | em = alloc_extent_map(); |
| 718 | if (!em) | 718 | if (!em) { |
| 719 | ret = -ENOMEM; | ||
| 719 | goto out_free_reserve; | 720 | goto out_free_reserve; |
| 721 | } | ||
| 720 | em->start = async_extent->start; | 722 | em->start = async_extent->start; |
| 721 | em->len = async_extent->ram_size; | 723 | em->len = async_extent->ram_size; |
| 722 | em->orig_start = em->start; | 724 | em->orig_start = em->start; |
| @@ -923,8 +925,10 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans, | |||
| 923 | } | 925 | } |
| 924 | 926 | ||
| 925 | em = alloc_extent_map(); | 927 | em = alloc_extent_map(); |
| 926 | if (!em) | 928 | if (!em) { |
| 929 | ret = -ENOMEM; | ||
| 927 | goto out_reserve; | 930 | goto out_reserve; |
| 931 | } | ||
| 928 | em->start = start; | 932 | em->start = start; |
| 929 | em->orig_start = em->start; | 933 | em->orig_start = em->start; |
| 930 | ram_size = ins.offset; | 934 | ram_size = ins.offset; |
| @@ -4724,6 +4728,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
| 4724 | btrfs_end_transaction(trans, root); | 4728 | btrfs_end_transaction(trans, root); |
| 4725 | btrfs_btree_balance_dirty(root); | 4729 | btrfs_btree_balance_dirty(root); |
| 4726 | no_delete: | 4730 | no_delete: |
| 4731 | btrfs_remove_delayed_node(inode); | ||
| 4727 | clear_inode(inode); | 4732 | clear_inode(inode); |
| 4728 | return; | 4733 | return; |
| 4729 | } | 4734 | } |
| @@ -4839,14 +4844,13 @@ static void inode_tree_add(struct inode *inode) | |||
| 4839 | struct rb_node **p; | 4844 | struct rb_node **p; |
| 4840 | struct rb_node *parent; | 4845 | struct rb_node *parent; |
| 4841 | u64 ino = btrfs_ino(inode); | 4846 | u64 ino = btrfs_ino(inode); |
| 4842 | again: | ||
| 4843 | p = &root->inode_tree.rb_node; | ||
| 4844 | parent = NULL; | ||
| 4845 | 4847 | ||
| 4846 | if (inode_unhashed(inode)) | 4848 | if (inode_unhashed(inode)) |
| 4847 | return; | 4849 | return; |
| 4848 | 4850 | again: | |
| 4851 | parent = NULL; | ||
| 4849 | spin_lock(&root->inode_lock); | 4852 | spin_lock(&root->inode_lock); |
| 4853 | p = &root->inode_tree.rb_node; | ||
| 4850 | while (*p) { | 4854 | while (*p) { |
| 4851 | parent = *p; | 4855 | parent = *p; |
| 4852 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | 4856 | entry = rb_entry(parent, struct btrfs_inode, rb_node); |
| @@ -6928,7 +6932,11 @@ struct btrfs_dio_private { | |||
| 6928 | /* IO errors */ | 6932 | /* IO errors */ |
| 6929 | int errors; | 6933 | int errors; |
| 6930 | 6934 | ||
| 6935 | /* orig_bio is our btrfs_io_bio */ | ||
| 6931 | struct bio *orig_bio; | 6936 | struct bio *orig_bio; |
| 6937 | |||
| 6938 | /* dio_bio came from fs/direct-io.c */ | ||
| 6939 | struct bio *dio_bio; | ||
| 6932 | }; | 6940 | }; |
| 6933 | 6941 | ||
| 6934 | static void btrfs_endio_direct_read(struct bio *bio, int err) | 6942 | static void btrfs_endio_direct_read(struct bio *bio, int err) |
| @@ -6938,6 +6946,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
| 6938 | struct bio_vec *bvec = bio->bi_io_vec; | 6946 | struct bio_vec *bvec = bio->bi_io_vec; |
| 6939 | struct inode *inode = dip->inode; | 6947 | struct inode *inode = dip->inode; |
| 6940 | struct btrfs_root *root = BTRFS_I(inode)->root; | 6948 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 6949 | struct bio *dio_bio; | ||
| 6941 | u64 start; | 6950 | u64 start; |
| 6942 | 6951 | ||
| 6943 | start = dip->logical_offset; | 6952 | start = dip->logical_offset; |
| @@ -6977,14 +6986,15 @@ failed: | |||
| 6977 | 6986 | ||
| 6978 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, | 6987 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, |
| 6979 | dip->logical_offset + dip->bytes - 1); | 6988 | dip->logical_offset + dip->bytes - 1); |
| 6980 | bio->bi_private = dip->private; | 6989 | dio_bio = dip->dio_bio; |
| 6981 | 6990 | ||
| 6982 | kfree(dip); | 6991 | kfree(dip); |
| 6983 | 6992 | ||
| 6984 | /* If we had a csum failure make sure to clear the uptodate flag */ | 6993 | /* If we had a csum failure make sure to clear the uptodate flag */ |
| 6985 | if (err) | 6994 | if (err) |
| 6986 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 6995 | clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); |
| 6987 | dio_end_io(bio, err); | 6996 | dio_end_io(dio_bio, err); |
| 6997 | bio_put(bio); | ||
| 6988 | } | 6998 | } |
| 6989 | 6999 | ||
| 6990 | static void btrfs_endio_direct_write(struct bio *bio, int err) | 7000 | static void btrfs_endio_direct_write(struct bio *bio, int err) |
| @@ -6995,6 +7005,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) | |||
| 6995 | struct btrfs_ordered_extent *ordered = NULL; | 7005 | struct btrfs_ordered_extent *ordered = NULL; |
| 6996 | u64 ordered_offset = dip->logical_offset; | 7006 | u64 ordered_offset = dip->logical_offset; |
| 6997 | u64 ordered_bytes = dip->bytes; | 7007 | u64 ordered_bytes = dip->bytes; |
| 7008 | struct bio *dio_bio; | ||
| 6998 | int ret; | 7009 | int ret; |
| 6999 | 7010 | ||
| 7000 | if (err) | 7011 | if (err) |
| @@ -7022,14 +7033,15 @@ out_test: | |||
| 7022 | goto again; | 7033 | goto again; |
| 7023 | } | 7034 | } |
| 7024 | out_done: | 7035 | out_done: |
| 7025 | bio->bi_private = dip->private; | 7036 | dio_bio = dip->dio_bio; |
| 7026 | 7037 | ||
| 7027 | kfree(dip); | 7038 | kfree(dip); |
| 7028 | 7039 | ||
| 7029 | /* If we had an error make sure to clear the uptodate flag */ | 7040 | /* If we had an error make sure to clear the uptodate flag */ |
| 7030 | if (err) | 7041 | if (err) |
| 7031 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 7042 | clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); |
| 7032 | dio_end_io(bio, err); | 7043 | dio_end_io(dio_bio, err); |
| 7044 | bio_put(bio); | ||
| 7033 | } | 7045 | } |
| 7034 | 7046 | ||
| 7035 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, | 7047 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, |
| @@ -7065,10 +7077,10 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) | |||
| 7065 | if (!atomic_dec_and_test(&dip->pending_bios)) | 7077 | if (!atomic_dec_and_test(&dip->pending_bios)) |
| 7066 | goto out; | 7078 | goto out; |
| 7067 | 7079 | ||
| 7068 | if (dip->errors) | 7080 | if (dip->errors) { |
| 7069 | bio_io_error(dip->orig_bio); | 7081 | bio_io_error(dip->orig_bio); |
| 7070 | else { | 7082 | } else { |
| 7071 | set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); | 7083 | set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); |
| 7072 | bio_endio(dip->orig_bio, 0); | 7084 | bio_endio(dip->orig_bio, 0); |
| 7073 | } | 7085 | } |
| 7074 | out: | 7086 | out: |
| @@ -7243,25 +7255,34 @@ out_err: | |||
| 7243 | return 0; | 7255 | return 0; |
| 7244 | } | 7256 | } |
| 7245 | 7257 | ||
| 7246 | static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | 7258 | static void btrfs_submit_direct(int rw, struct bio *dio_bio, |
| 7247 | loff_t file_offset) | 7259 | struct inode *inode, loff_t file_offset) |
| 7248 | { | 7260 | { |
| 7249 | struct btrfs_root *root = BTRFS_I(inode)->root; | 7261 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 7250 | struct btrfs_dio_private *dip; | 7262 | struct btrfs_dio_private *dip; |
| 7251 | struct bio_vec *bvec = bio->bi_io_vec; | 7263 | struct bio_vec *bvec = dio_bio->bi_io_vec; |
| 7264 | struct bio *io_bio; | ||
| 7252 | int skip_sum; | 7265 | int skip_sum; |
| 7253 | int write = rw & REQ_WRITE; | 7266 | int write = rw & REQ_WRITE; |
| 7254 | int ret = 0; | 7267 | int ret = 0; |
| 7255 | 7268 | ||
| 7256 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 7269 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
| 7257 | 7270 | ||
| 7271 | io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); | ||
| 7272 | |||
| 7273 | if (!io_bio) { | ||
| 7274 | ret = -ENOMEM; | ||
| 7275 | goto free_ordered; | ||
| 7276 | } | ||
| 7277 | |||
| 7258 | dip = kmalloc(sizeof(*dip), GFP_NOFS); | 7278 | dip = kmalloc(sizeof(*dip), GFP_NOFS); |
| 7259 | if (!dip) { | 7279 | if (!dip) { |
| 7260 | ret = -ENOMEM; | 7280 | ret = -ENOMEM; |
| 7261 | goto free_ordered; | 7281 | goto free_io_bio; |
| 7262 | } | 7282 | } |
| 7263 | 7283 | ||
| 7264 | dip->private = bio->bi_private; | 7284 | dip->private = dio_bio->bi_private; |
| 7285 | io_bio->bi_private = dio_bio->bi_private; | ||
| 7265 | dip->inode = inode; | 7286 | dip->inode = inode; |
| 7266 | dip->logical_offset = file_offset; | 7287 | dip->logical_offset = file_offset; |
| 7267 | 7288 | ||
| @@ -7269,22 +7290,27 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |||
| 7269 | do { | 7290 | do { |
| 7270 | dip->bytes += bvec->bv_len; | 7291 | dip->bytes += bvec->bv_len; |
| 7271 | bvec++; | 7292 | bvec++; |
| 7272 | } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); | 7293 | } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1)); |
| 7273 | 7294 | ||
| 7274 | dip->disk_bytenr = (u64)bio->bi_sector << 9; | 7295 | dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; |
| 7275 | bio->bi_private = dip; | 7296 | io_bio->bi_private = dip; |
| 7276 | dip->errors = 0; | 7297 | dip->errors = 0; |
| 7277 | dip->orig_bio = bio; | 7298 | dip->orig_bio = io_bio; |
| 7299 | dip->dio_bio = dio_bio; | ||
| 7278 | atomic_set(&dip->pending_bios, 0); | 7300 | atomic_set(&dip->pending_bios, 0); |
| 7279 | 7301 | ||
| 7280 | if (write) | 7302 | if (write) |
| 7281 | bio->bi_end_io = btrfs_endio_direct_write; | 7303 | io_bio->bi_end_io = btrfs_endio_direct_write; |
| 7282 | else | 7304 | else |
| 7283 | bio->bi_end_io = btrfs_endio_direct_read; | 7305 | io_bio->bi_end_io = btrfs_endio_direct_read; |
| 7284 | 7306 | ||
| 7285 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); | 7307 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); |
| 7286 | if (!ret) | 7308 | if (!ret) |
| 7287 | return; | 7309 | return; |
| 7310 | |||
| 7311 | free_io_bio: | ||
| 7312 | bio_put(io_bio); | ||
| 7313 | |||
| 7288 | free_ordered: | 7314 | free_ordered: |
| 7289 | /* | 7315 | /* |
| 7290 | * If this is a write, we need to clean up the reserved space and kill | 7316 | * If this is a write, we need to clean up the reserved space and kill |
| @@ -7300,7 +7326,7 @@ free_ordered: | |||
| 7300 | btrfs_put_ordered_extent(ordered); | 7326 | btrfs_put_ordered_extent(ordered); |
| 7301 | btrfs_put_ordered_extent(ordered); | 7327 | btrfs_put_ordered_extent(ordered); |
| 7302 | } | 7328 | } |
| 7303 | bio_endio(bio, ret); | 7329 | bio_endio(dio_bio, ret); |
| 7304 | } | 7330 | } |
| 7305 | 7331 | ||
| 7306 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, | 7332 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, |
| @@ -7979,7 +8005,6 @@ void btrfs_destroy_inode(struct inode *inode) | |||
| 7979 | inode_tree_del(inode); | 8005 | inode_tree_del(inode); |
| 7980 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); | 8006 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); |
| 7981 | free: | 8007 | free: |
| 7982 | btrfs_remove_delayed_node(inode); | ||
| 7983 | call_rcu(&inode->i_rcu, btrfs_i_callback); | 8008 | call_rcu(&inode->i_rcu, btrfs_i_callback); |
| 7984 | } | 8009 | } |
| 7985 | 8010 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0de4a2fcfb24..0f81d67cdc8d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -1801,7 +1801,11 @@ static noinline int copy_to_sk(struct btrfs_root *root, | |||
| 1801 | item_off = btrfs_item_ptr_offset(leaf, i); | 1801 | item_off = btrfs_item_ptr_offset(leaf, i); |
| 1802 | item_len = btrfs_item_size_nr(leaf, i); | 1802 | item_len = btrfs_item_size_nr(leaf, i); |
| 1803 | 1803 | ||
| 1804 | if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) | 1804 | btrfs_item_key_to_cpu(leaf, key, i); |
| 1805 | if (!key_in_sk(key, sk)) | ||
| 1806 | continue; | ||
| 1807 | |||
| 1808 | if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE) | ||
| 1805 | item_len = 0; | 1809 | item_len = 0; |
| 1806 | 1810 | ||
| 1807 | if (sizeof(sh) + item_len + *sk_offset > | 1811 | if (sizeof(sh) + item_len + *sk_offset > |
| @@ -1810,10 +1814,6 @@ static noinline int copy_to_sk(struct btrfs_root *root, | |||
| 1810 | goto overflow; | 1814 | goto overflow; |
| 1811 | } | 1815 | } |
| 1812 | 1816 | ||
| 1813 | btrfs_item_key_to_cpu(leaf, key, i); | ||
| 1814 | if (!key_in_sk(key, sk)) | ||
| 1815 | continue; | ||
| 1816 | |||
| 1817 | sh.objectid = key->objectid; | 1817 | sh.objectid = key->objectid; |
| 1818 | sh.offset = key->offset; | 1818 | sh.offset = key->offset; |
| 1819 | sh.type = key->type; | 1819 | sh.type = key->type; |
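copy_to_sk() now filters the key with key_in_sk() before any size accounting, and an item's payload is only dropped when the header plus the item itself could never fit in the result buffer. A self-contained sketch of that copy-with-header bounds check is below; the buffer size, header layout and error values are invented for illustration, not the real ioctl ABI.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define ARGS_BUFSIZE 4096

    struct search_header { uint64_t objectid, offset; uint32_t type, len; };

    /* Copy one item into buf at *offset; drop the payload if header + payload
     * can never fit, and report overflow if this item does not fit right now. */
    static int copy_item(char *buf, size_t *offset,
                         const struct search_header *hdr,
                         const void *item, uint32_t item_len)
    {
            struct search_header sh = *hdr;

            if (sizeof(sh) + item_len > ARGS_BUFSIZE)
                    item_len = 0;                   /* keep the header, drop the data */
            if (sizeof(sh) + item_len + *offset > ARGS_BUFSIZE)
                    return -75;                     /* -EOVERFLOW: caller retries later */

            sh.len = item_len;
            memcpy(buf + *offset, &sh, sizeof(sh));
            *offset += sizeof(sh);
            memcpy(buf + *offset, item, item_len);
            *offset += item_len;
            return 0;
    }

    int main(void)
    {
            char buf[ARGS_BUFSIZE];
            size_t off = 0;
            struct search_header hdr = { 256, 0, 1, 0 };

            copy_item(buf, &off, &hdr, "payload", 7);
            printf("used %zu bytes\n", off);
            return 0;
    }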
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 0740621daf6c..0525e1389f5b 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
| @@ -1050,7 +1050,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, | |||
| 1050 | } | 1050 | } |
| 1051 | 1051 | ||
| 1052 | /* put a new bio on the list */ | 1052 | /* put a new bio on the list */ |
| 1053 | bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); | 1053 | bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); |
| 1054 | if (!bio) | 1054 | if (!bio) |
| 1055 | return -ENOMEM; | 1055 | return -ENOMEM; |
| 1056 | 1056 | ||
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 704a1b8d2a2b..395b82031a42 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
| @@ -1773,7 +1773,7 @@ again: | |||
| 1773 | if (!eb || !extent_buffer_uptodate(eb)) { | 1773 | if (!eb || !extent_buffer_uptodate(eb)) { |
| 1774 | ret = (!eb) ? -ENOMEM : -EIO; | 1774 | ret = (!eb) ? -ENOMEM : -EIO; |
| 1775 | free_extent_buffer(eb); | 1775 | free_extent_buffer(eb); |
| 1776 | return ret; | 1776 | break; |
| 1777 | } | 1777 | } |
| 1778 | btrfs_tree_lock(eb); | 1778 | btrfs_tree_lock(eb); |
| 1779 | if (cow) { | 1779 | if (cow) { |
| @@ -3350,6 +3350,11 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, | |||
| 3350 | } | 3350 | } |
| 3351 | 3351 | ||
| 3352 | truncate: | 3352 | truncate: |
| 3353 | ret = btrfs_check_trunc_cache_free_space(root, | ||
| 3354 | &fs_info->global_block_rsv); | ||
| 3355 | if (ret) | ||
| 3356 | goto out; | ||
| 3357 | |||
| 3353 | path = btrfs_alloc_path(); | 3358 | path = btrfs_alloc_path(); |
| 3354 | if (!path) { | 3359 | if (!path) { |
| 3355 | ret = -ENOMEM; | 3360 | ret = -ENOMEM; |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f489e24659a4..79bd479317cb 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -1296,7 +1296,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | WARN_ON(!page->page); | 1298 | WARN_ON(!page->page); |
| 1299 | bio = bio_alloc(GFP_NOFS, 1); | 1299 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
| 1300 | if (!bio) { | 1300 | if (!bio) { |
| 1301 | page->io_error = 1; | 1301 | page->io_error = 1; |
| 1302 | sblock->no_io_error_seen = 0; | 1302 | sblock->no_io_error_seen = 0; |
| @@ -1431,7 +1431,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | |||
| 1431 | return -EIO; | 1431 | return -EIO; |
| 1432 | } | 1432 | } |
| 1433 | 1433 | ||
| 1434 | bio = bio_alloc(GFP_NOFS, 1); | 1434 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
| 1435 | if (!bio) | 1435 | if (!bio) |
| 1436 | return -EIO; | 1436 | return -EIO; |
| 1437 | bio->bi_bdev = page_bad->dev->bdev; | 1437 | bio->bi_bdev = page_bad->dev->bdev; |
| @@ -1522,7 +1522,7 @@ again: | |||
| 1522 | sbio->dev = wr_ctx->tgtdev; | 1522 | sbio->dev = wr_ctx->tgtdev; |
| 1523 | bio = sbio->bio; | 1523 | bio = sbio->bio; |
| 1524 | if (!bio) { | 1524 | if (!bio) { |
| 1525 | bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); | 1525 | bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); |
| 1526 | if (!bio) { | 1526 | if (!bio) { |
| 1527 | mutex_unlock(&wr_ctx->wr_lock); | 1527 | mutex_unlock(&wr_ctx->wr_lock); |
| 1528 | return -ENOMEM; | 1528 | return -ENOMEM; |
| @@ -1930,7 +1930,7 @@ again: | |||
| 1930 | sbio->dev = spage->dev; | 1930 | sbio->dev = spage->dev; |
| 1931 | bio = sbio->bio; | 1931 | bio = sbio->bio; |
| 1932 | if (!bio) { | 1932 | if (!bio) { |
| 1933 | bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); | 1933 | bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); |
| 1934 | if (!bio) | 1934 | if (!bio) |
| 1935 | return -ENOMEM; | 1935 | return -ENOMEM; |
| 1936 | sbio->bio = bio; | 1936 | sbio->bio = bio; |
| @@ -3307,7 +3307,7 @@ static int write_page_nocow(struct scrub_ctx *sctx, | |||
| 3307 | "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n"); | 3307 | "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n"); |
| 3308 | return -EIO; | 3308 | return -EIO; |
| 3309 | } | 3309 | } |
| 3310 | bio = bio_alloc(GFP_NOFS, 1); | 3310 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
| 3311 | if (!bio) { | 3311 | if (!bio) { |
| 3312 | spin_lock(&sctx->stat_lock); | 3312 | spin_lock(&sctx->stat_lock); |
| 3313 | sctx->stat.malloc_errors++; | 3313 | sctx->stat.malloc_errors++; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a4807ced23cc..f0857e092a3c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -1263,6 +1263,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) | |||
| 1263 | 1263 | ||
| 1264 | btrfs_dev_replace_suspend_for_unmount(fs_info); | 1264 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
| 1265 | btrfs_scrub_cancel(fs_info); | 1265 | btrfs_scrub_cancel(fs_info); |
| 1266 | btrfs_pause_balance(fs_info); | ||
| 1266 | 1267 | ||
| 1267 | ret = btrfs_commit_super(root); | 1268 | ret = btrfs_commit_super(root); |
| 1268 | if (ret) | 1269 | if (ret) |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0e925ced971b..8bffb9174afb 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -3120,14 +3120,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl, | |||
| 3120 | allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; | 3120 | allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; |
| 3121 | if (num_devices == 1) | 3121 | if (num_devices == 1) |
| 3122 | allowed |= BTRFS_BLOCK_GROUP_DUP; | 3122 | allowed |= BTRFS_BLOCK_GROUP_DUP; |
| 3123 | else if (num_devices < 4) | 3123 | else if (num_devices > 1) |
| 3124 | allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); | 3124 | allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); |
| 3125 | else | 3125 | if (num_devices > 2) |
| 3126 | allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | | 3126 | allowed |= BTRFS_BLOCK_GROUP_RAID5; |
| 3127 | BTRFS_BLOCK_GROUP_RAID10 | | 3127 | if (num_devices > 3) |
| 3128 | BTRFS_BLOCK_GROUP_RAID5 | | 3128 | allowed |= (BTRFS_BLOCK_GROUP_RAID10 | |
| 3129 | BTRFS_BLOCK_GROUP_RAID6); | 3129 | BTRFS_BLOCK_GROUP_RAID6); |
| 3130 | |||
| 3131 | if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && | 3130 | if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && |
| 3132 | (!alloc_profile_is_valid(bctl->data.target, 1) || | 3131 | (!alloc_profile_is_valid(bctl->data.target, 1) || |
| 3133 | (bctl->data.target & ~allowed))) { | 3132 | (bctl->data.target & ~allowed))) { |
| @@ -5019,42 +5018,16 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | |||
| 5019 | return 0; | 5018 | return 0; |
| 5020 | } | 5019 | } |
| 5021 | 5020 | ||
| 5022 | static void *merge_stripe_index_into_bio_private(void *bi_private, | ||
| 5023 | unsigned int stripe_index) | ||
| 5024 | { | ||
| 5025 | /* | ||
| 5026 | * with single, dup, RAID0, RAID1 and RAID10, stripe_index is | ||
| 5027 | * at most 1. | ||
| 5028 | * The alternative solution (instead of stealing bits from the | ||
| 5029 | * pointer) would be to allocate an intermediate structure | ||
| 5030 | * that contains the old private pointer plus the stripe_index. | ||
| 5031 | */ | ||
| 5032 | BUG_ON((((uintptr_t)bi_private) & 3) != 0); | ||
| 5033 | BUG_ON(stripe_index > 3); | ||
| 5034 | return (void *)(((uintptr_t)bi_private) | stripe_index); | ||
| 5035 | } | ||
| 5036 | |||
| 5037 | static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private) | ||
| 5038 | { | ||
| 5039 | return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3)); | ||
| 5040 | } | ||
| 5041 | |||
| 5042 | static unsigned int extract_stripe_index_from_bio_private(void *bi_private) | ||
| 5043 | { | ||
| 5044 | return (unsigned int)((uintptr_t)bi_private) & 3; | ||
| 5045 | } | ||
| 5046 | |||
| 5047 | static void btrfs_end_bio(struct bio *bio, int err) | 5021 | static void btrfs_end_bio(struct bio *bio, int err) |
| 5048 | { | 5022 | { |
| 5049 | struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private); | 5023 | struct btrfs_bio *bbio = bio->bi_private; |
| 5050 | int is_orig_bio = 0; | 5024 | int is_orig_bio = 0; |
| 5051 | 5025 | ||
| 5052 | if (err) { | 5026 | if (err) { |
| 5053 | atomic_inc(&bbio->error); | 5027 | atomic_inc(&bbio->error); |
| 5054 | if (err == -EIO || err == -EREMOTEIO) { | 5028 | if (err == -EIO || err == -EREMOTEIO) { |
| 5055 | unsigned int stripe_index = | 5029 | unsigned int stripe_index = |
| 5056 | extract_stripe_index_from_bio_private( | 5030 | btrfs_io_bio(bio)->stripe_index; |
| 5057 | bio->bi_private); | ||
| 5058 | struct btrfs_device *dev; | 5031 | struct btrfs_device *dev; |
| 5059 | 5032 | ||
| 5060 | BUG_ON(stripe_index >= bbio->num_stripes); | 5033 | BUG_ON(stripe_index >= bbio->num_stripes); |
| @@ -5084,8 +5057,7 @@ static void btrfs_end_bio(struct bio *bio, int err) | |||
| 5084 | } | 5057 | } |
| 5085 | bio->bi_private = bbio->private; | 5058 | bio->bi_private = bbio->private; |
| 5086 | bio->bi_end_io = bbio->end_io; | 5059 | bio->bi_end_io = bbio->end_io; |
| 5087 | bio->bi_bdev = (struct block_device *) | 5060 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; |
| 5088 | (unsigned long)bbio->mirror_num; | ||
| 5089 | /* only send an error to the higher layers if it is | 5061 | /* only send an error to the higher layers if it is |
| 5090 | * beyond the tolerance of the btrfs bio | 5062 | * beyond the tolerance of the btrfs bio |
| 5091 | */ | 5063 | */ |
| @@ -5211,8 +5183,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | |||
| 5211 | struct btrfs_device *dev = bbio->stripes[dev_nr].dev; | 5183 | struct btrfs_device *dev = bbio->stripes[dev_nr].dev; |
| 5212 | 5184 | ||
| 5213 | bio->bi_private = bbio; | 5185 | bio->bi_private = bbio; |
| 5214 | bio->bi_private = merge_stripe_index_into_bio_private( | 5186 | btrfs_io_bio(bio)->stripe_index = dev_nr; |
| 5215 | bio->bi_private, (unsigned int)dev_nr); | ||
| 5216 | bio->bi_end_io = btrfs_end_bio; | 5187 | bio->bi_end_io = btrfs_end_bio; |
| 5217 | bio->bi_sector = physical >> 9; | 5188 | bio->bi_sector = physical >> 9; |
| 5218 | #ifdef DEBUG | 5189 | #ifdef DEBUG |
| @@ -5273,8 +5244,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | |||
| 5273 | if (atomic_dec_and_test(&bbio->stripes_pending)) { | 5244 | if (atomic_dec_and_test(&bbio->stripes_pending)) { |
| 5274 | bio->bi_private = bbio->private; | 5245 | bio->bi_private = bbio->private; |
| 5275 | bio->bi_end_io = bbio->end_io; | 5246 | bio->bi_end_io = bbio->end_io; |
| 5276 | bio->bi_bdev = (struct block_device *) | 5247 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; |
| 5277 | (unsigned long)bbio->mirror_num; | ||
| 5278 | bio->bi_sector = logical >> 9; | 5248 | bio->bi_sector = logical >> 9; |
| 5279 | kfree(bbio); | 5249 | kfree(bbio); |
| 5280 | bio_endio(bio, -EIO); | 5250 | bio_endio(bio, -EIO); |
| @@ -5352,7 +5322,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
| 5352 | } | 5322 | } |
| 5353 | 5323 | ||
| 5354 | if (dev_nr < total_devs - 1) { | 5324 | if (dev_nr < total_devs - 1) { |
| 5355 | bio = bio_clone(first_bio, GFP_NOFS); | 5325 | bio = btrfs_bio_clone(first_bio, GFP_NOFS); |
| 5356 | BUG_ON(!bio); /* -ENOMEM */ | 5326 | BUG_ON(!bio); /* -ENOMEM */ |
| 5357 | } else { | 5327 | } else { |
| 5358 | bio = first_bio; | 5328 | bio = first_bio; |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 845ccbb0d2e3..f6247e2a47f7 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
| @@ -152,6 +152,26 @@ struct btrfs_fs_devices { | |||
| 152 | int rotating; | 152 | int rotating; |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | /* | ||
| 156 | * we need the mirror number and stripe index to be passed around | ||
| 157 | * the call chain while we are processing end_io (especially errors). | ||
| 158 | * Really, what we need is a btrfs_bio structure that has this info | ||
| 159 | * and is properly sized with its stripe array, but we're not there | ||
| 160 | * quite yet. We have our own btrfs bioset, and all of the bios | ||
| 161 | * we allocate are actually btrfs_io_bios. We'll cram as much of | ||
| 162 | * struct btrfs_bio as we can into this over time. | ||
| 163 | */ | ||
| 164 | struct btrfs_io_bio { | ||
| 165 | unsigned long mirror_num; | ||
| 166 | unsigned long stripe_index; | ||
| 167 | struct bio bio; | ||
| 168 | }; | ||
| 169 | |||
| 170 | static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio) | ||
| 171 | { | ||
| 172 | return container_of(bio, struct btrfs_io_bio, bio); | ||
| 173 | } | ||
| 174 | |||
| 155 | struct btrfs_bio_stripe { | 175 | struct btrfs_bio_stripe { |
| 156 | struct btrfs_device *dev; | 176 | struct btrfs_device *dev; |
| 157 | u64 physical; | 177 | u64 physical; |
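The volumes.h hunk above is the heart of the change: per-bio btrfs metadata now rides in a wrapper struct that embeds the bio and is recovered with container_of(), instead of being smuggled through bi_private tag bits or bi_bdev. A minimal sketch of the pattern follows; the names are illustrative only (not from the patch), and it assumes the bios are allocated from a bioset whose front_pad leaves room for the wrapper fields in front of the embedded bio.

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative wrapper in the spirit of struct btrfs_io_bio. */
struct demo_io_bio {
	unsigned long mirror_num;
	unsigned long stripe_index;
	struct bio bio;			/* embedded bio, last member */
};

static inline struct demo_io_bio *demo_io_bio(struct bio *bio)
{
	return container_of(bio, struct demo_io_bio, bio);
}

/*
 * Completion handler: the extra fields travel with the bio itself, so
 * bi_private stays free for the caller and no low-bit tagging is needed.
 */
static void demo_end_io(struct bio *bio, int err)
{
	struct demo_io_bio *dbio = demo_io_bio(bio);

	pr_debug("stripe %lu finished, mirror %lu, err %d\n",
		 dbio->stripe_index, dbio->mirror_num, err);
}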
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0aabb344b02e..5aae3d12d400 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -209,7 +209,6 @@ typedef struct ext4_io_end { | |||
| 209 | ssize_t size; /* size of the extent */ | 209 | ssize_t size; /* size of the extent */ |
| 210 | struct kiocb *iocb; /* iocb struct for AIO */ | 210 | struct kiocb *iocb; /* iocb struct for AIO */ |
| 211 | int result; /* error value for AIO */ | 211 | int result; /* error value for AIO */ |
| 212 | atomic_t count; /* reference counter */ | ||
| 213 | } ext4_io_end_t; | 212 | } ext4_io_end_t; |
| 214 | 213 | ||
| 215 | struct ext4_io_submit { | 214 | struct ext4_io_submit { |
| @@ -2651,14 +2650,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
| 2651 | 2650 | ||
| 2652 | /* page-io.c */ | 2651 | /* page-io.c */ |
| 2653 | extern int __init ext4_init_pageio(void); | 2652 | extern int __init ext4_init_pageio(void); |
| 2653 | extern void ext4_add_complete_io(ext4_io_end_t *io_end); | ||
| 2654 | extern void ext4_exit_pageio(void); | 2654 | extern void ext4_exit_pageio(void); |
| 2655 | extern void ext4_ioend_shutdown(struct inode *); | 2655 | extern void ext4_ioend_shutdown(struct inode *); |
| 2656 | extern void ext4_free_io_end(ext4_io_end_t *io); | ||
| 2656 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); | 2657 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); |
| 2657 | extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end); | ||
| 2658 | extern int ext4_put_io_end(ext4_io_end_t *io_end); | ||
| 2659 | extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); | ||
| 2660 | extern void ext4_io_submit_init(struct ext4_io_submit *io, | ||
| 2661 | struct writeback_control *wbc); | ||
| 2662 | extern void ext4_end_io_work(struct work_struct *work); | 2658 | extern void ext4_end_io_work(struct work_struct *work); |
| 2663 | extern void ext4_io_submit(struct ext4_io_submit *io); | 2659 | extern void ext4_io_submit(struct ext4_io_submit *io); |
| 2664 | extern int ext4_bio_write_page(struct ext4_io_submit *io, | 2660 | extern int ext4_bio_write_page(struct ext4_io_submit *io, |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 107936db244e..bc0f1910b9cf 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -3642,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode, | |||
| 3642 | { | 3642 | { |
| 3643 | struct extent_status es; | 3643 | struct extent_status es; |
| 3644 | 3644 | ||
| 3645 | ext4_es_find_delayed_extent(inode, lblk_start, &es); | 3645 | ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); |
| 3646 | if (es.es_len == 0) | 3646 | if (es.es_len == 0) |
| 3647 | return 0; /* there is no delay extent in this tree */ | 3647 | return 0; /* there is no delay extent in this tree */ |
| 3648 | else if (es.es_lblk <= lblk_start && | 3648 | else if (es.es_lblk <= lblk_start && |
| @@ -4608,9 +4608,10 @@ static int ext4_find_delayed_extent(struct inode *inode, | |||
| 4608 | struct extent_status es; | 4608 | struct extent_status es; |
| 4609 | ext4_lblk_t block, next_del; | 4609 | ext4_lblk_t block, next_del; |
| 4610 | 4610 | ||
| 4611 | ext4_es_find_delayed_extent(inode, newes->es_lblk, &es); | ||
| 4612 | |||
| 4613 | if (newes->es_pblk == 0) { | 4611 | if (newes->es_pblk == 0) { |
| 4612 | ext4_es_find_delayed_extent_range(inode, newes->es_lblk, | ||
| 4613 | newes->es_lblk + newes->es_len - 1, &es); | ||
| 4614 | |||
| 4614 | /* | 4615 | /* |
| 4615 | * No extent in extent-tree contains block @newes->es_pblk, | 4616 | * No extent in extent-tree contains block @newes->es_pblk, |
| 4616 | * then the block may stay in 1)a hole or 2)delayed-extent. | 4617 | * then the block may stay in 1)a hole or 2)delayed-extent. |
| @@ -4630,7 +4631,7 @@ static int ext4_find_delayed_extent(struct inode *inode, | |||
| 4630 | } | 4631 | } |
| 4631 | 4632 | ||
| 4632 | block = newes->es_lblk + newes->es_len; | 4633 | block = newes->es_lblk + newes->es_len; |
| 4633 | ext4_es_find_delayed_extent(inode, block, &es); | 4634 | ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); |
| 4634 | if (es.es_len == 0) | 4635 | if (es.es_len == 0) |
| 4635 | next_del = EXT_MAX_BLOCKS; | 4636 | next_del = EXT_MAX_BLOCKS; |
| 4636 | else | 4637 | else |
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index fe3337a85ede..e6941e622d31 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c | |||
| @@ -232,14 +232,16 @@ static struct extent_status *__es_tree_search(struct rb_root *root, | |||
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | /* | 234 | /* |
| 235 | * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk | 235 | * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering |
| 236 | * if it exists, otherwise, the next extent after @es->lblk. | 236 | * @es->lblk if it exists, otherwise, the next extent after @es->lblk. |
| 237 | * | 237 | * |
| 238 | * @inode: the inode which owns delayed extents | 238 | * @inode: the inode which owns delayed extents |
| 239 | * @lblk: the offset where we start to search | 239 | * @lblk: the offset where we start to search |
| 240 | * @end: the offset where we stop to search | ||
| 240 | * @es: delayed extent that we found | 241 | * @es: delayed extent that we found |
| 241 | */ | 242 | */ |
| 242 | void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, | 243 | void ext4_es_find_delayed_extent_range(struct inode *inode, |
| 244 | ext4_lblk_t lblk, ext4_lblk_t end, | ||
| 243 | struct extent_status *es) | 245 | struct extent_status *es) |
| 244 | { | 246 | { |
| 245 | struct ext4_es_tree *tree = NULL; | 247 | struct ext4_es_tree *tree = NULL; |
| @@ -247,7 +249,8 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, | |||
| 247 | struct rb_node *node; | 249 | struct rb_node *node; |
| 248 | 250 | ||
| 249 | BUG_ON(es == NULL); | 251 | BUG_ON(es == NULL); |
| 250 | trace_ext4_es_find_delayed_extent_enter(inode, lblk); | 252 | BUG_ON(end < lblk); |
| 253 | trace_ext4_es_find_delayed_extent_range_enter(inode, lblk); | ||
| 251 | 254 | ||
| 252 | read_lock(&EXT4_I(inode)->i_es_lock); | 255 | read_lock(&EXT4_I(inode)->i_es_lock); |
| 253 | tree = &EXT4_I(inode)->i_es_tree; | 256 | tree = &EXT4_I(inode)->i_es_tree; |
| @@ -270,6 +273,10 @@ out: | |||
| 270 | if (es1 && !ext4_es_is_delayed(es1)) { | 273 | if (es1 && !ext4_es_is_delayed(es1)) { |
| 271 | while ((node = rb_next(&es1->rb_node)) != NULL) { | 274 | while ((node = rb_next(&es1->rb_node)) != NULL) { |
| 272 | es1 = rb_entry(node, struct extent_status, rb_node); | 275 | es1 = rb_entry(node, struct extent_status, rb_node); |
| 276 | if (es1->es_lblk > end) { | ||
| 277 | es1 = NULL; | ||
| 278 | break; | ||
| 279 | } | ||
| 273 | if (ext4_es_is_delayed(es1)) | 280 | if (ext4_es_is_delayed(es1)) |
| 274 | break; | 281 | break; |
| 275 | } | 282 | } |
| @@ -285,7 +292,7 @@ out: | |||
| 285 | read_unlock(&EXT4_I(inode)->i_es_lock); | 292 | read_unlock(&EXT4_I(inode)->i_es_lock); |
| 286 | 293 | ||
| 287 | ext4_es_lru_add(inode); | 294 | ext4_es_lru_add(inode); |
| 288 | trace_ext4_es_find_delayed_extent_exit(inode, es); | 295 | trace_ext4_es_find_delayed_extent_range_exit(inode, es); |
| 289 | } | 296 | } |
| 290 | 297 | ||
| 291 | static struct extent_status * | 298 | static struct extent_status * |
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index d8e2d4dc311e..f740eb03b707 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h | |||
| @@ -62,7 +62,8 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, | |||
| 62 | unsigned long long status); | 62 | unsigned long long status); |
| 63 | extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, | 63 | extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, |
| 64 | ext4_lblk_t len); | 64 | ext4_lblk_t len); |
| 65 | extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, | 65 | extern void ext4_es_find_delayed_extent_range(struct inode *inode, |
| 66 | ext4_lblk_t lblk, ext4_lblk_t end, | ||
| 66 | struct extent_status *es); | 67 | struct extent_status *es); |
| 67 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, | 68 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, |
| 68 | struct extent_status *es); | 69 | struct extent_status *es); |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 4959e29573b6..b1b4d51b5d86 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -465,7 +465,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) | |||
| 465 | * If there is a delay extent at this offset, | 465 | * If there is a delay extent at this offset, |
| 466 | * it will be as a data. | 466 | * it will be as a data. |
| 467 | */ | 467 | */ |
| 468 | ext4_es_find_delayed_extent(inode, last, &es); | 468 | ext4_es_find_delayed_extent_range(inode, last, last, &es); |
| 469 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { | 469 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { |
| 470 | if (last != start) | 470 | if (last != start) |
| 471 | dataoff = last << blkbits; | 471 | dataoff = last << blkbits; |
| @@ -548,7 +548,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) | |||
| 548 | * If there is a delay extent at this offset, | 548 | * If there is a delay extent at this offset, |
| 549 | * we will skip this extent. | 549 | * we will skip this extent. |
| 550 | */ | 550 | */ |
| 551 | ext4_es_find_delayed_extent(inode, last, &es); | 551 | ext4_es_find_delayed_extent_range(inode, last, last, &es); |
| 552 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { | 552 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { |
| 553 | last = es.es_lblk + es.es_len; | 553 | last = es.es_lblk + es.es_len; |
| 554 | holeoff = last << blkbits; | 554 | holeoff = last << blkbits; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 0723774bdfb5..d6382b89ecbd 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1488,10 +1488,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd, | |||
| 1488 | struct ext4_io_submit io_submit; | 1488 | struct ext4_io_submit io_submit; |
| 1489 | 1489 | ||
| 1490 | BUG_ON(mpd->next_page <= mpd->first_page); | 1490 | BUG_ON(mpd->next_page <= mpd->first_page); |
| 1491 | ext4_io_submit_init(&io_submit, mpd->wbc); | 1491 | memset(&io_submit, 0, sizeof(io_submit)); |
| 1492 | io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); | ||
| 1493 | if (!io_submit.io_end) | ||
| 1494 | return -ENOMEM; | ||
| 1495 | /* | 1492 | /* |
| 1496 | * We need to start from the first_page to the next_page - 1 | 1493 | * We need to start from the first_page to the next_page - 1 |
| 1497 | * to make sure we also write the mapped dirty buffer_heads. | 1494 | * to make sure we also write the mapped dirty buffer_heads. |
| @@ -1579,8 +1576,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd, | |||
| 1579 | pagevec_release(&pvec); | 1576 | pagevec_release(&pvec); |
| 1580 | } | 1577 | } |
| 1581 | ext4_io_submit(&io_submit); | 1578 | ext4_io_submit(&io_submit); |
| 1582 | /* Drop io_end reference we got from init */ | ||
| 1583 | ext4_put_io_end_defer(io_submit.io_end); | ||
| 1584 | return ret; | 1579 | return ret; |
| 1585 | } | 1580 | } |
| 1586 | 1581 | ||
| @@ -2239,16 +2234,9 @@ static int ext4_writepage(struct page *page, | |||
| 2239 | */ | 2234 | */ |
| 2240 | return __ext4_journalled_writepage(page, len); | 2235 | return __ext4_journalled_writepage(page, len); |
| 2241 | 2236 | ||
| 2242 | ext4_io_submit_init(&io_submit, wbc); | 2237 | memset(&io_submit, 0, sizeof(io_submit)); |
| 2243 | io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); | ||
| 2244 | if (!io_submit.io_end) { | ||
| 2245 | redirty_page_for_writepage(wbc, page); | ||
| 2246 | return -ENOMEM; | ||
| 2247 | } | ||
| 2248 | ret = ext4_bio_write_page(&io_submit, page, len, wbc); | 2238 | ret = ext4_bio_write_page(&io_submit, page, len, wbc); |
| 2249 | ext4_io_submit(&io_submit); | 2239 | ext4_io_submit(&io_submit); |
| 2250 | /* Drop io_end reference we got from init */ | ||
| 2251 | ext4_put_io_end_defer(io_submit.io_end); | ||
| 2252 | return ret; | 2240 | return ret; |
| 2253 | } | 2241 | } |
| 2254 | 2242 | ||
| @@ -3079,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 3079 | struct inode *inode = file_inode(iocb->ki_filp); | 3067 | struct inode *inode = file_inode(iocb->ki_filp); |
| 3080 | ext4_io_end_t *io_end = iocb->private; | 3068 | ext4_io_end_t *io_end = iocb->private; |
| 3081 | 3069 | ||
| 3082 | /* if not async direct IO just return */ | 3070 | /* if not async direct IO or dio with 0 bytes write, just return */ |
| 3083 | if (!io_end) { | 3071 | if (!io_end || !size) |
| 3084 | inode_dio_done(inode); | 3072 | goto out; |
| 3085 | if (is_async) | ||
| 3086 | aio_complete(iocb, ret, 0); | ||
| 3087 | return; | ||
| 3088 | } | ||
| 3089 | 3073 | ||
| 3090 | ext_debug("ext4_end_io_dio(): io_end 0x%p " | 3074 | ext_debug("ext4_end_io_dio(): io_end 0x%p " |
| 3091 | "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", | 3075 | "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", |
| @@ -3093,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
| 3093 | size); | 3077 | size); |
| 3094 | 3078 | ||
| 3095 | iocb->private = NULL; | 3079 | iocb->private = NULL; |
| 3080 | |||
| 3081 | /* if not aio dio with unwritten extents, just free io and return */ | ||
| 3082 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { | ||
| 3083 | ext4_free_io_end(io_end); | ||
| 3084 | out: | ||
| 3085 | inode_dio_done(inode); | ||
| 3086 | if (is_async) | ||
| 3087 | aio_complete(iocb, ret, 0); | ||
| 3088 | return; | ||
| 3089 | } | ||
| 3090 | |||
| 3096 | io_end->offset = offset; | 3091 | io_end->offset = offset; |
| 3097 | io_end->size = size; | 3092 | io_end->size = size; |
| 3098 | if (is_async) { | 3093 | if (is_async) { |
| 3099 | io_end->iocb = iocb; | 3094 | io_end->iocb = iocb; |
| 3100 | io_end->result = ret; | 3095 | io_end->result = ret; |
| 3101 | } | 3096 | } |
| 3102 | ext4_put_io_end_defer(io_end); | 3097 | |
| 3098 | ext4_add_complete_io(io_end); | ||
| 3103 | } | 3099 | } |
| 3104 | 3100 | ||
| 3105 | /* | 3101 | /* |
| @@ -3133,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3133 | get_block_t *get_block_func = NULL; | 3129 | get_block_t *get_block_func = NULL; |
| 3134 | int dio_flags = 0; | 3130 | int dio_flags = 0; |
| 3135 | loff_t final_size = offset + count; | 3131 | loff_t final_size = offset + count; |
| 3136 | ext4_io_end_t *io_end = NULL; | ||
| 3137 | 3132 | ||
| 3138 | /* Use the old path for reads and writes beyond i_size. */ | 3133 | /* Use the old path for reads and writes beyond i_size. */ |
| 3139 | if (rw != WRITE || final_size > inode->i_size) | 3134 | if (rw != WRITE || final_size > inode->i_size) |
| @@ -3172,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3172 | iocb->private = NULL; | 3167 | iocb->private = NULL; |
| 3173 | ext4_inode_aio_set(inode, NULL); | 3168 | ext4_inode_aio_set(inode, NULL); |
| 3174 | if (!is_sync_kiocb(iocb)) { | 3169 | if (!is_sync_kiocb(iocb)) { |
| 3175 | io_end = ext4_init_io_end(inode, GFP_NOFS); | 3170 | ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); |
| 3176 | if (!io_end) { | 3171 | if (!io_end) { |
| 3177 | ret = -ENOMEM; | 3172 | ret = -ENOMEM; |
| 3178 | goto retake_lock; | 3173 | goto retake_lock; |
| 3179 | } | 3174 | } |
| 3180 | io_end->flag |= EXT4_IO_END_DIRECT; | 3175 | io_end->flag |= EXT4_IO_END_DIRECT; |
| 3181 | /* | 3176 | iocb->private = io_end; |
| 3182 | * Grab reference for DIO. Will be dropped in ext4_end_io_dio() | ||
| 3183 | */ | ||
| 3184 | iocb->private = ext4_get_io_end(io_end); | ||
| 3185 | /* | 3177 | /* |
| 3186 | * we save the io structure for current async direct | 3178 | * we save the io structure for current async direct |
| 3187 | * IO, so that later ext4_map_blocks() could flag the | 3179 | * IO, so that later ext4_map_blocks() could flag the |
| @@ -3205,27 +3197,26 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
| 3205 | NULL, | 3197 | NULL, |
| 3206 | dio_flags); | 3198 | dio_flags); |
| 3207 | 3199 | ||
| 3200 | if (iocb->private) | ||
| 3201 | ext4_inode_aio_set(inode, NULL); | ||
| 3208 | /* | 3202 | /* |
| 3209 | * Put our reference to io_end. This can free the io_end structure e.g. | 3203 | * The io_end structure takes a reference to the inode, that |
| 3210 | * in sync IO case or in case of error. It can even perform extent | 3204 | * structure needs to be destroyed and the reference to the |
| 3211 | * conversion if all bios we submitted finished before we got here. | 3205 | * inode need to be dropped, when IO is complete, even with 0 |
| 3212 | * Note that in that case iocb->private can be already set to NULL | 3206 | * byte write, or failed. |
| 3213 | * here. | 3207 | * |
| 3208 | * In the successful AIO DIO case, the io_end structure will | ||
| 3209 | * be destroyed and the reference to the inode will be dropped | ||
| 3210 | * after the end_io call back function is called. | ||
| 3211 | * | ||
| 3212 | * In the case there is 0 byte write, or error case, since VFS | ||
| 3213 | * direct IO won't invoke the end_io call back function, we | ||
| 3214 | * need to free the end_io structure here. | ||
| 3214 | */ | 3215 | */ |
| 3215 | if (io_end) { | 3216 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { |
| 3216 | ext4_inode_aio_set(inode, NULL); | 3217 | ext4_free_io_end(iocb->private); |
| 3217 | ext4_put_io_end(io_end); | 3218 | iocb->private = NULL; |
| 3218 | /* | 3219 | } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, |
| 3219 | * In case of error or no write ext4_end_io_dio() was not | ||
| 3220 | * called so we have to put iocb's reference. | ||
| 3221 | */ | ||
| 3222 | if (ret <= 0 && ret != -EIOCBQUEUED) { | ||
| 3223 | WARN_ON(iocb->private != io_end); | ||
| 3224 | ext4_put_io_end(io_end); | ||
| 3225 | iocb->private = NULL; | ||
| 3226 | } | ||
| 3227 | } | ||
| 3228 | if (ret > 0 && !overwrite && ext4_test_inode_state(inode, | ||
| 3229 | EXT4_STATE_DIO_UNWRITTEN)) { | 3220 | EXT4_STATE_DIO_UNWRITTEN)) { |
| 3230 | int err; | 3221 | int err; |
| 3231 | /* | 3222 | /* |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index b1ed9e07434b..def84082a9a9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -2105,7 +2105,11 @@ repeat: | |||
| 2105 | group = ac->ac_g_ex.fe_group; | 2105 | group = ac->ac_g_ex.fe_group; |
| 2106 | 2106 | ||
| 2107 | for (i = 0; i < ngroups; group++, i++) { | 2107 | for (i = 0; i < ngroups; group++, i++) { |
| 2108 | if (group == ngroups) | 2108 | /* |
| 2109 | * Artificially restricted ngroups for non-extent | ||
| 2110 | * files makes group > ngroups possible on first loop. | ||
| 2111 | */ | ||
| 2112 | if (group >= ngroups) | ||
| 2109 | group = 0; | 2113 | group = 0; |
| 2110 | 2114 | ||
| 2111 | /* This now checks without needing the buddy page */ | 2115 | /* This now checks without needing the buddy page */ |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 19599bded62a..4acf1f78881b 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
| @@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode) | |||
| 62 | cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); | 62 | cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | static void ext4_release_io_end(ext4_io_end_t *io_end) | 65 | void ext4_free_io_end(ext4_io_end_t *io) |
| 66 | { | 66 | { |
| 67 | BUG_ON(!list_empty(&io_end->list)); | 67 | BUG_ON(!io); |
| 68 | BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); | 68 | BUG_ON(!list_empty(&io->list)); |
| 69 | 69 | BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN); | |
| 70 | if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count)) | ||
| 71 | wake_up_all(ext4_ioend_wq(io_end->inode)); | ||
| 72 | if (io_end->flag & EXT4_IO_END_DIRECT) | ||
| 73 | inode_dio_done(io_end->inode); | ||
| 74 | if (io_end->iocb) | ||
| 75 | aio_complete(io_end->iocb, io_end->result, 0); | ||
| 76 | kmem_cache_free(io_end_cachep, io_end); | ||
| 77 | } | ||
| 78 | |||
| 79 | static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end) | ||
| 80 | { | ||
| 81 | struct inode *inode = io_end->inode; | ||
| 82 | 70 | ||
| 83 | io_end->flag &= ~EXT4_IO_END_UNWRITTEN; | 71 | if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count)) |
| 84 | /* Wake up anyone waiting on unwritten extent conversion */ | 72 | wake_up_all(ext4_ioend_wq(io->inode)); |
| 85 | if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) | 73 | kmem_cache_free(io_end_cachep, io); |
| 86 | wake_up_all(ext4_ioend_wq(inode)); | ||
| 87 | } | 74 | } |
| 88 | 75 | ||
| 89 | /* check a range of space and convert unwritten extents to written. */ | 76 | /* check a range of space and convert unwritten extents to written. */ |
| @@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io) | |||
| 106 | "(inode %lu, offset %llu, size %zd, error %d)", | 93 | "(inode %lu, offset %llu, size %zd, error %d)", |
| 107 | inode->i_ino, offset, size, ret); | 94 | inode->i_ino, offset, size, ret); |
| 108 | } | 95 | } |
| 109 | ext4_clear_io_unwritten_flag(io); | 96 | /* Wake up anyone waiting on unwritten extent conversion */ |
| 110 | ext4_release_io_end(io); | 97 | if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) |
| 98 | wake_up_all(ext4_ioend_wq(inode)); | ||
| 99 | if (io->flag & EXT4_IO_END_DIRECT) | ||
| 100 | inode_dio_done(inode); | ||
| 101 | if (io->iocb) | ||
| 102 | aio_complete(io->iocb, io->result, 0); | ||
| 111 | return ret; | 103 | return ret; |
| 112 | } | 104 | } |
| 113 | 105 | ||
| @@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode) | |||
| 138 | } | 130 | } |
| 139 | 131 | ||
| 140 | /* Add the io_end to per-inode completed end_io list. */ | 132 | /* Add the io_end to per-inode completed end_io list. */ |
| 141 | static void ext4_add_complete_io(ext4_io_end_t *io_end) | 133 | void ext4_add_complete_io(ext4_io_end_t *io_end) |
| 142 | { | 134 | { |
| 143 | struct ext4_inode_info *ei = EXT4_I(io_end->inode); | 135 | struct ext4_inode_info *ei = EXT4_I(io_end->inode); |
| 144 | struct workqueue_struct *wq; | 136 | struct workqueue_struct *wq; |
| @@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode) | |||
| 175 | err = ext4_end_io(io); | 167 | err = ext4_end_io(io); |
| 176 | if (unlikely(!ret && err)) | 168 | if (unlikely(!ret && err)) |
| 177 | ret = err; | 169 | ret = err; |
| 170 | io->flag &= ~EXT4_IO_END_UNWRITTEN; | ||
| 171 | ext4_free_io_end(io); | ||
| 178 | } | 172 | } |
| 179 | return ret; | 173 | return ret; |
| 180 | } | 174 | } |
| @@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) | |||
| 206 | atomic_inc(&EXT4_I(inode)->i_ioend_count); | 200 | atomic_inc(&EXT4_I(inode)->i_ioend_count); |
| 207 | io->inode = inode; | 201 | io->inode = inode; |
| 208 | INIT_LIST_HEAD(&io->list); | 202 | INIT_LIST_HEAD(&io->list); |
| 209 | atomic_set(&io->count, 1); | ||
| 210 | } | 203 | } |
| 211 | return io; | 204 | return io; |
| 212 | } | 205 | } |
| 213 | 206 | ||
| 214 | void ext4_put_io_end_defer(ext4_io_end_t *io_end) | ||
| 215 | { | ||
| 216 | if (atomic_dec_and_test(&io_end->count)) { | ||
| 217 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) { | ||
| 218 | ext4_release_io_end(io_end); | ||
| 219 | return; | ||
| 220 | } | ||
| 221 | ext4_add_complete_io(io_end); | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | int ext4_put_io_end(ext4_io_end_t *io_end) | ||
| 226 | { | ||
| 227 | int err = 0; | ||
| 228 | |||
| 229 | if (atomic_dec_and_test(&io_end->count)) { | ||
| 230 | if (io_end->flag & EXT4_IO_END_UNWRITTEN) { | ||
| 231 | err = ext4_convert_unwritten_extents(io_end->inode, | ||
| 232 | io_end->offset, io_end->size); | ||
| 233 | ext4_clear_io_unwritten_flag(io_end); | ||
| 234 | } | ||
| 235 | ext4_release_io_end(io_end); | ||
| 236 | } | ||
| 237 | return err; | ||
| 238 | } | ||
| 239 | |||
| 240 | ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) | ||
| 241 | { | ||
| 242 | atomic_inc(&io_end->count); | ||
| 243 | return io_end; | ||
| 244 | } | ||
| 245 | |||
| 246 | /* | 207 | /* |
| 247 | * Print an buffer I/O error compatible with the fs/buffer.c. This | 208 | * Print an buffer I/O error compatible with the fs/buffer.c. This |
| 248 | * provides compatibility with dmesg scrapers that look for a specific | 209 | * provides compatibility with dmesg scrapers that look for a specific |
| @@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
| 325 | bi_sector >> (inode->i_blkbits - 9)); | 286 | bi_sector >> (inode->i_blkbits - 9)); |
| 326 | } | 287 | } |
| 327 | 288 | ||
| 328 | ext4_put_io_end_defer(io_end); | 289 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { |
| 290 | ext4_free_io_end(io_end); | ||
| 291 | return; | ||
| 292 | } | ||
| 293 | |||
| 294 | ext4_add_complete_io(io_end); | ||
| 329 | } | 295 | } |
| 330 | 296 | ||
| 331 | void ext4_io_submit(struct ext4_io_submit *io) | 297 | void ext4_io_submit(struct ext4_io_submit *io) |
| @@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io) | |||
| 339 | bio_put(io->io_bio); | 305 | bio_put(io->io_bio); |
| 340 | } | 306 | } |
| 341 | io->io_bio = NULL; | 307 | io->io_bio = NULL; |
| 342 | } | 308 | io->io_op = 0; |
| 343 | |||
| 344 | void ext4_io_submit_init(struct ext4_io_submit *io, | ||
| 345 | struct writeback_control *wbc) | ||
| 346 | { | ||
| 347 | io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | ||
| 348 | io->io_bio = NULL; | ||
| 349 | io->io_end = NULL; | 309 | io->io_end = NULL; |
| 350 | } | 310 | } |
| 351 | 311 | ||
| 352 | static int io_submit_init_bio(struct ext4_io_submit *io, | 312 | static int io_submit_init(struct ext4_io_submit *io, |
| 353 | struct buffer_head *bh) | 313 | struct inode *inode, |
| 314 | struct writeback_control *wbc, | ||
| 315 | struct buffer_head *bh) | ||
| 354 | { | 316 | { |
| 317 | ext4_io_end_t *io_end; | ||
| 318 | struct page *page = bh->b_page; | ||
| 355 | int nvecs = bio_get_nr_vecs(bh->b_bdev); | 319 | int nvecs = bio_get_nr_vecs(bh->b_bdev); |
| 356 | struct bio *bio; | 320 | struct bio *bio; |
| 357 | 321 | ||
| 322 | io_end = ext4_init_io_end(inode, GFP_NOFS); | ||
| 323 | if (!io_end) | ||
| 324 | return -ENOMEM; | ||
| 358 | bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); | 325 | bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); |
| 359 | bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); | 326 | bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); |
| 360 | bio->bi_bdev = bh->b_bdev; | 327 | bio->bi_bdev = bh->b_bdev; |
| 328 | bio->bi_private = io->io_end = io_end; | ||
| 361 | bio->bi_end_io = ext4_end_bio; | 329 | bio->bi_end_io = ext4_end_bio; |
| 362 | bio->bi_private = ext4_get_io_end(io->io_end); | 330 | |
| 363 | if (!io->io_end->size) | 331 | io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); |
| 364 | io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT) | 332 | |
| 365 | + bh_offset(bh); | ||
| 366 | io->io_bio = bio; | 333 | io->io_bio = bio; |
| 334 | io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | ||
| 367 | io->io_next_block = bh->b_blocknr; | 335 | io->io_next_block = bh->b_blocknr; |
| 368 | return 0; | 336 | return 0; |
| 369 | } | 337 | } |
| 370 | 338 | ||
| 371 | static int io_submit_add_bh(struct ext4_io_submit *io, | 339 | static int io_submit_add_bh(struct ext4_io_submit *io, |
| 372 | struct inode *inode, | 340 | struct inode *inode, |
| 341 | struct writeback_control *wbc, | ||
| 373 | struct buffer_head *bh) | 342 | struct buffer_head *bh) |
| 374 | { | 343 | { |
| 375 | ext4_io_end_t *io_end; | 344 | ext4_io_end_t *io_end; |
| @@ -380,18 +349,18 @@ submit_and_retry: | |||
| 380 | ext4_io_submit(io); | 349 | ext4_io_submit(io); |
| 381 | } | 350 | } |
| 382 | if (io->io_bio == NULL) { | 351 | if (io->io_bio == NULL) { |
| 383 | ret = io_submit_init_bio(io, bh); | 352 | ret = io_submit_init(io, inode, wbc, bh); |
| 384 | if (ret) | 353 | if (ret) |
| 385 | return ret; | 354 | return ret; |
| 386 | } | 355 | } |
| 387 | ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); | ||
| 388 | if (ret != bh->b_size) | ||
| 389 | goto submit_and_retry; | ||
| 390 | io_end = io->io_end; | 356 | io_end = io->io_end; |
| 391 | if (test_clear_buffer_uninit(bh)) | 357 | if (test_clear_buffer_uninit(bh)) |
| 392 | ext4_set_io_unwritten_flag(inode, io_end); | 358 | ext4_set_io_unwritten_flag(inode, io_end); |
| 393 | io_end->size += bh->b_size; | 359 | io->io_end->size += bh->b_size; |
| 394 | io->io_next_block++; | 360 | io->io_next_block++; |
| 361 | ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); | ||
| 362 | if (ret != bh->b_size) | ||
| 363 | goto submit_and_retry; | ||
| 395 | return 0; | 364 | return 0; |
| 396 | } | 365 | } |
| 397 | 366 | ||
| @@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, | |||
| 463 | do { | 432 | do { |
| 464 | if (!buffer_async_write(bh)) | 433 | if (!buffer_async_write(bh)) |
| 465 | continue; | 434 | continue; |
| 466 | ret = io_submit_add_bh(io, inode, bh); | 435 | ret = io_submit_add_bh(io, inode, wbc, bh); |
| 467 | if (ret) { | 436 | if (ret) { |
| 468 | /* | 437 | /* |
| 469 | * We only get here on ENOMEM. Not much else | 438 | * We only get here on ENOMEM. Not much else |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 5b3d2bd4813a..64b8c7639520 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h | |||
| @@ -77,7 +77,7 @@ struct acpi_signal_fatal_info { | |||
| 77 | /* | 77 | /* |
| 78 | * OSL Initialization and shutdown primitives | 78 | * OSL Initialization and shutdown primitives |
| 79 | */ | 79 | */ |
| 80 | acpi_status __initdata acpi_os_initialize(void); | 80 | acpi_status __init acpi_os_initialize(void); |
| 81 | 81 | ||
| 82 | acpi_status acpi_os_terminate(void); | 82 | acpi_status acpi_os_terminate(void); |
| 83 | 83 | ||
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index b327b5a9296d..ea69367fdd3b 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr); | |||
| 329 | int acpi_processor_power_exit(struct acpi_processor *pr); | 329 | int acpi_processor_power_exit(struct acpi_processor *pr); |
| 330 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); | 330 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); |
| 331 | int acpi_processor_hotplug(struct acpi_processor *pr); | 331 | int acpi_processor_hotplug(struct acpi_processor *pr); |
| 332 | int acpi_processor_suspend(struct device *dev); | ||
| 333 | int acpi_processor_resume(struct device *dev); | ||
| 334 | extern struct cpuidle_driver acpi_idle_driver; | 332 | extern struct cpuidle_driver acpi_idle_driver; |
| 335 | 333 | ||
| 334 | #ifdef CONFIG_PM_SLEEP | ||
| 335 | void acpi_processor_syscore_init(void); | ||
| 336 | void acpi_processor_syscore_exit(void); | ||
| 337 | #else | ||
| 338 | static inline void acpi_processor_syscore_init(void) {} | ||
| 339 | static inline void acpi_processor_syscore_exit(void) {} | ||
| 340 | #endif | ||
| 341 | |||
| 336 | /* in processor_thermal.c */ | 342 | /* in processor_thermal.c */ |
| 337 | int acpi_processor_get_limit_info(struct acpi_processor *pr); | 343 | int acpi_processor_get_limit_info(struct acpi_processor *pr); |
| 338 | extern const struct thermal_cooling_device_ops processor_cooling_ops; | 344 | extern const struct thermal_cooling_device_ops processor_cooling_ops; |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 61196592152e..63d17ee9eb48 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -316,6 +316,7 @@ struct drm_ioctl_desc { | |||
| 316 | int flags; | 316 | int flags; |
| 317 | drm_ioctl_t *func; | 317 | drm_ioctl_t *func; |
| 318 | unsigned int cmd_drv; | 318 | unsigned int cmd_drv; |
| 319 | const char *name; | ||
| 319 | }; | 320 | }; |
| 320 | 321 | ||
| 321 | /** | 322 | /** |
| @@ -324,7 +325,7 @@ struct drm_ioctl_desc { | |||
| 324 | */ | 325 | */ |
| 325 | 326 | ||
| 326 | #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ | 327 | #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
| 327 | [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} | 328 | [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} |
| 328 | 329 | ||
| 329 | struct drm_magic_entry { | 330 | struct drm_magic_entry { |
| 330 | struct list_head head; | 331 | struct list_head head; |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 8230b46fdd73..471f276ce8f7 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
| @@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size { | |||
| 50 | 50 | ||
| 51 | /** | 51 | /** |
| 52 | * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library | 52 | * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library |
| 53 | * @gamma_set: - Set the given gamma lut register on the given crtc. | 53 | * @gamma_set: Set the given gamma lut register on the given crtc. |
| 54 | * @gamma_get: - Read the given gamma lut register on the given crtc, used to | 54 | * @gamma_get: Read the given gamma lut register on the given crtc, used to |
| 55 | * save the current lut when force-restoring the fbdev for e.g. | 55 | * save the current lut when force-restoring the fbdev for e.g. |
| 56 | * kdbg. | 56 | * kdbg. |
| 57 | * @fb_probe: - Driver callback to allocate and initialize the fbdev info | 57 | * @fb_probe: Driver callback to allocate and initialize the fbdev info |
| 58 | * structure. Futhermore it also needs to allocate the drm | 58 | * structure. Futhermore it also needs to allocate the drm |
| 59 | * framebuffer used to back the fbdev. | 59 | * framebuffer used to back the fbdev. |
| 60 | * @initial_config: Setup an initial fbdev display configuration | ||
| 60 | * | 61 | * |
| 61 | * Driver callbacks used by the fbdev emulation helper library. | 62 | * Driver callbacks used by the fbdev emulation helper library. |
| 62 | */ | 63 | */ |
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 393369147a2d..675ddf4b441f 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h | |||
| @@ -87,15 +87,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) | |||
| 87 | /** Other copying of data from kernel space */ | 87 | /** Other copying of data from kernel space */ |
| 88 | #define DRM_COPY_TO_USER(arg1, arg2, arg3) \ | 88 | #define DRM_COPY_TO_USER(arg1, arg2, arg3) \ |
| 89 | copy_to_user(arg1, arg2, arg3) | 89 | copy_to_user(arg1, arg2, arg3) |
| 90 | /* Macros for copyfrom user, but checking readability only once */ | ||
| 91 | #define DRM_VERIFYAREA_READ( uaddr, size ) \ | ||
| 92 | (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT) | ||
| 93 | #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ | ||
| 94 | __copy_from_user(arg1, arg2, arg3) | ||
| 95 | #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ | ||
| 96 | __copy_to_user(arg1, arg2, arg3) | ||
| 97 | #define DRM_GET_USER_UNCHECKED(val, uaddr) \ | ||
| 98 | __get_user(val, uaddr) | ||
| 99 | 90 | ||
| 100 | #define DRM_HZ HZ | 91 | #define DRM_HZ HZ |
| 101 | 92 | ||
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h index 13a3da25ff07..98cd41bb39c8 100644 --- a/include/linux/journal-head.h +++ b/include/linux/journal-head.h | |||
| @@ -30,15 +30,19 @@ struct journal_head { | |||
| 30 | 30 | ||
| 31 | /* | 31 | /* |
| 32 | * Journalling list for this buffer [jbd_lock_bh_state()] | 32 | * Journalling list for this buffer [jbd_lock_bh_state()] |
| 33 | * NOTE: We *cannot* combine this with b_modified into a bitfield | ||
| 34 | * as gcc would then (which the C standard allows but which is | ||
| 35 | * very unuseful) make 64-bit accesses to the bitfield and clobber | ||
| 36 | * b_jcount if its update races with bitfield modification. | ||
| 33 | */ | 37 | */ |
| 34 | unsigned b_jlist:4; | 38 | unsigned b_jlist; |
| 35 | 39 | ||
| 36 | /* | 40 | /* |
| 37 | * This flag signals the buffer has been modified by | 41 | * This flag signals the buffer has been modified by |
| 38 | * the currently running transaction | 42 | * the currently running transaction |
| 39 | * [jbd_lock_bh_state()] | 43 | * [jbd_lock_bh_state()] |
| 40 | */ | 44 | */ |
| 41 | unsigned b_modified:1; | 45 | unsigned b_modified; |
| 42 | 46 | ||
| 43 | /* | 47 | /* |
| 44 | * Copy of the buffer data frozen for writing to the log. | 48 | * Copy of the buffer data frozen for writing to the log. |
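The new comment spells out why b_jlist and b_modified are widened from bitfields to full integers. A compact, hedged illustration of the hazard (struct names invented for the example; the real fields live in struct journal_head):

/*
 * With adjacent bitfields the compiler may implement a single-bit update
 * as a wide read-modify-write that also covers a neighbouring plain
 * member, so a concurrent lockless update to that neighbour can be lost.
 */
struct racy_layout {
	unsigned jlist:4;	/* written under a lock ...               */
	unsigned modified:1;	/* ... but the RMW emitted for it may     */
				/* span the whole word, including jcount  */
	int	 jcount;	/* updated without that lock              */
};

struct safe_layout {
	unsigned jlist;		/* full words: each store touches only    */
	unsigned modified;	/* its own field                           */
	int	 jcount;
};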
diff --git a/include/linux/kref.h b/include/linux/kref.h index e15828fd71f1..484604d184be 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/atomic.h> | 19 | #include <linux/atomic.h> |
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
| 22 | #include <linux/spinlock.h> | ||
| 22 | 23 | ||
| 23 | struct kref { | 24 | struct kref { |
| 24 | atomic_t refcount; | 25 | atomic_t refcount; |
| @@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) | |||
| 98 | return kref_sub(kref, 1, release); | 99 | return kref_sub(kref, 1, release); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 102 | /** | ||
| 103 | * kref_put_spinlock_irqsave - decrement refcount for object. | ||
| 104 | * @kref: object. | ||
| 105 | * @release: pointer to the function that will clean up the object when the | ||
| 106 | * last reference to the object is released. | ||
| 107 | * This pointer is required, and it is not acceptable to pass kfree | ||
| 108 | * in as this function. | ||
| 109 | * @lock: lock to take in release case | ||
| 110 | * | ||
| 111 | * Behaves identical to kref_put with one exception. If the reference count | ||
| 112 | * drops to zero, the lock will be taken atomically wrt dropping the reference | ||
| 113 | * count. The release function has to call spin_unlock() without _irqrestore. | ||
| 114 | */ | ||
| 115 | static inline int kref_put_spinlock_irqsave(struct kref *kref, | ||
| 116 | void (*release)(struct kref *kref), | ||
| 117 | spinlock_t *lock) | ||
| 118 | { | ||
| 119 | unsigned long flags; | ||
| 120 | |||
| 121 | WARN_ON(release == NULL); | ||
| 122 | if (atomic_add_unless(&kref->refcount, -1, 1)) | ||
| 123 | return 0; | ||
| 124 | spin_lock_irqsave(lock, flags); | ||
| 125 | if (atomic_dec_and_test(&kref->refcount)) { | ||
| 126 | release(kref); | ||
| 127 | local_irq_restore(flags); | ||
| 128 | return 1; | ||
| 129 | } | ||
| 130 | spin_unlock_irqrestore(lock, flags); | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 101 | static inline int kref_put_mutex(struct kref *kref, | 134 | static inline int kref_put_mutex(struct kref *kref, |
| 102 | void (*release)(struct kref *kref), | 135 | void (*release)(struct kref *kref), |
| 103 | struct mutex *lock) | 136 | struct mutex *lock) |
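A usage sketch for the new kref_put_spinlock_irqsave() helper (object and lock names are invented for illustration): the release callback is entered with the lock held and interrupts disabled, must drop the lock with a plain spin_unlock(), and the helper restores the saved irq flags itself once release() returns.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_session {
	struct kref kref;
	struct list_head node;		/* protected by demo_lock */
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_session_release(struct kref *kref)
{
	struct demo_session *s = container_of(kref, struct demo_session, kref);

	/* Called with demo_lock held and interrupts disabled. */
	list_del(&s->node);
	spin_unlock(&demo_lock);	/* plain unlock, no _irqrestore */
	kfree(s);
}

static void demo_session_put(struct demo_session *s)
{
	/* demo_lock is taken only if this put drops the last reference. */
	kref_put_spinlock_irqsave(&s->kref, demo_session_release, &demo_lock);
}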
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 67f46ad6920a..352eec9df1b8 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -126,7 +126,7 @@ struct mlx4_rss_context { | |||
| 126 | 126 | ||
| 127 | struct mlx4_qp_path { | 127 | struct mlx4_qp_path { |
| 128 | u8 fl; | 128 | u8 fl; |
| 129 | u8 reserved1[1]; | 129 | u8 vlan_control; |
| 130 | u8 disable_pkey_check; | 130 | u8 disable_pkey_check; |
| 131 | u8 pkey_index; | 131 | u8 pkey_index; |
| 132 | u8 counter_index; | 132 | u8 counter_index; |
| @@ -141,11 +141,32 @@ struct mlx4_qp_path { | |||
| 141 | u8 sched_queue; | 141 | u8 sched_queue; |
| 142 | u8 vlan_index; | 142 | u8 vlan_index; |
| 143 | u8 feup; | 143 | u8 feup; |
| 144 | u8 reserved3; | 144 | u8 fvl_rx; |
| 145 | u8 reserved4[2]; | 145 | u8 reserved4[2]; |
| 146 | u8 dmac[6]; | 146 | u8 dmac[6]; |
| 147 | }; | 147 | }; |
| 148 | 148 | ||
| 149 | enum { /* fl */ | ||
| 150 | MLX4_FL_CV = 1 << 6, | ||
| 151 | MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2 | ||
| 152 | }; | ||
| 153 | enum { /* vlan_control */ | ||
| 154 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, | ||
| 155 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, | ||
| 156 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ | ||
| 157 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 | ||
| 158 | }; | ||
| 159 | |||
| 160 | enum { /* feup */ | ||
| 161 | MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */ | ||
| 162 | MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */ | ||
| 163 | MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */ | ||
| 164 | }; | ||
| 165 | |||
| 166 | enum { /* fvl_rx */ | ||
| 167 | MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */ | ||
| 168 | }; | ||
| 169 | |||
| 149 | struct mlx4_qp_context { | 170 | struct mlx4_qp_context { |
| 150 | __be32 flags; | 171 | __be32 flags; |
| 151 | __be32 pd; | 172 | __be32 pd; |
| @@ -185,6 +206,10 @@ struct mlx4_qp_context { | |||
| 185 | u32 reserved5[10]; | 206 | u32 reserved5[10]; |
| 186 | }; | 207 | }; |
| 187 | 208 | ||
| 209 | enum { /* param3 */ | ||
| 210 | MLX4_STRIP_VLAN = 1 << 30 | ||
| 211 | }; | ||
| 212 | |||
| 188 | /* Which firmware version adds support for NEC (NoErrorCompletion) bit */ | 213 | /* Which firmware version adds support for NEC (NoErrorCompletion) bit */ |
| 189 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) | 214 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) |
| 190 | 215 | ||
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 3863a4dbdf18..2a93b64a3869 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
| @@ -11,9 +11,10 @@ | |||
| 11 | * | 11 | * |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_OF_DEVICE | ||
| 15 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 16 | #include <linux/mod_devicetable.h> | 15 | #include <linux/mod_devicetable.h> |
| 16 | |||
| 17 | #ifdef CONFIG_OF_DEVICE | ||
| 17 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
| 18 | #include <linux/of_device.h> | 19 | #include <linux/of_device.h> |
| 19 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
| @@ -100,7 +101,7 @@ extern int of_platform_populate(struct device_node *root, | |||
| 100 | 101 | ||
| 101 | #if !defined(CONFIG_OF_ADDRESS) | 102 | #if !defined(CONFIG_OF_ADDRESS) |
| 102 | struct of_dev_auxdata; | 103 | struct of_dev_auxdata; |
| 103 | struct device; | 104 | struct device_node; |
| 104 | static inline int of_platform_populate(struct device_node *root, | 105 | static inline int of_platform_populate(struct device_node *root, |
| 105 | const struct of_device_id *matches, | 106 | const struct of_device_id *matches, |
| 106 | const struct of_dev_auxdata *lookup, | 107 | const struct of_dev_auxdata *lookup, |
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 72474e18f1e0..6aa238096622 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h | |||
| @@ -37,17 +37,17 @@ | |||
| 37 | * if it is 0, pull-down is disabled. | 37 | * if it is 0, pull-down is disabled. |
| 38 | * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and | 38 | * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and |
| 39 | * low, this is the most typical case and is typically achieved with two | 39 | * low, this is the most typical case and is typically achieved with two |
| 40 | * active transistors on the output. Sending this config will enabale | 40 | * active transistors on the output. Setting this config will enable |
| 41 | * push-pull mode, the argument is ignored. | 41 | * push-pull mode, the argument is ignored. |
| 42 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open | 42 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open |
| 43 | * collector) which means it is usually wired with other output ports | 43 | * collector) which means it is usually wired with other output ports |
| 44 | * which are then pulled up with an external resistor. Sending this | 44 | * which are then pulled up with an external resistor. Setting this |
| 45 | * config will enabale open drain mode, the argument is ignored. | 45 | * config will enable open drain mode, the argument is ignored. |
| 46 | * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source | 46 | * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source |
| 47 | * (open emitter). Sending this config will enabale open drain mode, the | 47 | * (open emitter). Setting this config will enable open drain mode, the |
| 48 | * argument is ignored. | 48 | * argument is ignored. |
| 49 | * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as | 49 | * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current |
| 50 | * argument. The argument is in mA. | 50 | * passed as argument. The argument is in mA. |
| 51 | * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. | 51 | * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. |
| 52 | * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, | 52 | * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, |
| 53 | * schmitt-trigger mode is disabled. | 53 | * schmitt-trigger mode is disabled. |
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index ff9b0aab5281..c860c1b314c0 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h | |||
| @@ -43,8 +43,6 @@ struct omap_uart_port_info { | |||
| 43 | int DTR_present; | 43 | int DTR_present; |
| 44 | 44 | ||
| 45 | int (*get_context_loss_count)(struct device *); | 45 | int (*get_context_loss_count)(struct device *); |
| 46 | void (*set_forceidle)(struct device *); | ||
| 47 | void (*set_noidle)(struct device *); | ||
| 48 | void (*enable_wakeup)(struct device *, bool); | 46 | void (*enable_wakeup)(struct device *, bool); |
| 49 | }; | 47 | }; |
| 50 | 48 | ||
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 733eb5ee31c5..6ff26c8db7b9 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type; | |||
| 57 | * @modalias: Name of the driver to use with this device, or an alias | 57 | * @modalias: Name of the driver to use with this device, or an alias |
| 58 | * for that name. This appears in the sysfs "modalias" attribute | 58 | * for that name. This appears in the sysfs "modalias" attribute |
| 59 | * for driver coldplugging, and in uevents used for hotplugging | 59 | * for driver coldplugging, and in uevents used for hotplugging |
| 60 | * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when | 60 | * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when |
| 61 | * when not using a GPIO line) | 61 | * when not using a GPIO line) |
| 62 | * | 62 | * |
| 63 | * A @spi_device is used to interchange data between an SPI slave | 63 | * A @spi_device is used to interchange data between an SPI slave |
| @@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
| 266 | * queue so the subsystem notifies the driver that it may relax the | 266 | * queue so the subsystem notifies the driver that it may relax the |
| 267 | * hardware by issuing this call | 267 | * hardware by issuing this call |
| 268 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 268 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS |
| 269 | * number. Any individual value may be -EINVAL for CS lines that | 269 | * number. Any individual value may be -ENOENT for CS lines that |
| 270 | * are not GPIOs (driven by the SPI controller itself). | 270 | * are not GPIOs (driven by the SPI controller itself). |
| 271 | * | 271 | * |
| 272 | * Each SPI master controller can communicate with one or more @spi_device | 272 | * Each SPI master controller can communicate with one or more @spi_device |
diff --git a/include/linux/time.h b/include/linux/time.h index 22d81b3c955b..d5d229b2e5af 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts) | |||
| 117 | 117 | ||
| 118 | extern bool persistent_clock_exist; | 118 | extern bool persistent_clock_exist; |
| 119 | 119 | ||
| 120 | #ifdef ALWAYS_USE_PERSISTENT_CLOCK | ||
| 121 | #define has_persistent_clock() true | ||
| 122 | #else | ||
| 123 | static inline bool has_persistent_clock(void) | 120 | static inline bool has_persistent_clock(void) |
| 124 | { | 121 | { |
| 125 | return persistent_clock_exist; | 122 | return persistent_clock_exist; |
| 126 | } | 123 | } |
| 127 | #endif | ||
| 128 | 124 | ||
| 129 | extern void read_persistent_clock(struct timespec *ts); | 125 | extern void read_persistent_clock(struct timespec *ts); |
| 130 | extern void read_boot_clock(struct timespec *ts); | 126 | extern void read_boot_clock(struct timespec *ts); |
diff --git a/include/net/sock.h b/include/net/sock.h index 5c97b0fc5623..66772cf8c3c5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -866,6 +866,18 @@ struct inet_hashinfo; | |||
| 866 | struct raw_hashinfo; | 866 | struct raw_hashinfo; |
| 867 | struct module; | 867 | struct module; |
| 868 | 868 | ||
| 869 | /* | ||
| 870 | * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes | ||
| 871 | * un-modified. Special care is taken when initializing object to zero. | ||
| 872 | */ | ||
| 873 | static inline void sk_prot_clear_nulls(struct sock *sk, int size) | ||
| 874 | { | ||
| 875 | if (offsetof(struct sock, sk_node.next) != 0) | ||
| 876 | memset(sk, 0, offsetof(struct sock, sk_node.next)); | ||
| 877 | memset(&sk->sk_node.pprev, 0, | ||
| 878 | size - offsetof(struct sock, sk_node.pprev)); | ||
| 879 | } | ||
| 880 | |||
| 869 | /* Networking protocol blocks we attach to sockets. | 881 | /* Networking protocol blocks we attach to sockets. |
| 870 | * socket layer -> transport layer interface | 882 | * socket layer -> transport layer interface |
| 871 | * transport -> network interface is defined by struct inet_proto | 883 | * transport -> network interface is defined by struct inet_proto |
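sk_prot_clear_nulls() exists because sockets allocated from a SLAB_DESTROY_BY_RCU cache may still be visible to concurrent RCU readers, so a recycled object has to keep sk_node.next intact while the rest is zeroed. A simplified sketch of an allocation path that would use it (field names follow struct proto, but the function itself is illustrative, not the real sk_prot_alloc()):

#include <linux/slab.h>
#include <net/sock.h>

static struct sock *demo_sk_alloc(struct proto *prot, gfp_t gfp)
{
	struct sock *sk = kmem_cache_alloc(prot->slab, gfp);

	if (!sk)
		return NULL;

	if (prot->slab_flags & SLAB_DESTROY_BY_RCU)
		sk_prot_clear_nulls(sk, prot->obj_size);  /* skip .next */
	else
		memset(sk, 0, prot->obj_size);

	return sk;
}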
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c4af592f7057..e773dfa5f98f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
| @@ -463,7 +463,6 @@ struct se_cmd { | |||
| 463 | #define CMD_T_ABORTED (1 << 0) | 463 | #define CMD_T_ABORTED (1 << 0) |
| 464 | #define CMD_T_ACTIVE (1 << 1) | 464 | #define CMD_T_ACTIVE (1 << 1) |
| 465 | #define CMD_T_COMPLETE (1 << 2) | 465 | #define CMD_T_COMPLETE (1 << 2) |
| 466 | #define CMD_T_QUEUED (1 << 3) | ||
| 467 | #define CMD_T_SENT (1 << 4) | 466 | #define CMD_T_SENT (1 << 4) |
| 468 | #define CMD_T_STOP (1 << 5) | 467 | #define CMD_T_STOP (1 << 5) |
| 469 | #define CMD_T_FAILED (1 << 6) | 468 | #define CMD_T_FAILED (1 << 6) |
| @@ -572,12 +571,8 @@ struct se_dev_entry { | |||
| 572 | bool def_pr_registered; | 571 | bool def_pr_registered; |
| 573 | /* See transport_lunflags_table */ | 572 | /* See transport_lunflags_table */ |
| 574 | u32 lun_flags; | 573 | u32 lun_flags; |
| 575 | u32 deve_cmds; | ||
| 576 | u32 mapped_lun; | 574 | u32 mapped_lun; |
| 577 | u32 average_bytes; | ||
| 578 | u32 last_byte_count; | ||
| 579 | u32 total_cmds; | 575 | u32 total_cmds; |
| 580 | u32 total_bytes; | ||
| 581 | u64 pr_res_key; | 576 | u64 pr_res_key; |
| 582 | u64 creation_time; | 577 | u64 creation_time; |
| 583 | u32 attach_count; | 578 | u32 attach_count; |
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d0e686402df8..8ee15b97cd38 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h | |||
| @@ -2139,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent, | |||
| 2139 | __entry->lblk, __entry->len) | 2139 | __entry->lblk, __entry->len) |
| 2140 | ); | 2140 | ); |
| 2141 | 2141 | ||
| 2142 | TRACE_EVENT(ext4_es_find_delayed_extent_enter, | 2142 | TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, |
| 2143 | TP_PROTO(struct inode *inode, ext4_lblk_t lblk), | 2143 | TP_PROTO(struct inode *inode, ext4_lblk_t lblk), |
| 2144 | 2144 | ||
| 2145 | TP_ARGS(inode, lblk), | 2145 | TP_ARGS(inode, lblk), |
| @@ -2161,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_enter, | |||
| 2161 | (unsigned long) __entry->ino, __entry->lblk) | 2161 | (unsigned long) __entry->ino, __entry->lblk) |
| 2162 | ); | 2162 | ); |
| 2163 | 2163 | ||
| 2164 | TRACE_EVENT(ext4_es_find_delayed_extent_exit, | 2164 | TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, |
| 2165 | TP_PROTO(struct inode *inode, struct extent_status *es), | 2165 | TP_PROTO(struct inode *inode, struct extent_status *es), |
| 2166 | 2166 | ||
| 2167 | TP_ARGS(inode, es), | 2167 | TP_ARGS(inode, es), |
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c index 8b86c0c68edf..d5585f5e038e 100644 --- a/kernel/cpu/idle.c +++ b/kernel/cpu/idle.c | |||
| @@ -40,11 +40,13 @@ __setup("hlt", cpu_idle_nopoll_setup); | |||
| 40 | 40 | ||
| 41 | static inline int cpu_idle_poll(void) | 41 | static inline int cpu_idle_poll(void) |
| 42 | { | 42 | { |
| 43 | rcu_idle_enter(); | ||
| 43 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | 44 | trace_cpu_idle_rcuidle(0, smp_processor_id()); |
| 44 | local_irq_enable(); | 45 | local_irq_enable(); |
| 45 | while (!need_resched()) | 46 | while (!need_resched()) |
| 46 | cpu_relax(); | 47 | cpu_relax(); |
| 47 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | 48 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
| 49 | rcu_idle_exit(); | ||
| 48 | return 1; | 50 | return 1; |
| 49 | } | 51 | } |
| 50 | 52 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 6b41c1899a8b..9dc297faf7c0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event, | |||
| 4394 | perf_output_end(&handle); | 4394 | perf_output_end(&handle); |
| 4395 | } | 4395 | } |
| 4396 | 4396 | ||
| 4397 | typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data); | ||
| 4398 | typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); | ||
| 4399 | |||
| 4400 | static void | ||
| 4401 | perf_event_aux_ctx(struct perf_event_context *ctx, | ||
| 4402 | perf_event_aux_match_cb match, | ||
| 4403 | perf_event_aux_output_cb output, | ||
| 4404 | void *data) | ||
| 4405 | { | ||
| 4406 | struct perf_event *event; | ||
| 4407 | |||
| 4408 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
| 4409 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
| 4410 | continue; | ||
| 4411 | if (!event_filter_match(event)) | ||
| 4412 | continue; | ||
| 4413 | if (match(event, data)) | ||
| 4414 | output(event, data); | ||
| 4415 | } | ||
| 4416 | } | ||
| 4417 | |||
| 4418 | static void | ||
| 4419 | perf_event_aux(perf_event_aux_match_cb match, | ||
| 4420 | perf_event_aux_output_cb output, | ||
| 4421 | void *data, | ||
| 4422 | struct perf_event_context *task_ctx) | ||
| 4423 | { | ||
| 4424 | struct perf_cpu_context *cpuctx; | ||
| 4425 | struct perf_event_context *ctx; | ||
| 4426 | struct pmu *pmu; | ||
| 4427 | int ctxn; | ||
| 4428 | |||
| 4429 | rcu_read_lock(); | ||
| 4430 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
| 4431 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4432 | if (cpuctx->unique_pmu != pmu) | ||
| 4433 | goto next; | ||
| 4434 | perf_event_aux_ctx(&cpuctx->ctx, match, output, data); | ||
| 4435 | if (task_ctx) | ||
| 4436 | goto next; | ||
| 4437 | ctxn = pmu->task_ctx_nr; | ||
| 4438 | if (ctxn < 0) | ||
| 4439 | goto next; | ||
| 4440 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 4441 | if (ctx) | ||
| 4442 | perf_event_aux_ctx(ctx, match, output, data); | ||
| 4443 | next: | ||
| 4444 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4445 | } | ||
| 4446 | |||
| 4447 | if (task_ctx) { | ||
| 4448 | preempt_disable(); | ||
| 4449 | perf_event_aux_ctx(task_ctx, match, output, data); | ||
| 4450 | preempt_enable(); | ||
| 4451 | } | ||
| 4452 | rcu_read_unlock(); | ||
| 4453 | } | ||
| 4454 | |||
| 4397 | /* | 4455 | /* |
| 4398 | * task tracking -- fork/exit | 4456 | * task tracking -- fork/exit |
| 4399 | * | 4457 | * |
| @@ -4416,8 +4474,9 @@ struct perf_task_event { | |||
| 4416 | }; | 4474 | }; |
| 4417 | 4475 | ||
| 4418 | static void perf_event_task_output(struct perf_event *event, | 4476 | static void perf_event_task_output(struct perf_event *event, |
| 4419 | struct perf_task_event *task_event) | 4477 | void *data) |
| 4420 | { | 4478 | { |
| 4479 | struct perf_task_event *task_event = data; | ||
| 4421 | struct perf_output_handle handle; | 4480 | struct perf_output_handle handle; |
| 4422 | struct perf_sample_data sample; | 4481 | struct perf_sample_data sample; |
| 4423 | struct task_struct *task = task_event->task; | 4482 | struct task_struct *task = task_event->task; |
| @@ -4445,62 +4504,11 @@ out: | |||
| 4445 | task_event->event_id.header.size = size; | 4504 | task_event->event_id.header.size = size; |
| 4446 | } | 4505 | } |
| 4447 | 4506 | ||
| 4448 | static int perf_event_task_match(struct perf_event *event) | 4507 | static int perf_event_task_match(struct perf_event *event, |
| 4449 | { | 4508 | void *data __maybe_unused) |
| 4450 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
| 4451 | return 0; | ||
| 4452 | |||
| 4453 | if (!event_filter_match(event)) | ||
| 4454 | return 0; | ||
| 4455 | |||
| 4456 | if (event->attr.comm || event->attr.mmap || | ||
| 4457 | event->attr.mmap_data || event->attr.task) | ||
| 4458 | return 1; | ||
| 4459 | |||
| 4460 | return 0; | ||
| 4461 | } | ||
| 4462 | |||
| 4463 | static void perf_event_task_ctx(struct perf_event_context *ctx, | ||
| 4464 | struct perf_task_event *task_event) | ||
| 4465 | { | 4509 | { |
| 4466 | struct perf_event *event; | 4510 | return event->attr.comm || event->attr.mmap || |
| 4467 | 4511 | event->attr.mmap_data || event->attr.task; | |
| 4468 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
| 4469 | if (perf_event_task_match(event)) | ||
| 4470 | perf_event_task_output(event, task_event); | ||
| 4471 | } | ||
| 4472 | } | ||
| 4473 | |||
| 4474 | static void perf_event_task_event(struct perf_task_event *task_event) | ||
| 4475 | { | ||
| 4476 | struct perf_cpu_context *cpuctx; | ||
| 4477 | struct perf_event_context *ctx; | ||
| 4478 | struct pmu *pmu; | ||
| 4479 | int ctxn; | ||
| 4480 | |||
| 4481 | rcu_read_lock(); | ||
| 4482 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
| 4483 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4484 | if (cpuctx->unique_pmu != pmu) | ||
| 4485 | goto next; | ||
| 4486 | perf_event_task_ctx(&cpuctx->ctx, task_event); | ||
| 4487 | |||
| 4488 | ctx = task_event->task_ctx; | ||
| 4489 | if (!ctx) { | ||
| 4490 | ctxn = pmu->task_ctx_nr; | ||
| 4491 | if (ctxn < 0) | ||
| 4492 | goto next; | ||
| 4493 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 4494 | if (ctx) | ||
| 4495 | perf_event_task_ctx(ctx, task_event); | ||
| 4496 | } | ||
| 4497 | next: | ||
| 4498 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4499 | } | ||
| 4500 | if (task_event->task_ctx) | ||
| 4501 | perf_event_task_ctx(task_event->task_ctx, task_event); | ||
| 4502 | |||
| 4503 | rcu_read_unlock(); | ||
| 4504 | } | 4512 | } |
| 4505 | 4513 | ||
| 4506 | static void perf_event_task(struct task_struct *task, | 4514 | static void perf_event_task(struct task_struct *task, |
| @@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task, | |||
| 4531 | }, | 4539 | }, |
| 4532 | }; | 4540 | }; |
| 4533 | 4541 | ||
| 4534 | perf_event_task_event(&task_event); | 4542 | perf_event_aux(perf_event_task_match, |
| 4543 | perf_event_task_output, | ||
| 4544 | &task_event, | ||
| 4545 | task_ctx); | ||
| 4535 | } | 4546 | } |
| 4536 | 4547 | ||
| 4537 | void perf_event_fork(struct task_struct *task) | 4548 | void perf_event_fork(struct task_struct *task) |
| @@ -4557,8 +4568,9 @@ struct perf_comm_event { | |||
| 4557 | }; | 4568 | }; |
| 4558 | 4569 | ||
| 4559 | static void perf_event_comm_output(struct perf_event *event, | 4570 | static void perf_event_comm_output(struct perf_event *event, |
| 4560 | struct perf_comm_event *comm_event) | 4571 | void *data) |
| 4561 | { | 4572 | { |
| 4573 | struct perf_comm_event *comm_event = data; | ||
| 4562 | struct perf_output_handle handle; | 4574 | struct perf_output_handle handle; |
| 4563 | struct perf_sample_data sample; | 4575 | struct perf_sample_data sample; |
| 4564 | int size = comm_event->event_id.header.size; | 4576 | int size = comm_event->event_id.header.size; |
| @@ -4585,39 +4597,16 @@ out: | |||
| 4585 | comm_event->event_id.header.size = size; | 4597 | comm_event->event_id.header.size = size; |
| 4586 | } | 4598 | } |
| 4587 | 4599 | ||
| 4588 | static int perf_event_comm_match(struct perf_event *event) | 4600 | static int perf_event_comm_match(struct perf_event *event, |
| 4589 | { | 4601 | void *data __maybe_unused) |
| 4590 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
| 4591 | return 0; | ||
| 4592 | |||
| 4593 | if (!event_filter_match(event)) | ||
| 4594 | return 0; | ||
| 4595 | |||
| 4596 | if (event->attr.comm) | ||
| 4597 | return 1; | ||
| 4598 | |||
| 4599 | return 0; | ||
| 4600 | } | ||
| 4601 | |||
| 4602 | static void perf_event_comm_ctx(struct perf_event_context *ctx, | ||
| 4603 | struct perf_comm_event *comm_event) | ||
| 4604 | { | 4602 | { |
| 4605 | struct perf_event *event; | 4603 | return event->attr.comm; |
| 4606 | |||
| 4607 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
| 4608 | if (perf_event_comm_match(event)) | ||
| 4609 | perf_event_comm_output(event, comm_event); | ||
| 4610 | } | ||
| 4611 | } | 4604 | } |
| 4612 | 4605 | ||
| 4613 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | 4606 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
| 4614 | { | 4607 | { |
| 4615 | struct perf_cpu_context *cpuctx; | ||
| 4616 | struct perf_event_context *ctx; | ||
| 4617 | char comm[TASK_COMM_LEN]; | 4608 | char comm[TASK_COMM_LEN]; |
| 4618 | unsigned int size; | 4609 | unsigned int size; |
| 4619 | struct pmu *pmu; | ||
| 4620 | int ctxn; | ||
| 4621 | 4610 | ||
| 4622 | memset(comm, 0, sizeof(comm)); | 4611 | memset(comm, 0, sizeof(comm)); |
| 4623 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); | 4612 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
| @@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
| 4627 | comm_event->comm_size = size; | 4616 | comm_event->comm_size = size; |
| 4628 | 4617 | ||
| 4629 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 4618 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
| 4630 | rcu_read_lock(); | ||
| 4631 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
| 4632 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4633 | if (cpuctx->unique_pmu != pmu) | ||
| 4634 | goto next; | ||
| 4635 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | ||
| 4636 | 4619 | ||
| 4637 | ctxn = pmu->task_ctx_nr; | 4620 | perf_event_aux(perf_event_comm_match, |
| 4638 | if (ctxn < 0) | 4621 | perf_event_comm_output, |
| 4639 | goto next; | 4622 | comm_event, |
| 4640 | 4623 | NULL); | |
| 4641 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 4642 | if (ctx) | ||
| 4643 | perf_event_comm_ctx(ctx, comm_event); | ||
| 4644 | next: | ||
| 4645 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4646 | } | ||
| 4647 | rcu_read_unlock(); | ||
| 4648 | } | 4624 | } |
| 4649 | 4625 | ||
| 4650 | void perf_event_comm(struct task_struct *task) | 4626 | void perf_event_comm(struct task_struct *task) |
| @@ -4706,8 +4682,9 @@ struct perf_mmap_event { | |||
| 4706 | }; | 4682 | }; |
| 4707 | 4683 | ||
| 4708 | static void perf_event_mmap_output(struct perf_event *event, | 4684 | static void perf_event_mmap_output(struct perf_event *event, |
| 4709 | struct perf_mmap_event *mmap_event) | 4685 | void *data) |
| 4710 | { | 4686 | { |
| 4687 | struct perf_mmap_event *mmap_event = data; | ||
| 4711 | struct perf_output_handle handle; | 4688 | struct perf_output_handle handle; |
| 4712 | struct perf_sample_data sample; | 4689 | struct perf_sample_data sample; |
| 4713 | int size = mmap_event->event_id.header.size; | 4690 | int size = mmap_event->event_id.header.size; |
| @@ -4734,46 +4711,24 @@ out: | |||
| 4734 | } | 4711 | } |
| 4735 | 4712 | ||
| 4736 | static int perf_event_mmap_match(struct perf_event *event, | 4713 | static int perf_event_mmap_match(struct perf_event *event, |
| 4737 | struct perf_mmap_event *mmap_event, | 4714 | void *data) |
| 4738 | int executable) | ||
| 4739 | { | ||
| 4740 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
| 4741 | return 0; | ||
| 4742 | |||
| 4743 | if (!event_filter_match(event)) | ||
| 4744 | return 0; | ||
| 4745 | |||
| 4746 | if ((!executable && event->attr.mmap_data) || | ||
| 4747 | (executable && event->attr.mmap)) | ||
| 4748 | return 1; | ||
| 4749 | |||
| 4750 | return 0; | ||
| 4751 | } | ||
| 4752 | |||
| 4753 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | ||
| 4754 | struct perf_mmap_event *mmap_event, | ||
| 4755 | int executable) | ||
| 4756 | { | 4715 | { |
| 4757 | struct perf_event *event; | 4716 | struct perf_mmap_event *mmap_event = data; |
| 4717 | struct vm_area_struct *vma = mmap_event->vma; | ||
| 4718 | int executable = vma->vm_flags & VM_EXEC; | ||
| 4758 | 4719 | ||
| 4759 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 4720 | return (!executable && event->attr.mmap_data) || |
| 4760 | if (perf_event_mmap_match(event, mmap_event, executable)) | 4721 | (executable && event->attr.mmap); |
| 4761 | perf_event_mmap_output(event, mmap_event); | ||
| 4762 | } | ||
| 4763 | } | 4722 | } |
| 4764 | 4723 | ||
| 4765 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | 4724 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
| 4766 | { | 4725 | { |
| 4767 | struct perf_cpu_context *cpuctx; | ||
| 4768 | struct perf_event_context *ctx; | ||
| 4769 | struct vm_area_struct *vma = mmap_event->vma; | 4726 | struct vm_area_struct *vma = mmap_event->vma; |
| 4770 | struct file *file = vma->vm_file; | 4727 | struct file *file = vma->vm_file; |
| 4771 | unsigned int size; | 4728 | unsigned int size; |
| 4772 | char tmp[16]; | 4729 | char tmp[16]; |
| 4773 | char *buf = NULL; | 4730 | char *buf = NULL; |
| 4774 | const char *name; | 4731 | const char *name; |
| 4775 | struct pmu *pmu; | ||
| 4776 | int ctxn; | ||
| 4777 | 4732 | ||
| 4778 | memset(tmp, 0, sizeof(tmp)); | 4733 | memset(tmp, 0, sizeof(tmp)); |
| 4779 | 4734 | ||
| @@ -4829,27 +4784,10 @@ got_name: | |||
| 4829 | 4784 | ||
| 4830 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 4785 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
| 4831 | 4786 | ||
| 4832 | rcu_read_lock(); | 4787 | perf_event_aux(perf_event_mmap_match, |
| 4833 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 4788 | perf_event_mmap_output, |
| 4834 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 4789 | mmap_event, |
| 4835 | if (cpuctx->unique_pmu != pmu) | 4790 | NULL); |
| 4836 | goto next; | ||
| 4837 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, | ||
| 4838 | vma->vm_flags & VM_EXEC); | ||
| 4839 | |||
| 4840 | ctxn = pmu->task_ctx_nr; | ||
| 4841 | if (ctxn < 0) | ||
| 4842 | goto next; | ||
| 4843 | |||
| 4844 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 4845 | if (ctx) { | ||
| 4846 | perf_event_mmap_ctx(ctx, mmap_event, | ||
| 4847 | vma->vm_flags & VM_EXEC); | ||
| 4848 | } | ||
| 4849 | next: | ||
| 4850 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4851 | } | ||
| 4852 | rcu_read_unlock(); | ||
| 4853 | 4791 | ||
| 4854 | kfree(buf); | 4792 | kfree(buf); |
| 4855 | } | 4793 | } |
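
The hunk above folds three near-identical event-iteration loops (task, comm, mmap) into one generic walker, perf_event_aux(), driven by per-record match() and output() callbacks. The standalone sketch below shows only that callback split in plain userspace C; the struct, field, and function names are invented for illustration and are not the kernel API.

#include <stdio.h>

struct item {
	int flags;
	const char *name;
};

typedef int  (match_cb)(struct item *it, void *data);
typedef void (output_cb)(struct item *it, void *data);

/* Generic walker: filter each entry with match(), emit it with output(). */
static void for_each_matching(struct item *items, int n,
			      match_cb *match, output_cb *output, void *data)
{
	int i;

	for (i = 0; i < n; i++)
		if (match(&items[i], data))
			output(&items[i], data);
}

/* One concrete match policy: keep entries whose flags intersect *data. */
static int match_flag(struct item *it, void *data)
{
	return it->flags & *(int *)data;
}

static void print_item(struct item *it, void *data)
{
	(void)data;
	printf("%s\n", it->name);
}

int main(void)
{
	struct item items[] = { { 1, "comm" }, { 0, "idle" }, { 3, "mmap" } };
	int wanted = 1;

	for_each_matching(items, 3, match_flag, print_item, &wanted);
	return 0;
}

The design choice mirrored here is that the walk logic is written once and each event type only supplies its own filter and emitter, which is what lets the later hunks delete the duplicated perf_event_*_ctx()/perf_event_*_event() loops.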
diff --git a/kernel/kmod.c b/kernel/kmod.c index 1296e72e4161..8241906c4b61 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
| @@ -569,6 +569,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) | |||
| 569 | int retval = 0; | 569 | int retval = 0; |
| 570 | 570 | ||
| 571 | helper_lock(); | 571 | helper_lock(); |
| 572 | if (!sub_info->path) { | ||
| 573 | retval = -EINVAL; | ||
| 574 | goto out; | ||
| 575 | } | ||
| 576 | |||
| 572 | if (sub_info->path[0] == '\0') | 577 | if (sub_info->path[0] == '\0') |
| 573 | goto out; | 578 | goto out; |
| 574 | 579 | ||
diff --git a/kernel/module.c b/kernel/module.c index b049939177f6..cab4bce49c23 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -2431,10 +2431,10 @@ static void kmemleak_load_module(const struct module *mod, | |||
| 2431 | kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); | 2431 | kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); |
| 2432 | 2432 | ||
| 2433 | for (i = 1; i < info->hdr->e_shnum; i++) { | 2433 | for (i = 1; i < info->hdr->e_shnum; i++) { |
| 2434 | const char *name = info->secstrings + info->sechdrs[i].sh_name; | 2434 | /* Scan all writable sections that are not executable */ |
| 2435 | if (!(info->sechdrs[i].sh_flags & SHF_ALLOC)) | 2435 | if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || |
| 2436 | continue; | 2436 | !(info->sechdrs[i].sh_flags & SHF_WRITE) || |
| 2437 | if (!strstarts(name, ".data") && !strstarts(name, ".bss")) | 2437 | (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) |
| 2438 | continue; | 2438 | continue; |
| 2439 | 2439 | ||
| 2440 | kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, | 2440 | kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, |
| @@ -2769,24 +2769,11 @@ static void find_module_sections(struct module *mod, struct load_info *info) | |||
| 2769 | mod->trace_events = section_objs(info, "_ftrace_events", | 2769 | mod->trace_events = section_objs(info, "_ftrace_events", |
| 2770 | sizeof(*mod->trace_events), | 2770 | sizeof(*mod->trace_events), |
| 2771 | &mod->num_trace_events); | 2771 | &mod->num_trace_events); |
| 2772 | /* | ||
| 2773 | * This section contains pointers to allocated objects in the trace | ||
| 2774 | * code and not scanning it leads to false positives. | ||
| 2775 | */ | ||
| 2776 | kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * | ||
| 2777 | mod->num_trace_events, GFP_KERNEL); | ||
| 2778 | #endif | 2772 | #endif |
| 2779 | #ifdef CONFIG_TRACING | 2773 | #ifdef CONFIG_TRACING |
| 2780 | mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", | 2774 | mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", |
| 2781 | sizeof(*mod->trace_bprintk_fmt_start), | 2775 | sizeof(*mod->trace_bprintk_fmt_start), |
| 2782 | &mod->num_trace_bprintk_fmt); | 2776 | &mod->num_trace_bprintk_fmt); |
| 2783 | /* | ||
| 2784 | * This section contains pointers to allocated objects in the trace | ||
| 2785 | * code and not scanning it leads to false positives. | ||
| 2786 | */ | ||
| 2787 | kmemleak_scan_area(mod->trace_bprintk_fmt_start, | ||
| 2788 | sizeof(*mod->trace_bprintk_fmt_start) * | ||
| 2789 | mod->num_trace_bprintk_fmt, GFP_KERNEL); | ||
| 2790 | #endif | 2777 | #endif |
| 2791 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 2778 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
| 2792 | /* sechdrs[0].sh_size is always zero */ | 2779 | /* sechdrs[0].sh_size is always zero */ |
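
The kmemleak_load_module() change above stops matching section names (".data", ".bss") and instead scans any allocated, writable, non-executable section based on its ELF flags. The fragment below is a hedged userspace illustration of that flag test using the standard <elf.h> constants; should_scan_section() is an invented helper, not a kernel function.

#include <elf.h>
#include <stdio.h>

/* Return nonzero if a section with these flags should be scanned for pointers. */
static int should_scan_section(Elf64_Xword sh_flags)
{
	return (sh_flags & SHF_ALLOC) &&
	       (sh_flags & SHF_WRITE) &&
	       !(sh_flags & SHF_EXECINSTR);
}

int main(void)
{
	printf(".data-like  -> %d\n", should_scan_section(SHF_ALLOC | SHF_WRITE));
	printf(".text-like  -> %d\n", should_scan_section(SHF_ALLOC | SHF_EXECINSTR));
	printf("non-alloc   -> %d\n", should_scan_section(SHF_WRITE));
	return 0;
}

Keying on flags rather than names is also why the explicit kmemleak_scan_area() calls for the trace sections can be removed further down: those sections are writable and non-executable, so the generic rule already covers them.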
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 170814dc418f..3db5a375d8dd 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
| @@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void) | |||
| 88 | #ifdef CONFIG_RCU_NOCB_CPU | 88 | #ifdef CONFIG_RCU_NOCB_CPU |
| 89 | #ifndef CONFIG_RCU_NOCB_CPU_NONE | 89 | #ifndef CONFIG_RCU_NOCB_CPU_NONE |
| 90 | if (!have_rcu_nocb_mask) { | 90 | if (!have_rcu_nocb_mask) { |
| 91 | alloc_bootmem_cpumask_var(&rcu_nocb_mask); | 91 | zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL); |
| 92 | have_rcu_nocb_mask = true; | 92 | have_rcu_nocb_mask = true; |
| 93 | } | 93 | } |
| 94 | #ifdef CONFIG_RCU_NOCB_CPU_ZERO | 94 | #ifdef CONFIG_RCU_NOCB_CPU_ZERO |
| @@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj) | |||
| 1667 | rdtp->last_accelerate = jiffies; | 1667 | rdtp->last_accelerate = jiffies; |
| 1668 | 1668 | ||
| 1669 | /* Request timer delay depending on laziness, and round. */ | 1669 | /* Request timer delay depending on laziness, and round. */ |
| 1670 | if (rdtp->all_lazy) { | 1670 | if (!rdtp->all_lazy) { |
| 1671 | *dj = round_up(rcu_idle_gp_delay + jiffies, | 1671 | *dj = round_up(rcu_idle_gp_delay + jiffies, |
| 1672 | rcu_idle_gp_delay) - jiffies; | 1672 | rcu_idle_gp_delay) - jiffies; |
| 1673 | } else { | 1673 | } else { |
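
Both this hunk and the tick-broadcast one further down replace plain mask allocation with zalloc_cpumask_var(), so the mask starts out with every bit clear before any CPU is added to it. The toy below makes the same point with calloc() and a byte array standing in for cpumask_var_t; it is only an analogy, not the kernel cpumask API.

#include <stdio.h>
#include <stdlib.h>

#define NBITS 64

/* calloc() plays the "zalloc" role here: every bit starts out clear. */
static unsigned char *alloc_mask_zeroed(void)
{
	return calloc(NBITS / 8, 1);
}

static void mask_set(unsigned char *mask, int bit)
{
	mask[bit / 8] |= 1u << (bit % 8);
}

static int mask_test(const unsigned char *mask, int bit)
{
	return (mask[bit / 8] >> (bit % 8)) & 1;
}

int main(void)
{
	unsigned char *mask = alloc_mask_zeroed();

	if (!mask)
		return 1;
	mask_set(mask, 3);
	/* bit 5 is reliably 0 only because the allocation was zeroed */
	printf("bit 3: %d, bit 5: %d\n", mask_test(mask, 3), mask_test(mask, 5));
	free(mask);
	return 0;
}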
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index e4c07b0692bb..70f27e89012b 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
| @@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG | |||
| 12 | config ARCH_CLOCKSOURCE_DATA | 12 | config ARCH_CLOCKSOURCE_DATA |
| 13 | bool | 13 | bool |
| 14 | 14 | ||
| 15 | # Platforms has a persistent clock | ||
| 16 | config ALWAYS_USE_PERSISTENT_CLOCK | ||
| 17 | bool | ||
| 18 | default n | ||
| 19 | |||
| 20 | # Timekeeping vsyscall support | 15 | # Timekeeping vsyscall support |
| 21 | config GENERIC_TIME_VSYSCALL | 16 | config GENERIC_TIME_VSYSCALL |
| 22 | bool | 17 | bool |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 206bbfb34e09..24938d577669 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -786,11 +786,11 @@ bool tick_broadcast_oneshot_available(void) | |||
| 786 | 786 | ||
| 787 | void __init tick_broadcast_init(void) | 787 | void __init tick_broadcast_init(void) |
| 788 | { | 788 | { |
| 789 | alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); | 789 | zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); |
| 790 | alloc_cpumask_var(&tmpmask, GFP_NOWAIT); | 790 | zalloc_cpumask_var(&tmpmask, GFP_NOWAIT); |
| 791 | #ifdef CONFIG_TICK_ONESHOT | 791 | #ifdef CONFIG_TICK_ONESHOT |
| 792 | alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); | 792 | zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); |
| 793 | alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); | 793 | zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); |
| 794 | alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); | 794 | zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); |
| 795 | #endif | 795 | #endif |
| 796 | } | 796 | } |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index bc67d4245e1d..f4208138fbf4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -717,6 +717,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |||
| 717 | if (unlikely(!cpu_online(cpu))) { | 717 | if (unlikely(!cpu_online(cpu))) { |
| 718 | if (cpu == tick_do_timer_cpu) | 718 | if (cpu == tick_do_timer_cpu) |
| 719 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | 719 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
| 720 | return false; | ||
| 720 | } | 721 | } |
| 721 | 722 | ||
| 722 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 723 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
| @@ -1168,7 +1169,7 @@ void tick_cancel_sched_timer(int cpu) | |||
| 1168 | hrtimer_cancel(&ts->sched_timer); | 1169 | hrtimer_cancel(&ts->sched_timer); |
| 1169 | # endif | 1170 | # endif |
| 1170 | 1171 | ||
| 1171 | ts->nohz_mode = NOHZ_MODE_INACTIVE; | 1172 | memset(ts, 0, sizeof(*ts)); |
| 1172 | } | 1173 | } |
| 1173 | #endif | 1174 | #endif |
| 1174 | 1175 | ||
diff --git a/kernel/timer.c b/kernel/timer.c index a860bba34412..15ffdb3f1948 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu) | |||
| 1539 | boot_done = 1; | 1539 | boot_done = 1; |
| 1540 | base = &boot_tvec_bases; | 1540 | base = &boot_tvec_bases; |
| 1541 | } | 1541 | } |
| 1542 | spin_lock_init(&base->lock); | ||
| 1542 | tvec_base_done[cpu] = 1; | 1543 | tvec_base_done[cpu] = 1; |
| 1543 | } else { | 1544 | } else { |
| 1544 | base = per_cpu(tvec_bases, cpu); | 1545 | base = per_cpu(tvec_bases, cpu); |
| 1545 | } | 1546 | } |
| 1546 | 1547 | ||
| 1547 | spin_lock_init(&base->lock); | ||
| 1548 | 1548 | ||
| 1549 | for (j = 0; j < TVN_SIZE; j++) { | 1549 | for (j = 0; j < TVN_SIZE; j++) { |
| 1550 | INIT_LIST_HEAD(base->tv5.vec + j); | 1550 | INIT_LIST_HEAD(base->tv5.vec + j); |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index a6361178de5a..e1b653f7e1ca 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter, | |||
| 750 | 750 | ||
| 751 | static void __free_preds(struct event_filter *filter) | 751 | static void __free_preds(struct event_filter *filter) |
| 752 | { | 752 | { |
| 753 | int i; | ||
| 754 | |||
| 753 | if (filter->preds) { | 755 | if (filter->preds) { |
| 756 | for (i = 0; i < filter->n_preds; i++) | ||
| 757 | kfree(filter->preds[i].ops); | ||
| 754 | kfree(filter->preds); | 758 | kfree(filter->preds); |
| 755 | filter->preds = NULL; | 759 | filter->preds = NULL; |
| 756 | } | 760 | } |
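
The __free_preds() fix above plugs a leak: each predicate owns a separately allocated ops buffer, so those must be released before the preds array itself. Below is a minimal self-contained version of the same ownership pattern, with invented struct and field names.

#include <stdlib.h>

struct pred {
	int *ops;		/* per-predicate allocation owned by this entry */
};

struct filter {
	struct pred *preds;
	int n_preds;
};

static void free_preds(struct filter *filter)
{
	int i;

	if (filter->preds) {
		/* Free what each entry owns first, then the array itself. */
		for (i = 0; i < filter->n_preds; i++)
			free(filter->preds[i].ops);
		free(filter->preds);
		filter->preds = NULL;
	}
	filter->n_preds = 0;
}

int main(void)
{
	struct filter f = { 0 };
	int i;

	f.n_preds = 3;
	f.preds = calloc(f.n_preds, sizeof(*f.preds));
	if (!f.preds)
		return 1;
	for (i = 0; i < f.n_preds; i++)
		f.preds[i].ops = malloc(16);

	free_preds(&f);
	return 0;
}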
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 636d45fe69b3..9f46e98ba8f2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -35,7 +35,7 @@ struct trace_probe { | |||
| 35 | const char *symbol; /* symbol name */ | 35 | const char *symbol; /* symbol name */ |
| 36 | struct ftrace_event_class class; | 36 | struct ftrace_event_class class; |
| 37 | struct ftrace_event_call call; | 37 | struct ftrace_event_call call; |
| 38 | struct ftrace_event_file **files; | 38 | struct ftrace_event_file * __rcu *files; |
| 39 | ssize_t size; /* trace entry size */ | 39 | ssize_t size; /* trace entry size */ |
| 40 | unsigned int nr_args; | 40 | unsigned int nr_args; |
| 41 | struct probe_arg args[]; | 41 | struct probe_arg args[]; |
| @@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event, | |||
| 185 | 185 | ||
| 186 | static int trace_probe_nr_files(struct trace_probe *tp) | 186 | static int trace_probe_nr_files(struct trace_probe *tp) |
| 187 | { | 187 | { |
| 188 | struct ftrace_event_file **file = tp->files; | 188 | struct ftrace_event_file **file; |
| 189 | int ret = 0; | 189 | int ret = 0; |
| 190 | 190 | ||
| 191 | /* | ||
| 192 | * Since all tp->files updates are protected by probe_enable_lock, | ||
| 193 | * we don't need to take rcu_read_lock here. | ||
| 194 | */ | ||
| 195 | file = rcu_dereference_raw(tp->files); | ||
| 191 | if (file) | 196 | if (file) |
| 192 | while (*(file++)) | 197 | while (*(file++)) |
| 193 | ret++; | 198 | ret++; |
| @@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 209 | mutex_lock(&probe_enable_lock); | 214 | mutex_lock(&probe_enable_lock); |
| 210 | 215 | ||
| 211 | if (file) { | 216 | if (file) { |
| 212 | struct ftrace_event_file **new, **old = tp->files; | 217 | struct ftrace_event_file **new, **old; |
| 213 | int n = trace_probe_nr_files(tp); | 218 | int n = trace_probe_nr_files(tp); |
| 214 | 219 | ||
| 220 | old = rcu_dereference_raw(tp->files); | ||
| 215 | /* 1 is for new one and 1 is for stopper */ | 221 | /* 1 is for new one and 1 is for stopper */ |
| 216 | new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), | 222 | new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), |
| 217 | GFP_KERNEL); | 223 | GFP_KERNEL); |
| @@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 251 | static int | 257 | static int |
| 252 | trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) | 258 | trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) |
| 253 | { | 259 | { |
| 260 | struct ftrace_event_file **files; | ||
| 254 | int i; | 261 | int i; |
| 255 | 262 | ||
| 256 | if (tp->files) { | 263 | /* |
| 257 | for (i = 0; tp->files[i]; i++) | 264 | * Since all tp->files updates are protected by probe_enable_lock, |
| 258 | if (tp->files[i] == file) | 265 | * we don't need to take rcu_read_lock here. |
| 266 | */ | ||
| 267 | files = rcu_dereference_raw(tp->files); | ||
| 268 | if (files) { | ||
| 269 | for (i = 0; files[i]; i++) | ||
| 270 | if (files[i] == file) | ||
| 259 | return i; | 271 | return i; |
| 260 | } | 272 | } |
| 261 | 273 | ||
| @@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 274 | mutex_lock(&probe_enable_lock); | 286 | mutex_lock(&probe_enable_lock); |
| 275 | 287 | ||
| 276 | if (file) { | 288 | if (file) { |
| 277 | struct ftrace_event_file **new, **old = tp->files; | 289 | struct ftrace_event_file **new, **old; |
| 278 | int n = trace_probe_nr_files(tp); | 290 | int n = trace_probe_nr_files(tp); |
| 279 | int i, j; | 291 | int i, j; |
| 280 | 292 | ||
| 293 | old = rcu_dereference_raw(tp->files); | ||
| 281 | if (n == 0 || trace_probe_file_index(tp, file) < 0) { | 294 | if (n == 0 || trace_probe_file_index(tp, file) < 0) { |
| 282 | ret = -EINVAL; | 295 | ret = -EINVAL; |
| 283 | goto out_unlock; | 296 | goto out_unlock; |
| @@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 872 | static __kprobes void | 885 | static __kprobes void |
| 873 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) | 886 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) |
| 874 | { | 887 | { |
| 875 | struct ftrace_event_file **file = tp->files; | 888 | /* |
| 889 | * Note: preempt is already disabled around the kprobe handler. | ||
| 890 | * However, we still need an smp_read_barrier_depends() corresponding | ||
| 891 | * to smp_wmb() in rcu_assign_pointer() to access the pointer. | ||
| 892 | */ | ||
| 893 | struct ftrace_event_file **file = rcu_dereference_raw(tp->files); | ||
| 894 | |||
| 895 | if (unlikely(!file)) | ||
| 896 | return; | ||
| 876 | 897 | ||
| 877 | /* Note: preempt is already disabled around the kprobe handler */ | ||
| 878 | while (*file) { | 898 | while (*file) { |
| 879 | __kprobe_trace_func(tp, regs, *file); | 899 | __kprobe_trace_func(tp, regs, *file); |
| 880 | file++; | 900 | file++; |
| @@ -925,9 +945,16 @@ static __kprobes void | |||
| 925 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 945 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, |
| 926 | struct pt_regs *regs) | 946 | struct pt_regs *regs) |
| 927 | { | 947 | { |
| 928 | struct ftrace_event_file **file = tp->files; | 948 | /* |
| 949 | * Note: preempt is already disabled around the kprobe handler. | ||
| 950 | * However, we still need an smp_read_barrier_depends() corresponding | ||
| 951 | * to smp_wmb() in rcu_assign_pointer() to access the pointer. | ||
| 952 | */ | ||
| 953 | struct ftrace_event_file **file = rcu_dereference_raw(tp->files); | ||
| 954 | |||
| 955 | if (unlikely(!file)) | ||
| 956 | return; | ||
| 929 | 957 | ||
| 930 | /* Note: preempt is already disabled around the kprobe handler */ | ||
| 931 | while (*file) { | 958 | while (*file) { |
| 932 | __kretprobe_trace_func(tp, ri, regs, *file); | 959 | __kretprobe_trace_func(tp, ri, regs, *file); |
| 933 | file++; | 960 | file++; |
| @@ -935,7 +962,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 935 | } | 962 | } |
| 936 | 963 | ||
| 937 | /* Event entry printers */ | 964 | /* Event entry printers */ |
| 938 | enum print_line_t | 965 | static enum print_line_t |
| 939 | print_kprobe_event(struct trace_iterator *iter, int flags, | 966 | print_kprobe_event(struct trace_iterator *iter, int flags, |
| 940 | struct trace_event *event) | 967 | struct trace_event *event) |
| 941 | { | 968 | { |
| @@ -971,7 +998,7 @@ partial: | |||
| 971 | return TRACE_TYPE_PARTIAL_LINE; | 998 | return TRACE_TYPE_PARTIAL_LINE; |
| 972 | } | 999 | } |
| 973 | 1000 | ||
| 974 | enum print_line_t | 1001 | static enum print_line_t |
| 975 | print_kretprobe_event(struct trace_iterator *iter, int flags, | 1002 | print_kretprobe_event(struct trace_iterator *iter, int flags, |
| 976 | struct trace_event *event) | 1003 | struct trace_event *event) |
| 977 | { | 1004 | { |
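
The trace_kprobe.c changes annotate tp->files as __rcu and read it through rcu_dereference_raw(), relying on probe_enable_lock on the update side and disabled preemption in the kprobe handlers instead of an explicit rcu_read_lock(). The sketch below is only a userspace analogue of the publish/consume ordering those comments describe, built on C11 atomics rather than the kernel RCU primitives; every name in it is illustrative.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct file_list {
	int nr;
	int ids[8];
};

static _Atomic(struct file_list *) files;	/* the published pointer */

/* Writer: build the new list completely, then publish it with release
 * ordering (the role rcu_assign_pointer() plays in the kernel code). */
static void publish_files(struct file_list *new_list)
{
	atomic_store_explicit(&files, new_list, memory_order_release);
}

/* Reader: acquire-load one consistent snapshot (rcu_dereference()'s role). */
static void walk_files(void)
{
	struct file_list *f = atomic_load_explicit(&files, memory_order_acquire);
	int i;

	if (!f)			/* mirrors the unlikely(!file) early return */
		return;
	for (i = 0; i < f->nr; i++)
		printf("file id %d\n", f->ids[i]);
}

int main(void)
{
	struct file_list *l = calloc(1, sizeof(*l));

	if (!l)
		return 1;
	l->nr = 2;
	l->ids[0] = 10;
	l->ids[1] = 11;
	publish_files(l);
	walk_files();
	free(l);
	return 0;
}

Unlike real RCU, this only gives ordering, not grace periods: it says nothing about when an old array may be freed, which the kernel code handles separately.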
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4aa9f5bc6b2d..ee8e29a2320c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); | |||
| 296 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; | 296 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; |
| 297 | 297 | ||
| 298 | struct workqueue_struct *system_wq __read_mostly; | 298 | struct workqueue_struct *system_wq __read_mostly; |
| 299 | EXPORT_SYMBOL_GPL(system_wq); | 299 | EXPORT_SYMBOL(system_wq); |
| 300 | struct workqueue_struct *system_highpri_wq __read_mostly; | 300 | struct workqueue_struct *system_highpri_wq __read_mostly; |
| 301 | EXPORT_SYMBOL_GPL(system_highpri_wq); | 301 | EXPORT_SYMBOL_GPL(system_highpri_wq); |
| 302 | struct workqueue_struct *system_long_wq __read_mostly; | 302 | struct workqueue_struct *system_long_wq __read_mostly; |
| @@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, | |||
| 1411 | local_irq_restore(flags); | 1411 | local_irq_restore(flags); |
| 1412 | return ret; | 1412 | return ret; |
| 1413 | } | 1413 | } |
| 1414 | EXPORT_SYMBOL_GPL(queue_work_on); | 1414 | EXPORT_SYMBOL(queue_work_on); |
| 1415 | 1415 | ||
| 1416 | void delayed_work_timer_fn(unsigned long __data) | 1416 | void delayed_work_timer_fn(unsigned long __data) |
| 1417 | { | 1417 | { |
| @@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 1485 | local_irq_restore(flags); | 1485 | local_irq_restore(flags); |
| 1486 | return ret; | 1486 | return ret; |
| 1487 | } | 1487 | } |
| 1488 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 1488 | EXPORT_SYMBOL(queue_delayed_work_on); |
| 1489 | 1489 | ||
| 1490 | /** | 1490 | /** |
| 1491 | * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU | 1491 | * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU |
| @@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker) | |||
| 2059 | if (unlikely(!mutex_trylock(&pool->manager_mutex))) { | 2059 | if (unlikely(!mutex_trylock(&pool->manager_mutex))) { |
| 2060 | spin_unlock_irq(&pool->lock); | 2060 | spin_unlock_irq(&pool->lock); |
| 2061 | mutex_lock(&pool->manager_mutex); | 2061 | mutex_lock(&pool->manager_mutex); |
| 2062 | spin_lock_irq(&pool->lock); | ||
| 2062 | ret = true; | 2063 | ret = true; |
| 2063 | } | 2064 | } |
| 2064 | 2065 | ||
| @@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void) | |||
| 4311 | * no synchronization around this function and the test result is | 4312 | * no synchronization around this function and the test result is |
| 4312 | * unreliable and only useful as advisory hints or for debugging. | 4313 | * unreliable and only useful as advisory hints or for debugging. |
| 4313 | * | 4314 | * |
| 4315 | * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. | ||
| 4316 | * Note that both per-cpu and unbound workqueues may be associated with | ||
| 4317 | * multiple pool_workqueues which have separate congested states. A | ||
| 4318 | * workqueue being congested on one CPU doesn't mean the workqueue is also | ||
| 4319 | * congested on other CPUs / NUMA nodes. | ||
| 4320 | * | ||
| 4314 | * RETURNS: | 4321 | * RETURNS: |
| 4315 | * %true if congested, %false otherwise. | 4322 | * %true if congested, %false otherwise. |
| 4316 | */ | 4323 | */ |
| @@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) | |||
| 4321 | 4328 | ||
| 4322 | rcu_read_lock_sched(); | 4329 | rcu_read_lock_sched(); |
| 4323 | 4330 | ||
| 4331 | if (cpu == WORK_CPU_UNBOUND) | ||
| 4332 | cpu = smp_processor_id(); | ||
| 4333 | |||
| 4324 | if (!(wq->flags & WQ_UNBOUND)) | 4334 | if (!(wq->flags & WQ_UNBOUND)) |
| 4325 | pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); | 4335 | pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); |
| 4326 | else | 4336 | else |
| @@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void) | |||
| 4895 | BUG_ON(!tbl); | 4905 | BUG_ON(!tbl); |
| 4896 | 4906 | ||
| 4897 | for_each_node(node) | 4907 | for_each_node(node) |
| 4898 | BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); | 4908 | BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, |
| 4909 | node_online(node) ? node : NUMA_NO_NODE)); | ||
| 4899 | 4910 | ||
| 4900 | for_each_possible_cpu(cpu) { | 4911 | for_each_possible_cpu(cpu) { |
| 4901 | node = cpu_to_node(cpu); | 4912 | node = cpu_to_node(cpu); |
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 8e15d966d9b0..239992021b1d 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
| @@ -837,6 +837,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | |||
| 837 | 837 | ||
| 838 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); | 838 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); |
| 839 | if (dat_entry) { | 839 | if (dat_entry) { |
| 840 | /* If the ARP request is destined for a local client the local | ||
| 841 | * client will answer itself. DAT would only generate a | ||
| 842 | * duplicate packet. | ||
| 843 | * | ||
| 844 | * Moreover, if the soft-interface is enslaved into a bridge, an | ||
| 845 | * additional DAT answer may trigger kernel warnings about | ||
| 846 | * a packet coming from the wrong port. | ||
| 847 | */ | ||
| 848 | if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) { | ||
| 849 | ret = true; | ||
| 850 | goto out; | ||
| 851 | } | ||
| 852 | |||
| 840 | skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, | 853 | skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, |
| 841 | bat_priv->soft_iface, ip_dst, hw_src, | 854 | bat_priv->soft_iface, ip_dst, hw_src, |
| 842 | dat_entry->mac_addr, hw_src); | 855 | dat_entry->mac_addr, hw_src); |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 3e30a0f1b908..1240f07ad31d 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
| @@ -163,14 +163,22 @@ void batadv_mesh_free(struct net_device *soft_iface) | |||
| 163 | batadv_vis_quit(bat_priv); | 163 | batadv_vis_quit(bat_priv); |
| 164 | 164 | ||
| 165 | batadv_gw_node_purge(bat_priv); | 165 | batadv_gw_node_purge(bat_priv); |
| 166 | batadv_originator_free(bat_priv); | ||
| 167 | batadv_nc_free(bat_priv); | 166 | batadv_nc_free(bat_priv); |
| 167 | batadv_dat_free(bat_priv); | ||
| 168 | batadv_bla_free(bat_priv); | ||
| 168 | 169 | ||
| 170 | /* Free the TT and the originator tables only after having terminated | ||
| 171 | * all the other dependent components which may use these structures for | ||
| 172 | * their purposes. | ||
| 173 | */ | ||
| 169 | batadv_tt_free(bat_priv); | 174 | batadv_tt_free(bat_priv); |
| 170 | 175 | ||
| 171 | batadv_bla_free(bat_priv); | 176 | /* Since the originator table cleanup routine is accessing the TT |
| 172 | 177 | * tables as well, it has to be invoked after the TT tables have been | |
| 173 | batadv_dat_free(bat_priv); | 178 | * freed and marked as empty. This ensures that no cleanup RCU callbacks |
| 179 | * accessing the TT data are scheduled for later execution. | ||
| 180 | */ | ||
| 181 | batadv_originator_free(bat_priv); | ||
| 174 | 182 | ||
| 175 | free_percpu(bat_priv->bat_counters); | 183 | free_percpu(bat_priv->bat_counters); |
| 176 | 184 | ||
| @@ -475,7 +483,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) | |||
| 475 | char *algo_name = (char *)val; | 483 | char *algo_name = (char *)val; |
| 476 | size_t name_len = strlen(algo_name); | 484 | size_t name_len = strlen(algo_name); |
| 477 | 485 | ||
| 478 | if (algo_name[name_len - 1] == '\n') | 486 | if (name_len > 0 && algo_name[name_len - 1] == '\n') |
| 479 | algo_name[name_len - 1] = '\0'; | 487 | algo_name[name_len - 1] = '\0'; |
| 480 | 488 | ||
| 481 | bat_algo_ops = batadv_algo_get(algo_name); | 489 | bat_algo_ops = batadv_algo_get(algo_name); |
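
The batadv_mesh_free() reordering above is about teardown dependencies: components that still read the TT and originator tables must be shut down before those tables are freed. Below is a toy example of the same rule, freeing the borrower before the owner, with structures made up for the illustration.

#include <stdio.h>
#include <stdlib.h>

struct table {
	int *entries;
};

struct cache {
	struct table *tab;	/* borrows entries owned by the table */
};

static void cache_free(struct cache *c)
{
	/* Still dereferences the table while flushing, so the table
	 * must not have been freed yet. */
	printf("flushing cache, first entry %d\n", c->tab->entries[0]);
	free(c);
}

static void table_free(struct table *t)
{
	free(t->entries);
	free(t);
}

int main(void)
{
	struct table *t = malloc(sizeof(*t));
	struct cache *c = malloc(sizeof(*c));

	if (!t || !c)
		return 1;
	t->entries = calloc(4, sizeof(int));
	if (!t->entries)
		return 1;
	c->tab = t;

	/* Tear down users of the table first, the table itself last. */
	cache_free(c);
	table_free(t);
	return 0;
}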
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index f7c54305a918..e84629ece9b7 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
| @@ -1514,6 +1514,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1514 | struct ethhdr *ethhdr, ethhdr_tmp; | 1514 | struct ethhdr *ethhdr, ethhdr_tmp; |
| 1515 | uint8_t *orig_dest, ttl, ttvn; | 1515 | uint8_t *orig_dest, ttl, ttvn; |
| 1516 | unsigned int coding_len; | 1516 | unsigned int coding_len; |
| 1517 | int err; | ||
| 1517 | 1518 | ||
| 1518 | /* Save headers temporarily */ | 1519 | /* Save headers temporarily */ |
| 1519 | memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp)); | 1520 | memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp)); |
| @@ -1568,8 +1569,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1568 | coding_len); | 1569 | coding_len); |
| 1569 | 1570 | ||
| 1570 | /* Resize decoded skb if decoded with larger packet */ | 1571 | /* Resize decoded skb if decoded with larger packet */ |
| 1571 | if (nc_packet->skb->len > coding_len + h_size) | 1572 | if (nc_packet->skb->len > coding_len + h_size) { |
| 1572 | pskb_trim_rcsum(skb, coding_len + h_size); | 1573 | err = pskb_trim_rcsum(skb, coding_len + h_size); |
| 1574 | if (err) | ||
| 1575 | return NULL; | ||
| 1576 | } | ||
| 1573 | 1577 | ||
| 1574 | /* Create decoded unicast packet */ | 1578 | /* Create decoded unicast packet */ |
| 1575 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 1579 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index a3395fdfbd4f..d5953b87918c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, | |||
| 1204 | mutex_lock(&osdc->request_mutex); | 1204 | mutex_lock(&osdc->request_mutex); |
| 1205 | if (req->r_linger) { | 1205 | if (req->r_linger) { |
| 1206 | __unregister_linger_request(osdc, req); | 1206 | __unregister_linger_request(osdc, req); |
| 1207 | req->r_linger = 0; | ||
| 1207 | ceph_osdc_put_request(req); | 1208 | ceph_osdc_put_request(req); |
| 1208 | } | 1209 | } |
| 1209 | mutex_unlock(&osdc->request_mutex); | 1210 | mutex_unlock(&osdc->request_mutex); |
| @@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |||
| 2120 | down_read(&osdc->map_sem); | 2121 | down_read(&osdc->map_sem); |
| 2121 | mutex_lock(&osdc->request_mutex); | 2122 | mutex_lock(&osdc->request_mutex); |
| 2122 | __register_request(osdc, req); | 2123 | __register_request(osdc, req); |
| 2123 | WARN_ON(req->r_sent); | 2124 | req->r_sent = 0; |
| 2125 | req->r_got_reply = 0; | ||
| 2126 | req->r_completed = 0; | ||
| 2124 | rc = __map_request(osdc, req, 0); | 2127 | rc = __map_request(osdc, req, 0); |
| 2125 | if (rc < 0) { | 2128 | if (rc < 0) { |
| 2126 | if (nofail) { | 2129 | if (nofail) { |
diff --git a/net/core/sock.c b/net/core/sock.c index d4f4cea726e7..6ba327da79e1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) | |||
| 1217 | #endif | 1217 | #endif |
| 1218 | } | 1218 | } |
| 1219 | 1219 | ||
| 1220 | /* | ||
| 1221 | * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes | ||
| 1222 | * un-modified. Special care is taken when initializing object to zero. | ||
| 1223 | */ | ||
| 1224 | static inline void sk_prot_clear_nulls(struct sock *sk, int size) | ||
| 1225 | { | ||
| 1226 | if (offsetof(struct sock, sk_node.next) != 0) | ||
| 1227 | memset(sk, 0, offsetof(struct sock, sk_node.next)); | ||
| 1228 | memset(&sk->sk_node.pprev, 0, | ||
| 1229 | size - offsetof(struct sock, sk_node.pprev)); | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) | 1220 | void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) |
| 1233 | { | 1221 | { |
| 1234 | unsigned long nulls1, nulls2; | 1222 | unsigned long nulls1, nulls2; |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 147abf5275aa..4bcabf3ab4ca 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; | |||
| 84 | EXPORT_SYMBOL(sysctl_ip_default_ttl); | 84 | EXPORT_SYMBOL(sysctl_ip_default_ttl); |
| 85 | 85 | ||
| 86 | /* Generate a checksum for an outgoing IP datagram. */ | 86 | /* Generate a checksum for an outgoing IP datagram. */ |
| 87 | __inline__ void ip_send_check(struct iphdr *iph) | 87 | void ip_send_check(struct iphdr *iph) |
| 88 | { | 88 | { |
| 89 | iph->check = 0; | 89 | iph->check = 0; |
| 90 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 90 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index d3ddd8400354..ecd60733e5e2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, | |||
| 1081 | } | 1081 | } |
| 1082 | if (t == NULL) | 1082 | if (t == NULL) |
| 1083 | t = netdev_priv(dev); | 1083 | t = netdev_priv(dev); |
| 1084 | memset(&p, 0, sizeof(p)); | ||
| 1084 | ip6gre_tnl_parm_to_user(&p, &t->parms); | 1085 | ip6gre_tnl_parm_to_user(&p, &t->parms); |
| 1085 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 1086 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) |
| 1086 | err = -EFAULT; | 1087 | err = -EFAULT; |
| @@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, | |||
| 1128 | if (t) { | 1129 | if (t) { |
| 1129 | err = 0; | 1130 | err = 0; |
| 1130 | 1131 | ||
| 1132 | memset(&p, 0, sizeof(p)); | ||
| 1131 | ip6gre_tnl_parm_to_user(&p, &t->parms); | 1133 | ip6gre_tnl_parm_to_user(&p, &t->parms); |
| 1132 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 1134 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) |
| 1133 | err = -EFAULT; | 1135 | err = -EFAULT; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 71167069b394..0a17ed9eaf39 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1890,6 +1890,17 @@ void tcp6_proc_exit(struct net *net) | |||
| 1890 | } | 1890 | } |
| 1891 | #endif | 1891 | #endif |
| 1892 | 1892 | ||
| 1893 | static void tcp_v6_clear_sk(struct sock *sk, int size) | ||
| 1894 | { | ||
| 1895 | struct inet_sock *inet = inet_sk(sk); | ||
| 1896 | |||
| 1897 | /* we do not want to clear pinet6 field, because of RCU lookups */ | ||
| 1898 | sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); | ||
| 1899 | |||
| 1900 | size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); | ||
| 1901 | memset(&inet->pinet6 + 1, 0, size); | ||
| 1902 | } | ||
| 1903 | |||
| 1893 | struct proto tcpv6_prot = { | 1904 | struct proto tcpv6_prot = { |
| 1894 | .name = "TCPv6", | 1905 | .name = "TCPv6", |
| 1895 | .owner = THIS_MODULE, | 1906 | .owner = THIS_MODULE, |
| @@ -1933,6 +1944,7 @@ struct proto tcpv6_prot = { | |||
| 1933 | #ifdef CONFIG_MEMCG_KMEM | 1944 | #ifdef CONFIG_MEMCG_KMEM |
| 1934 | .proto_cgroup = tcp_proto_cgroup, | 1945 | .proto_cgroup = tcp_proto_cgroup, |
| 1935 | #endif | 1946 | #endif |
| 1947 | .clear_sk = tcp_v6_clear_sk, | ||
| 1936 | }; | 1948 | }; |
| 1937 | 1949 | ||
| 1938 | static const struct inet6_protocol tcpv6_protocol = { | 1950 | static const struct inet6_protocol tcpv6_protocol = { |
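
tcp_v6_clear_sk() above (and udp_v6_clear_sk() in the next file) zero the socket in two memset() calls so that the pinet6 pointer in the middle survives, because RCU lookups may still follow it. The snippet below shows just that offsetof()-based "clear everything except one field" trick on an invented struct; it deliberately omits the sk_node.next special-casing that sk_prot_clear_nulls() adds on top.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct conn {
	int a;
	int b;
	void *keep;	/* must survive the clear, like pinet6 */
	int c;
	int d;
};

static void clear_conn_but_keep(struct conn *cn)
{
	size_t off = offsetof(struct conn, keep);

	/* Zero everything before the preserved field ... */
	memset(cn, 0, off);
	/* ... and everything after it, leaving 'keep' untouched. */
	memset(&cn->keep + 1, 0, sizeof(*cn) - off - sizeof(cn->keep));
}

int main(void)
{
	struct conn cn = { 1, 2, (void *)&cn, 3, 4 };

	clear_conn_but_keep(&cn);
	printf("a=%d d=%d keep=%p\n", cn.a, cn.d, cn.keep);
	return 0;
}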
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d4defdd44937..42923b14dfa6 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -1432,6 +1432,17 @@ void udp6_proc_exit(struct net *net) { | |||
| 1432 | } | 1432 | } |
| 1433 | #endif /* CONFIG_PROC_FS */ | 1433 | #endif /* CONFIG_PROC_FS */ |
| 1434 | 1434 | ||
| 1435 | void udp_v6_clear_sk(struct sock *sk, int size) | ||
| 1436 | { | ||
| 1437 | struct inet_sock *inet = inet_sk(sk); | ||
| 1438 | |||
| 1439 | /* we do not want to clear pinet6 field, because of RCU lookups */ | ||
| 1440 | sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); | ||
| 1441 | |||
| 1442 | size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); | ||
| 1443 | memset(&inet->pinet6 + 1, 0, size); | ||
| 1444 | } | ||
| 1445 | |||
| 1435 | /* ------------------------------------------------------------------------ */ | 1446 | /* ------------------------------------------------------------------------ */ |
| 1436 | 1447 | ||
| 1437 | struct proto udpv6_prot = { | 1448 | struct proto udpv6_prot = { |
| @@ -1462,7 +1473,7 @@ struct proto udpv6_prot = { | |||
| 1462 | .compat_setsockopt = compat_udpv6_setsockopt, | 1473 | .compat_setsockopt = compat_udpv6_setsockopt, |
| 1463 | .compat_getsockopt = compat_udpv6_getsockopt, | 1474 | .compat_getsockopt = compat_udpv6_getsockopt, |
| 1464 | #endif | 1475 | #endif |
| 1465 | .clear_sk = sk_prot_clear_portaddr_nulls, | 1476 | .clear_sk = udp_v6_clear_sk, |
| 1466 | }; | 1477 | }; |
| 1467 | 1478 | ||
| 1468 | static struct inet_protosw udpv6_protosw = { | 1479 | static struct inet_protosw udpv6_protosw = { |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index d7571046bfc4..4691ed50a928 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
| @@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 31 | extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); | 31 | extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); |
| 32 | extern void udpv6_destroy_sock(struct sock *sk); | 32 | extern void udpv6_destroy_sock(struct sock *sk); |
| 33 | 33 | ||
| 34 | extern void udp_v6_clear_sk(struct sock *sk, int size); | ||
| 35 | |||
| 34 | #ifdef CONFIG_PROC_FS | 36 | #ifdef CONFIG_PROC_FS |
| 35 | extern int udp6_seq_show(struct seq_file *seq, void *v); | 37 | extern int udp6_seq_show(struct seq_file *seq, void *v); |
| 36 | #endif | 38 | #endif |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 1d08e21d9f69..dfcc4be46898 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
| @@ -56,7 +56,7 @@ struct proto udplitev6_prot = { | |||
| 56 | .compat_setsockopt = compat_udpv6_setsockopt, | 56 | .compat_setsockopt = compat_udpv6_setsockopt, |
| 57 | .compat_getsockopt = compat_udpv6_getsockopt, | 57 | .compat_getsockopt = compat_udpv6_getsockopt, |
| 58 | #endif | 58 | #endif |
| 59 | .clear_sk = sk_prot_clear_portaddr_nulls, | 59 | .clear_sk = udp_v6_clear_sk, |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| 62 | static struct inet_protosw udplite6_protosw = { | 62 | static struct inet_protosw udplite6_protosw = { |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 4ef7bdb65440..23ed03d786c8 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
| @@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
| 103 | dev_hold(dev); | 103 | dev_hold(dev); |
| 104 | 104 | ||
| 105 | xdst->u.rt6.rt6i_idev = in6_dev_get(dev); | 105 | xdst->u.rt6.rt6i_idev = in6_dev_get(dev); |
| 106 | if (!xdst->u.rt6.rt6i_idev) | 106 | if (!xdst->u.rt6.rt6i_idev) { |
| 107 | dev_put(dev); | ||
| 107 | return -ENODEV; | 108 | return -ENODEV; |
| 109 | } | ||
| 108 | 110 | ||
| 109 | rt6_transfer_peer(&xdst->u.rt6, rt); | 111 | rt6_transfer_peer(&xdst->u.rt6, rt); |
| 110 | 112 | ||
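
The xfrm6_fill_dst() fix above drops the device reference taken by dev_hold() when the in6_dev lookup fails, so the error path no longer leaks a refcount. Below is a small stand-alone model of that rule with a plain counter in place of the netdevice refcount; dev_hold_()/dev_put_() and the struct fields are invented names.

#include <stdio.h>

struct device {
	int refcnt;
	int has_inet6;	/* stands in for in6_dev_get() succeeding */
};

static void dev_hold_(struct device *dev) { dev->refcnt++; }
static void dev_put_(struct device *dev)  { dev->refcnt--; }

static int fill_dst(struct device *dev)
{
	dev_hold_(dev);

	if (!dev->has_inet6) {
		/* Drop the reference we just took before bailing out. */
		dev_put_(dev);
		return -1;	/* -ENODEV in the original */
	}
	/* ... the rest of the setup would keep the reference ... */
	return 0;
}

int main(void)
{
	struct device dev = { .refcnt = 1, .has_inet6 = 0 };

	fill_dst(&dev);
	printf("refcnt after failed fill: %d\n", dev.refcnt);	/* still 1 */
	return 0;
}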
diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 84a406070f6f..a4f31c900fa6 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile | |||
| @@ -63,7 +63,7 @@ binrpm-pkg: FORCE | |||
| 63 | mv -f $(objtree)/.tmp_version $(objtree)/.version | 63 | mv -f $(objtree)/.tmp_version $(objtree)/.version |
| 64 | 64 | ||
| 65 | $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \ | 65 | $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \ |
| 66 | $(UTS_MACHINE) -bb $< | 66 | $(UTS_MACHINE) -bb $(objtree)/binkernel.spec |
| 67 | rm binkernel.spec | 67 | rm binkernel.spec |
| 68 | 68 | ||
| 69 | # Deb target | 69 | # Deb target |
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c index 552b97afbca5..61ab640e195f 100644 --- a/sound/aoa/fabrics/layout.c +++ b/sound/aoa/fabrics/layout.c | |||
| @@ -113,6 +113,7 @@ MODULE_ALIAS("sound-layout-100"); | |||
| 113 | MODULE_ALIAS("aoa-device-id-14"); | 113 | MODULE_ALIAS("aoa-device-id-14"); |
| 114 | MODULE_ALIAS("aoa-device-id-22"); | 114 | MODULE_ALIAS("aoa-device-id-22"); |
| 115 | MODULE_ALIAS("aoa-device-id-35"); | 115 | MODULE_ALIAS("aoa-device-id-35"); |
| 116 | MODULE_ALIAS("aoa-device-id-44"); | ||
| 116 | 117 | ||
| 117 | /* onyx with all but microphone connected */ | 118 | /* onyx with all but microphone connected */ |
| 118 | static struct codec_connection onyx_connections_nomic[] = { | 119 | static struct codec_connection onyx_connections_nomic[] = { |
| @@ -361,6 +362,13 @@ static struct layout layouts[] = { | |||
| 361 | .connections = tas_connections_nolineout, | 362 | .connections = tas_connections_nolineout, |
| 362 | }, | 363 | }, |
| 363 | }, | 364 | }, |
| 365 | /* PowerBook6,5 */ | ||
| 366 | { .device_id = 44, | ||
| 367 | .codecs[0] = { | ||
| 368 | .name = "tas", | ||
| 369 | .connections = tas_connections_all, | ||
| 370 | }, | ||
| 371 | }, | ||
| 364 | /* PowerBook6,7 */ | 372 | /* PowerBook6,7 */ |
| 365 | { .layout_id = 80, | 373 | { .layout_id = 80, |
| 366 | .codecs[0] = { | 374 | .codecs[0] = { |
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c index 010658335881..15e76131b501 100644 --- a/sound/aoa/soundbus/i2sbus/core.c +++ b/sound/aoa/soundbus/i2sbus/core.c | |||
| @@ -200,7 +200,8 @@ static int i2sbus_add_dev(struct macio_dev *macio, | |||
| 200 | * We probably cannot handle all device-id machines, | 200 | * We probably cannot handle all device-id machines, |
| 201 | * so restrict to those we do handle for now. | 201 | * so restrict to those we do handle for now. |
| 202 | */ | 202 | */ |
| 203 | if (id && (*id == 22 || *id == 14 || *id == 35)) { | 203 | if (id && (*id == 22 || *id == 14 || *id == 35 || |
| 204 | *id == 44)) { | ||
| 204 | snprintf(dev->sound.modalias, 32, | 205 | snprintf(dev->sound.modalias, 32, |
| 205 | "aoa-device-id-%d", *id); | 206 | "aoa-device-id-%d", *id); |
| 206 | ok = 1; | 207 | ok = 1; |
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig index 51c4ba95a32d..1a9640254433 100644 --- a/sound/oss/Kconfig +++ b/sound/oss/Kconfig | |||
| @@ -250,7 +250,7 @@ config MSND_FIFOSIZE | |||
| 250 | menuconfig SOUND_OSS | 250 | menuconfig SOUND_OSS |
| 251 | tristate "OSS sound modules" | 251 | tristate "OSS sound modules" |
| 252 | depends on ISA_DMA_API && VIRT_TO_BUS | 252 | depends on ISA_DMA_API && VIRT_TO_BUS |
| 253 | depends on !ISA_DMA_SUPPORT_BROKEN | 253 | depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN |
| 254 | help | 254 | help |
| 255 | OSS is the Open Sound System suite of sound card drivers. They make | 255 | OSS is the Open Sound System suite of sound card drivers. They make |
| 256 | sound programming easier since they provide a common API. Say Y or | 256 | sound programming easier since they provide a common API. Say Y or |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index ac079f93c535..ae85bbd2e6f8 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
| @@ -606,6 +606,10 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid, | |||
| 606 | return false; | 606 | return false; |
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | /* check whether the NID is referred to by any active paths */ | ||
| 610 | #define is_active_nid_for_any(codec, nid) \ | ||
| 611 | is_active_nid(codec, nid, HDA_OUTPUT, 0) | ||
| 612 | |||
| 609 | /* get the default amp value for the target state */ | 613 | /* get the default amp value for the target state */ |
| 610 | static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | 614 | static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, |
| 611 | int dir, unsigned int caps, bool enable) | 615 | int dir, unsigned int caps, bool enable) |
| @@ -759,7 +763,8 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) | |||
| 759 | 763 | ||
| 760 | for (i = 0; i < path->depth; i++) { | 764 | for (i = 0; i < path->depth; i++) { |
| 761 | hda_nid_t nid = path->path[i]; | 765 | hda_nid_t nid = path->path[i]; |
| 762 | if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) { | 766 | if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) && |
| 767 | !is_active_nid_for_any(codec, nid)) { | ||
| 763 | snd_hda_codec_write(codec, nid, 0, | 768 | snd_hda_codec_write(codec, nid, 0, |
| 764 | AC_VERB_SET_POWER_STATE, | 769 | AC_VERB_SET_POWER_STATE, |
| 765 | AC_PWRST_D3); | 770 | AC_PWRST_D3); |
| @@ -4157,7 +4162,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec, | |||
| 4157 | return power_state; | 4162 | return power_state; |
| 4158 | if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER) | 4163 | if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER) |
| 4159 | return power_state; | 4164 | return power_state; |
| 4160 | if (is_active_nid(codec, nid, HDA_OUTPUT, 0)) | 4165 | if (is_active_nid_for_any(codec, nid)) |
| 4161 | return power_state; | 4166 | return power_state; |
| 4162 | return AC_PWRST_D3; | 4167 | return AC_PWRST_D3; |
| 4163 | } | 4168 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6bf47f7326ad..59d2e91a9ab6 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -3482,6 +3482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 3482 | SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 3482 | SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 3483 | SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 3483 | SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 3484 | SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 3484 | SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 3485 | SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | ||
| 3485 | SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 3486 | SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 3486 | SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 3487 | SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 3487 | SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 3488 | SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
diff --git a/sound/soc/codecs/ab8500-codec.h b/sound/soc/codecs/ab8500-codec.h index 114f69a0c629..306d0bc8455f 100644 --- a/sound/soc/codecs/ab8500-codec.h +++ b/sound/soc/codecs/ab8500-codec.h | |||
| @@ -348,25 +348,25 @@ | |||
| 348 | 348 | ||
| 349 | /* AB8500_ADSLOTSELX */ | 349 | /* AB8500_ADSLOTSELX */ |
| 350 | #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00 | 350 | #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00 |
| 351 | #define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01 | 351 | #define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x10 |
| 352 | #define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02 | 352 | #define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x20 |
| 353 | #define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03 | 353 | #define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x30 |
| 354 | #define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04 | 354 | #define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x40 |
| 355 | #define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05 | 355 | #define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x50 |
| 356 | #define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06 | 356 | #define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x60 |
| 357 | #define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07 | 357 | #define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x70 |
| 358 | #define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08 | 358 | #define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x80 |
| 359 | #define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F | 359 | #define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0 |
| 360 | #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00 | 360 | #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00 |
| 361 | #define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10 | 361 | #define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01 |
| 362 | #define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20 | 362 | #define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02 |
| 363 | #define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30 | 363 | #define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03 |
| 364 | #define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40 | 364 | #define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04 |
| 365 | #define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50 | 365 | #define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05 |
| 366 | #define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60 | 366 | #define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06 |
| 367 | #define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70 | 367 | #define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07 |
| 368 | #define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80 | 368 | #define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x08 |
| 369 | #define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0 | 369 | #define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0x0F |
| 370 | #define AB8500_ADSLOTSELX_EVEN_SHIFT 0 | 370 | #define AB8500_ADSLOTSELX_EVEN_SHIFT 0 |
| 371 | #define AB8500_ADSLOTSELX_ODD_SHIFT 4 | 371 | #define AB8500_ADSLOTSELX_ODD_SHIFT 4 |
| 372 | 372 | ||
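Editor's note on the ab8500-codec.h change: the odd- and even-slot source values were swapped so they now line up with AB8500_ADSLOTSELX_ODD_SHIFT (4) and AB8500_ADSLOTSELX_EVEN_SHIFT (0), i.e. odd-slot sources occupy the high nibble and even-slot sources the low nibble. A minimal sketch, not taken from the driver, of composing one slot-select register value with the corrected definitions:

    /* Route AD_OUT2 to the odd slot (bits 7:4) and AD_OUT1 to the even slot
     * (bits 3:0); the corrected macros are already pre-shifted. */
    u8 adslotsel = AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD |  /* 0x10 */
                   AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN;  /* 0x00 */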
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c index 41230ad1c3e0..4a6f1daf911f 100644 --- a/sound/soc/codecs/da7213.c +++ b/sound/soc/codecs/da7213.c | |||
| @@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec) | |||
| 1488 | DA7213_DMIC_DATA_SEL_SHIFT); | 1488 | DA7213_DMIC_DATA_SEL_SHIFT); |
| 1489 | break; | 1489 | break; |
| 1490 | } | 1490 | } |
| 1491 | switch (pdata->dmic_data_sel) { | 1491 | switch (pdata->dmic_samplephase) { |
| 1492 | case DA7213_DMIC_SAMPLE_ON_CLKEDGE: | 1492 | case DA7213_DMIC_SAMPLE_ON_CLKEDGE: |
| 1493 | case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE: | 1493 | case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE: |
| 1494 | dmic_cfg |= (pdata->dmic_data_sel << | 1494 | dmic_cfg |= (pdata->dmic_samplephase << |
| 1495 | DA7213_DMIC_SAMPLEPHASE_SHIFT); | 1495 | DA7213_DMIC_SAMPLEPHASE_SHIFT); |
| 1496 | break; | 1496 | break; |
| 1497 | } | 1497 | } |
| 1498 | switch (pdata->dmic_data_sel) { | 1498 | switch (pdata->dmic_clk_rate) { |
| 1499 | case DA7213_DMIC_CLK_3_0MHZ: | 1499 | case DA7213_DMIC_CLK_3_0MHZ: |
| 1500 | case DA7213_DMIC_CLK_1_5MHZ: | 1500 | case DA7213_DMIC_CLK_1_5MHZ: |
| 1501 | dmic_cfg |= (pdata->dmic_data_sel << | 1501 | dmic_cfg |= (pdata->dmic_clk_rate << |
| 1502 | DA7213_DMIC_CLK_RATE_SHIFT); | 1502 | DA7213_DMIC_CLK_RATE_SHIFT); |
| 1503 | break; | 1503 | break; |
| 1504 | } | 1504 | } |
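Editor's note on the da7213.c hunk: it fixes a copy-and-paste error in da7213_probe(), where all three switch statements tested pdata->dmic_data_sel, so the DMIC sample-phase and clock-rate settings from platform data were silently ignored. After the fix each switch reads its own field. A hypothetical platform-data fragment illustrating the three independent fields (struct and field names follow the hunk; the values are examples only):

    static const struct da7213_platform_data example_pdata = {
            /* .dmic_data_sel is board specific and unaffected by this fix */
            .dmic_samplephase = DA7213_DMIC_SAMPLE_ON_CLKEDGE,
            .dmic_clk_rate    = DA7213_DMIC_CLK_1_5MHZ,
    };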
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index 8df2b6e1a1a6..370af0cbcc9a 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c | |||
| @@ -667,6 +667,7 @@ static int wm0010_boot(struct snd_soc_codec *codec) | |||
| 667 | /* On wm0010 only the CLKCTRL1 value is used */ | 667 | /* On wm0010 only the CLKCTRL1 value is used */ |
| 668 | pll_rec.clkctrl1 = wm0010->pll_clkctrl1; | 668 | pll_rec.clkctrl1 = wm0010->pll_clkctrl1; |
| 669 | 669 | ||
| 670 | ret = -ENOMEM; | ||
| 670 | len = pll_rec.length + 8; | 671 | len = pll_rec.length + 8; |
| 671 | out = kzalloc(len, GFP_KERNEL); | 672 | out = kzalloc(len, GFP_KERNEL); |
| 672 | if (!out) { | 673 | if (!out) { |
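Editor's note on the wm0010.c hunk: presetting ret to -ENOMEM before the allocation means the shared error path (not shown in the hunk) returns a meaningful error code when kzalloc() fails, rather than whatever value ret happened to hold. The pattern, sketched with an assumed label name:

    ret = -ENOMEM;                  /* set the error code up front ...      */
    out = kzalloc(len, GFP_KERNEL);
    if (!out)
            goto abort;             /* ... so the shared exit path can just
                                     * return ret without re-assigning it   */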
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 902fab02b851..c6fa03e2114a 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c | |||
| @@ -540,11 +540,6 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
| 540 | clk_prepare_enable(ssi->clk); | 540 | clk_prepare_enable(ssi->clk); |
| 541 | 541 | ||
| 542 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 542 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 543 | if (!res) { | ||
| 544 | ret = -ENODEV; | ||
| 545 | goto failed_get_resource; | ||
| 546 | } | ||
| 547 | |||
| 548 | ssi->base = devm_ioremap_resource(&pdev->dev, res); | 543 | ssi->base = devm_ioremap_resource(&pdev->dev, res); |
| 549 | if (IS_ERR(ssi->base)) { | 544 | if (IS_ERR(ssi->base)) { |
| 550 | ret = PTR_ERR(ssi->base); | 545 | ret = PTR_ERR(ssi->base); |
| @@ -633,7 +628,6 @@ failed_pdev_fiq_alloc: | |||
| 633 | snd_soc_unregister_component(&pdev->dev); | 628 | snd_soc_unregister_component(&pdev->dev); |
| 634 | failed_register: | 629 | failed_register: |
| 635 | release_mem_region(res->start, resource_size(res)); | 630 | release_mem_region(res->start, resource_size(res)); |
| 636 | failed_get_resource: | ||
| 637 | clk_disable_unprepare(ssi->clk); | 631 | clk_disable_unprepare(ssi->clk); |
| 638 | failed_clk: | 632 | failed_clk: |
| 639 | 633 | ||
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index befe68f59285..4c9dad3263c5 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
| @@ -471,11 +471,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) | |||
| 471 | dev_set_drvdata(&pdev->dev, priv); | 471 | dev_set_drvdata(&pdev->dev, priv); |
| 472 | 472 | ||
| 473 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 473 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 474 | if (!mem) { | ||
| 475 | dev_err(&pdev->dev, "platform_get_resource failed\n"); | ||
| 476 | return -ENXIO; | ||
| 477 | } | ||
| 478 | |||
| 479 | priv->io = devm_ioremap_resource(&pdev->dev, mem); | 474 | priv->io = devm_ioremap_resource(&pdev->dev, mem); |
| 480 | if (IS_ERR(priv->io)) | 475 | if (IS_ERR(priv->io)) |
| 481 | return PTR_ERR(priv->io); | 476 | return PTR_ERR(priv->io); |
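Editor's note on the two preceding hunks (imx-ssi.c and kirkwood-i2s.c): both drop the explicit NULL check on the resource returned by platform_get_resource(). The check is redundant because devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR on failure, which the existing IS_ERR() test already handles. The resulting canonical probe pattern looks like this (a generic sketch, not either driver verbatim):

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    /* no NULL check needed: devm_ioremap_resource() rejects a NULL or busy
     * resource and returns an ERR_PTR with the error already logged */
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);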
diff --git a/sound/usb/proc.c b/sound/usb/proc.c index 135c76871063..5f761ab34c01 100644 --- a/sound/usb/proc.c +++ b/sound/usb/proc.c | |||
| @@ -116,21 +116,22 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s | |||
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | static void proc_dump_ep_status(struct snd_usb_substream *subs, | 118 | static void proc_dump_ep_status(struct snd_usb_substream *subs, |
| 119 | struct snd_usb_endpoint *ep, | 119 | struct snd_usb_endpoint *data_ep, |
| 120 | struct snd_usb_endpoint *sync_ep, | ||
| 120 | struct snd_info_buffer *buffer) | 121 | struct snd_info_buffer *buffer) |
| 121 | { | 122 | { |
| 122 | if (!ep) | 123 | if (!data_ep) |
| 123 | return; | 124 | return; |
| 124 | snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize); | 125 | snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize); |
| 125 | snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", | 126 | snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", |
| 126 | subs->speed == USB_SPEED_FULL | 127 | subs->speed == USB_SPEED_FULL |
| 127 | ? get_full_speed_hz(ep->freqm) | 128 | ? get_full_speed_hz(data_ep->freqm) |
| 128 | : get_high_speed_hz(ep->freqm), | 129 | : get_high_speed_hz(data_ep->freqm), |
| 129 | ep->freqm >> 16, ep->freqm & 0xffff); | 130 | data_ep->freqm >> 16, data_ep->freqm & 0xffff); |
| 130 | if (ep->freqshift != INT_MIN) { | 131 | if (sync_ep && data_ep->freqshift != INT_MIN) { |
| 131 | int res = 16 - ep->freqshift; | 132 | int res = 16 - data_ep->freqshift; |
| 132 | snd_iprintf(buffer, " Feedback Format = %d.%d\n", | 133 | snd_iprintf(buffer, " Feedback Format = %d.%d\n", |
| 133 | (ep->syncmaxsize > 3 ? 32 : 24) - res, res); | 134 | (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res); |
| 134 | } | 135 | } |
| 135 | } | 136 | } |
| 136 | 137 | ||
| @@ -140,8 +141,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn | |||
| 140 | snd_iprintf(buffer, " Status: Running\n"); | 141 | snd_iprintf(buffer, " Status: Running\n"); |
| 141 | snd_iprintf(buffer, " Interface = %d\n", subs->interface); | 142 | snd_iprintf(buffer, " Interface = %d\n", subs->interface); |
| 142 | snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx); | 143 | snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx); |
| 143 | proc_dump_ep_status(subs, subs->data_endpoint, buffer); | 144 | proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer); |
| 144 | proc_dump_ep_status(subs, subs->sync_endpoint, buffer); | ||
| 145 | } else { | 145 | } else { |
| 146 | snd_iprintf(buffer, " Status: Stop\n"); | 146 | snd_iprintf(buffer, " Status: Stop\n"); |
| 147 | } | 147 | } |
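Editor's note on the sound/usb/proc.c change: proc_dump_ep_status() now receives both the data and the sync endpoint, so the "Feedback Format" line is only printed when a sync endpoint exists and takes its width from sync_ep->syncmaxsize instead of the data endpoint's. A worked example with assumed values (freqshift = 3, a 4-byte feedback packet):

    int res   = 16 - 3;                   /* 13 fractional bits               */
    int ipart = (4 > 3 ? 32 : 24) - res;  /* 4-byte packet -> 32-bit total,
                                           * so the report reads "19.13"      */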
