611 files changed, 9659 insertions, 3636 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1f89424c36a6..65bbd2622396 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -272,6 +272,8 @@ printk-formats.txt | |||
272 | - how to get printk format specifiers right | 272 | - how to get printk format specifiers right |
273 | prio_tree.txt | 273 | prio_tree.txt |
274 | - info on radix-priority-search-tree use for indexing vmas. | 274 | - info on radix-priority-search-tree use for indexing vmas. |
275 | ramoops.txt | ||
276 | - documentation of the ramoops oops/panic logging module. | ||
275 | rbtree.txt | 277 | rbtree.txt |
276 | - info on what red-black trees are and what they are for. | 278 | - info on what red-black trees are and what they are for. |
277 | robust-futex-ABI.txt | 279 | robust-futex-ABI.txt |
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 3f5e0b09bed5..53e6fca146d7 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -45,7 +45,7 @@ arrived in memory (this becomes more likely with devices behind PCI-PCI | |||
45 | bridges). In order to ensure that all the data has arrived in memory, | 45 | bridges). In order to ensure that all the data has arrived in memory, |
46 | the interrupt handler must read a register on the device which raised | 46 | the interrupt handler must read a register on the device which raised |
47 | the interrupt. PCI transaction ordering rules require that all the data | 47 | the interrupt. PCI transaction ordering rules require that all the data |
48 | arrives in memory before the value can be returned from the register. | 48 | arrive in memory before the value may be returned from the register. |
49 | Using MSIs avoids this problem as the interrupt-generating write cannot | 49 | Using MSIs avoids this problem as the interrupt-generating write cannot |
50 | pass the data writes, so by the time the interrupt is raised, the driver | 50 | pass the data writes, so by the time the interrupt is raised, the driver |
51 | knows that all the data has arrived in memory. | 51 | knows that all the data has arrived in memory. |
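To make the ordering rule concrete, here is a minimal sketch of a pin-based (INTx) handler that performs the flushing register read described above; the device structure, register offset and status bit are invented for illustration. An MSI handler needs no such read for ordering, since the interrupt message cannot pass the DMA writes that preceded it.

  #include <linux/interrupt.h>
  #include <linux/io.h>

  #define FOO_IRQ_STATUS   0x10          /* assumed register offset */
  #define FOO_IRQ_PENDING  0x1           /* assumed status bit */

  struct foo_dev {
          void __iomem *regs;
  };

  static irqreturn_t foo_intx_handler(int irq, void *data)
  {
          struct foo_dev *foo = data;
          u32 status;

          /*
           * Reading a device register flushes posted DMA writes: PCI
           * ordering guarantees the data is in memory before the read
           * value comes back.
           */
          status = ioread32(foo->regs + FOO_IRQ_STATUS);
          if (!(status & FOO_IRQ_PENDING))
                  return IRQ_NONE;

          /* ... consume the DMA'd data here ... */
          return IRQ_HANDLED;
  }
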
@@ -86,13 +86,13 @@ device. | |||
86 | 86 | ||
87 | int pci_enable_msi(struct pci_dev *dev) | 87 | int pci_enable_msi(struct pci_dev *dev) |
88 | 88 | ||
89 | A successful call will allocate ONE interrupt to the device, regardless | 89 | A successful call allocates ONE interrupt to the device, regardless |
90 | of how many MSIs the device supports. The device will be switched from | 90 | of how many MSIs the device supports. The device is switched from |
91 | pin-based interrupt mode to MSI mode. The dev->irq number is changed | 91 | pin-based interrupt mode to MSI mode. The dev->irq number is changed |
92 | to a new number which represents the message signaled interrupt. | 92 | to a new number which represents the message signaled interrupt; |
93 | This function should be called before the driver calls request_irq() | 93 | consequently, this function should be called before the driver calls |
94 | since enabling MSIs disables the pin-based IRQ and the driver will not | 94 | request_irq(), because an MSI is delivered via a vector that is |
95 | receive interrupts on the old interrupt. | 95 | different from the vector of a pin-based interrupt. |
96 | 96 | ||
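A minimal sketch of the resulting call order for a hypothetical 'foo' driver (the handler body and the 'ctx' cookie are placeholders): enable MSI first, then request the rewritten dev->irq.

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  static irqreturn_t foo_msi_handler(int irq, void *ctx)
  {
          return IRQ_HANDLED;                  /* real handler work elided */
  }

  static int foo_setup_irq(struct pci_dev *pdev, void *ctx)
  {
          int err;

          err = pci_enable_msi(pdev);          /* rewrites pdev->irq */
          if (err)
                  return err;                  /* could fall back to INTx */

          err = request_irq(pdev->irq, foo_msi_handler, 0, "foo", ctx);
          if (err)
                  pci_disable_msi(pdev);       /* undo on failure */
          return err;
  }
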
97 | 4.2.2 pci_enable_msi_block | 97 | 4.2.2 pci_enable_msi_block |
98 | 98 | ||
@@ -111,20 +111,20 @@ the device are in the range dev->irq to dev->irq + count - 1. | |||
111 | 111 | ||
112 | If this function returns a negative number, it indicates an error and | 112 | If this function returns a negative number, it indicates an error and |
113 | the driver should not attempt to request any more MSI interrupts for | 113 | the driver should not attempt to request any more MSI interrupts for |
114 | this device. If this function returns a positive number, it will be | 114 | this device. If this function returns a positive number, it is |
115 | less than 'count' and indicate the number of interrupts that could have | 115 | less than 'count' and indicates the number of interrupts that could have |
116 | been allocated. In neither case will the irq value have been | 116 | been allocated. In neither case is the irq value updated or the device |
117 | updated, nor will the device have been switched into MSI mode. | 117 | switched into MSI mode. |
118 | 118 | ||
119 | The device driver must decide what action to take if | 119 | The device driver must decide what action to take if |
120 | pci_enable_msi_block() returns a value less than the number asked for. | 120 | pci_enable_msi_block() returns a value less than the number requested. |
121 | Some devices can make use of fewer interrupts than the maximum they | 121 | For instance, the driver could still make use of fewer interrupts; |
122 | request; in this case the driver should call pci_enable_msi_block() | 122 | in this case the driver should call pci_enable_msi_block() |
123 | again. Note that it is not guaranteed to succeed, even when the | 123 | again. Note that it is not guaranteed to succeed, even when the |
124 | 'count' has been reduced to the value returned from a previous call to | 124 | 'count' has been reduced to the value returned from a previous call to |
125 | pci_enable_msi_block(). This is because there are multiple constraints | 125 | pci_enable_msi_block(). This is because there are multiple constraints |
126 | on the number of vectors that can be allocated; pci_enable_msi_block() | 126 | on the number of vectors that can be allocated; pci_enable_msi_block() |
127 | will return as soon as it finds any constraint that doesn't allow the | 127 | returns as soon as it finds any constraint that doesn't allow the |
128 | call to succeed. | 128 | call to succeed. |
129 | 129 | ||
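A sketch of that retry logic, assuming a hypothetical driver minimum of two vectors (FOO_MIN_MSI is not a real kernel constant):

  #include <linux/pci.h>
  #include <linux/errno.h>

  #define FOO_MIN_MSI 2                        /* assumed driver minimum */

  static int foo_enable_msi_block(struct pci_dev *pdev, int nvec)
  {
          int rc;

          while (nvec >= FOO_MIN_MSI) {
                  rc = pci_enable_msi_block(pdev, nvec);
                  if (rc == 0)
                          return nvec;         /* irqs: pdev->irq .. +nvec-1 */
                  if (rc < 0)
                          return rc;           /* hard error, give up */
                  nvec = rc;                   /* retry with what might fit */
          }
          return -ENOSPC;
  }
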
130 | 4.2.3 pci_disable_msi | 130 | 4.2.3 pci_disable_msi |
@@ -137,10 +137,10 @@ interrupt number and frees the previously allocated message signaled | |||
137 | interrupt(s). The interrupt may subsequently be assigned to another | 137 | interrupt(s). The interrupt may subsequently be assigned to another |
138 | device, so drivers should not cache the value of dev->irq. | 138 | device, so drivers should not cache the value of dev->irq. |
139 | 139 | ||
140 | A device driver must always call free_irq() on the interrupt(s) | 140 | Before calling this function, a device driver must always call free_irq() |
141 | for which it has called request_irq() before calling this function. | 141 | on any interrupt for which it previously called request_irq(). |
142 | Failure to do so will result in a BUG_ON(), the device will be left with | 142 | Failure to do so results in a BUG_ON(), leaving the device with |
143 | MSI enabled and will leak its vector. | 143 | MSI enabled and thus leaking its vector. |
144 | 144 | ||
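A minimal teardown sketch reflecting that ordering; 'ctx' stands for whatever cookie was passed to request_irq():

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  static void foo_teardown_msi(struct pci_dev *pdev, void *ctx)
  {
          free_irq(pdev->irq, ctx);            /* release the MSI first */
          pci_disable_msi(pdev);               /* then switch back to INTx */
          /* pdev->irq now refers to the pin-based interrupt again; any
           * cached copy of the old MSI number is stale. */
  }
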
145 | 4.3 Using MSI-X | 145 | 4.3 Using MSI-X |
146 | 146 | ||
@@ -155,10 +155,10 @@ struct msix_entry { | |||
155 | }; | 155 | }; |
156 | 156 | ||
157 | This allows for the device to use these interrupts in a sparse fashion; | 157 | This allows for the device to use these interrupts in a sparse fashion; |
158 | for example it could use interrupts 3 and 1027 and allocate only a | 158 | for example, it could use interrupts 3 and 1027 and yet allocate only a |
159 | two-element array. The driver is expected to fill in the 'entry' value | 159 | two-element array. The driver is expected to fill in the 'entry' value |
160 | in each element of the array to indicate which entries it wants the kernel | 160 | in each element of the array to indicate for which entries the kernel |
161 | to assign interrupts for. It is invalid to fill in two entries with the | 161 | should assign interrupts; it is invalid to fill in two entries with the |
162 | same number. | 162 | same number. |
163 | 163 | ||
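For the sparse example in the text, the array could be filled like this (a sketch; the driver only sets the 'entry' fields, while the 'vector' fields are written by the kernel later):

  #include <linux/pci.h>

  static struct msix_entry foo_msix[] = {
          { .entry = 3    },                   /* MSI-X table entry 3    */
          { .entry = 1027 },                   /* MSI-X table entry 1027 */
  };
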
164 | 4.3.1 pci_enable_msix | 164 | 4.3.1 pci_enable_msix |
@@ -168,10 +168,11 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) | |||
168 | Calling this function asks the PCI subsystem to allocate 'nvec' MSIs. | 168 | Calling this function asks the PCI subsystem to allocate 'nvec' MSIs. |
169 | The 'entries' argument is a pointer to an array of msix_entry structs | 169 | The 'entries' argument is a pointer to an array of msix_entry structs |
170 | which should be at least 'nvec' entries in size. On success, the | 170 | which should be at least 'nvec' entries in size. On success, the |
171 | function will return 0 and the device will have been switched into | 171 | device is switched into MSI-X mode and the function returns 0. |
172 | MSI-X interrupt mode. The 'vector' elements in each entry will have | 172 | The 'vector' member in each entry is populated with the interrupt number; |
173 | been filled in with the interrupt number. The driver should then call | 173 | the driver should then call request_irq() for each 'vector' that it |
174 | request_irq() for each 'vector' that it decides to use. | 174 | decides to use. The device driver is responsible for keeping track of the |
175 | interrupts assigned to the MSI-X vectors so it can free them again later. | ||
175 | 176 | ||
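A sketch of that sequence for a hypothetical 'foo' driver: enable MSI-X, then request_irq() on each returned vector, keeping the msix_entry array in the driver's private structure so the vectors can be freed later. FOO_NVEC, struct foo_adapter and the handler are assumptions, not kernel APIs.

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  #define FOO_NVEC 2                           /* assumed vector count */

  struct foo_adapter {
          struct pci_dev *pdev;
          struct msix_entry msix_entries[FOO_NVEC];
  };

  static irqreturn_t foo_msix_handler(int irq, void *data)
  {
          return IRQ_HANDLED;                  /* real handler work elided */
  }

  static int foo_setup_msix(struct foo_adapter *adapter)
  {
          int i, err;

          for (i = 0; i < FOO_NVEC; i++)
                  adapter->msix_entries[i].entry = i;

          err = pci_enable_msix(adapter->pdev, adapter->msix_entries, FOO_NVEC);
          if (err)
                  return err;   /* <0: error; >0: only 'err' vectors available */

          for (i = 0; i < FOO_NVEC; i++) {
                  err = request_irq(adapter->msix_entries[i].vector,
                                    foo_msix_handler, 0, "foo-msix", adapter);
                  if (err) {
                          while (--i >= 0)
                                  free_irq(adapter->msix_entries[i].vector,
                                           adapter);
                          pci_disable_msix(adapter->pdev);
                          return err;
                  }
          }
          return 0;
  }
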
176 | If this function returns a negative number, it indicates an error and | 177 | If this function returns a negative number, it indicates an error and |
177 | the driver should not attempt to allocate any more MSI-X interrupts for | 178 | the driver should not attempt to allocate any more MSI-X interrupts for |
@@ -181,16 +182,14 @@ below. | |||
181 | 182 | ||
182 | This function, in contrast with pci_enable_msi(), does not adjust | 183 | This function, in contrast with pci_enable_msi(), does not adjust |
183 | dev->irq. The device will not generate interrupts for this interrupt | 184 | dev->irq. The device will not generate interrupts for this interrupt |
184 | number once MSI-X is enabled. The device driver is responsible for | 185 | number once MSI-X is enabled. |
185 | keeping track of the interrupts assigned to the MSI-X vectors so it can | ||
186 | free them again later. | ||
187 | 186 | ||
188 | Device drivers should normally call this function once per device | 187 | Device drivers should normally call this function once per device |
189 | during the initialization phase. | 188 | during the initialization phase. |
190 | 189 | ||
191 | It is ideal if drivers can cope with a variable number of MSI-X interrupts, | 190 | It is ideal if drivers can cope with a variable number of MSI-X interrupts; |
192 | there are many reasons why the platform may not be able to provide the | 191 | there are many reasons why the platform may not be able to provide the |
193 | exact number a driver asks for. | 192 | exact number that a driver asks for. |
194 | 193 | ||
195 | A request loop to achieve that might look like: | 194 | A request loop to achieve that might look like: |
196 | 195 | ||
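One way such a loop could look, reusing the foo_driver_enable_msix()/foo_adapter names visible in the surrounding context and the adapter layout from the earlier sketch; FOO_DRIVER_MINIMUM_NVEC is an assumption rather than the file's actual (elided) example:

  #include <linux/pci.h>
  #include <linux/errno.h>

  #define FOO_DRIVER_MINIMUM_NVEC 2            /* assumed driver minimum */

  static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
  {
          int rc;

          while (nvec >= FOO_DRIVER_MINIMUM_NVEC) {
                  rc = pci_enable_msix(adapter->pdev,
                                       adapter->msix_entries, nvec);
                  if (rc == 0)
                          return nvec;         /* got everything we asked for */
                  if (rc < 0)
                          return rc;           /* hard failure */
                  nvec = rc;                   /* retry with what is available */
          }
          return -ENOSPC;
  }
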
@@ -212,15 +211,15 @@ static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) | |||
212 | 211 | ||
213 | void pci_disable_msix(struct pci_dev *dev) | 212 | void pci_disable_msix(struct pci_dev *dev) |
214 | 213 | ||
215 | This API should be used to undo the effect of pci_enable_msix(). It frees | 214 | This function should be used to undo the effect of pci_enable_msix(). It frees |
216 | the previously allocated message signaled interrupts. The interrupts may | 215 | the previously allocated message signaled interrupts. The interrupts may |
217 | subsequently be assigned to another device, so drivers should not cache | 216 | subsequently be assigned to another device, so drivers should not cache |
218 | the value of the 'vector' elements over a call to pci_disable_msix(). | 217 | the value of the 'vector' elements over a call to pci_disable_msix(). |
219 | 218 | ||
220 | A device driver must always call free_irq() on the interrupt(s) | 219 | Before calling this function, a device driver must always call free_irq() |
221 | for which it has called request_irq() before calling this function. | 220 | on any interrupt for which it previously called request_irq(). |
222 | Failure to do so will result in a BUG_ON(), the device will be left with | 221 | Failure to do so results in a BUG_ON(), leaving the device with |
223 | MSI enabled and will leak its vector. | 222 | MSI-X enabled and thus leaking its vector. |
224 | 223 | ||
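A matching MSI-X teardown sketch, again using the hypothetical adapter layout from the earlier sketches: every requested vector is freed before pci_disable_msix() is called.

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  static void foo_teardown_msix(struct foo_adapter *adapter, int nvec)
  {
          int i;

          for (i = 0; i < nvec; i++)
                  free_irq(adapter->msix_entries[i].vector, adapter);
          pci_disable_msix(adapter->pdev);
          /* The 'vector' values are stale from here on; do not reuse them. */
  }
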
225 | 4.3.3 The MSI-X Table | 224 | 4.3.3 The MSI-X Table |
226 | 225 | ||
@@ -232,10 +231,10 @@ mask or unmask an interrupt, it should call disable_irq() / enable_irq(). | |||
232 | 4.4 Handling devices implementing both MSI and MSI-X capabilities | 231 | 4.4 Handling devices implementing both MSI and MSI-X capabilities |
233 | 232 | ||
234 | If a device implements both MSI and MSI-X capabilities, it can | 233 | If a device implements both MSI and MSI-X capabilities, it can |
235 | run in either MSI mode or MSI-X mode but not both simultaneously. | 234 | run in either MSI mode or MSI-X mode, but not both simultaneously. |
236 | This is a requirement of the PCI spec, and it is enforced by the | 235 | This is a requirement of the PCI spec, and it is enforced by the |
237 | PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or | 236 | PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or |
238 | pci_enable_msix() when MSI is already enabled will result in an error. | 237 | pci_enable_msix() when MSI is already enabled results in an error. |
239 | If a device driver wishes to switch between MSI and MSI-X at runtime, | 238 | If a device driver wishes to switch between MSI and MSI-X at runtime, |
240 | it must first quiesce the device, then switch it back to pin-interrupt | 239 | it must first quiesce the device, then switch it back to pin-interrupt |
241 | mode, before calling pci_enable_msi() or pci_enable_msix() and resuming | 240 | mode, before calling pci_enable_msi() or pci_enable_msix() and resuming |
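A hedged sketch of such a runtime switch from MSI to MSI-X, using the hypothetical adapter from the earlier sketches; the quiesce step is device-specific and only indicated by a comment:

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  static int foo_switch_to_msix(struct foo_adapter *adapter, int nvec)
  {
          /* 1. Quiesce the device here (device-specific). */

          /* 2. Drop back to pin-based interrupt mode. */
          free_irq(adapter->pdev->irq, adapter);
          pci_disable_msi(adapter->pdev);

          /* 3. Enable the other capability, then request_irq() each
           *    vector and resume normal operation. */
          return pci_enable_msix(adapter->pdev,
                                 adapter->msix_entries, nvec);
  }
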
@@ -251,7 +250,7 @@ the MSI-X facilities in preference to the MSI facilities. As mentioned | |||
251 | above, MSI-X supports any number of interrupts between 1 and 2048. | 250 | above, MSI-X supports any number of interrupts between 1 and 2048. |
252 | In contrast, MSI is restricted to a maximum of 32 interrupts (and | 251 | In contrast, MSI is restricted to a maximum of 32 interrupts (and |
253 | must be a power of two). In addition, the MSI interrupt vectors must | 252 | must be a power of two). In addition, the MSI interrupt vectors must |
254 | be allocated consecutively, so the system may not be able to allocate | 253 | be allocated consecutively, so the system might not be able to allocate |
255 | as many vectors for MSI as it could for MSI-X. On some platforms, MSI | 254 | as many vectors for MSI as it could for MSI-X. On some platforms, MSI |
256 | interrupts must all be targeted at the same set of CPUs whereas MSI-X | 255 | interrupts must all be targeted at the same set of CPUs whereas MSI-X |
257 | interrupts can all be targeted at different CPUs. | 256 | interrupts can all be targeted at different CPUs. |
@@ -281,7 +280,7 @@ disabled to enabled and back again. | |||
281 | 280 | ||
282 | Using 'lspci -v' (as root) may show some devices with "MSI", "Message | 281 | Using 'lspci -v' (as root) may show some devices with "MSI", "Message |
283 | Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities | 282 | Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities |
284 | has an 'Enable' flag which will be followed with either "+" (enabled) | 283 | has an 'Enable' flag which is followed with either "+" (enabled) |
285 | or "-" (disabled). | 284 | or "-" (disabled). |
286 | 285 | ||
287 | 286 | ||
@@ -298,7 +297,7 @@ The PCI stack provides three ways to disable MSIs: | |||
298 | 297 | ||
299 | Some host chipsets simply don't support MSIs properly. If we're | 298 | Some host chipsets simply don't support MSIs properly. If we're |
300 | lucky, the manufacturer knows this and has indicated it in the ACPI | 299 | lucky, the manufacturer knows this and has indicated it in the ACPI |
301 | FADT table. In this case, Linux will automatically disable MSIs. | 300 | FADT table. In this case, Linux automatically disables MSIs. |
302 | Some boards don't include this information in the table and so we have | 301 | Some boards don't include this information in the table and so we have |
303 | to detect them ourselves. The complete list of these is found near the | 302 | to detect them ourselves. The complete list of these is found near the |
304 | quirk_disable_all_msi() function in drivers/pci/quirks.c. | 303 | quirk_disable_all_msi() function in drivers/pci/quirks.c. |
@@ -317,7 +316,7 @@ Some bridges allow you to enable MSIs by changing some bits in their | |||
317 | PCI configuration space (especially the Hypertransport chipsets such | 316 | PCI configuration space (especially the Hypertransport chipsets such |
318 | as the nVidia nForce and Serverworks HT2000). As with host chipsets, | 317 | as the nVidia nForce and Serverworks HT2000). As with host chipsets, |
319 | Linux mostly knows about them and automatically enables MSIs if it can. | 318 | Linux mostly knows about them and automatically enables MSIs if it can. |
320 | If you have a bridge which Linux doesn't yet know about, you can enable | 319 | If you have a bridge unknown to Linux, you can enable |
321 | MSIs in configuration space using whatever method you know works, then | 320 | MSIs in configuration space using whatever method you know works, then |
322 | enable MSIs on that bridge by doing: | 321 | enable MSIs on that bridge by doing: |
323 | 322 | ||
@@ -327,7 +326,7 @@ where $bridge is the PCI address of the bridge you've enabled (eg | |||
327 | 0000:00:0e.0). | 326 | 0000:00:0e.0). |
328 | 327 | ||
329 | To disable MSIs, echo 0 instead of 1. Changing this value should be | 328 | To disable MSIs, echo 0 instead of 1. Changing this value should be |
330 | done with caution as it can break interrupt handling for all devices | 329 | done with caution as it could break interrupt handling for all devices |
331 | below this bridge. | 330 | below this bridge. |
332 | 331 | ||
333 | Again, please notify linux-pci@vger.kernel.org of any bridges that need | 332 | Again, please notify linux-pci@vger.kernel.org of any bridges that need |
@@ -336,7 +335,7 @@ special handling. | |||
336 | 5.3. Disabling MSIs on a single device | 335 | 5.3. Disabling MSIs on a single device |
337 | 336 | ||
338 | Some devices are known to have faulty MSI implementations. Usually this | 337 | Some devices are known to have faulty MSI implementations. Usually this |
339 | is handled in the individual device driver but occasionally it's necessary | 338 | is handled in the individual device driver, but occasionally it's necessary |
340 | to handle this with a quirk. Some drivers have an option to disable use | 339 | to handle this with a quirk. Some drivers have an option to disable use |
341 | of MSI. While this is a convenient workaround for the driver author, | 340 | of MSI. While this is a convenient workaround for the driver author, |
342 | it is not good practise, and should not be emulated. | 341 | it is not good practise, and should not be emulated. |
@@ -350,7 +349,7 @@ for your machine. You should also check your .config to be sure you | |||
350 | have enabled CONFIG_PCI_MSI. | 349 | have enabled CONFIG_PCI_MSI. |
351 | 350 | ||
352 | Then, 'lspci -t' gives the list of bridges above a device. Reading | 351 | Then, 'lspci -t' gives the list of bridges above a device. Reading |
353 | /sys/bus/pci/devices/*/msi_bus will tell you whether MSI are enabled (1) | 352 | /sys/bus/pci/devices/*/msi_bus will tell you whether MSIs are enabled (1) |
354 | or disabled (0). If 0 is found in any of the msi_bus files belonging | 353 | or disabled (0). If 0 is found in any of the msi_bus files belonging |
355 | to bridges between the PCI root and the device, MSIs are disabled. | 354 | to bridges between the PCI root and the device, MSIs are disabled. |
356 | 355 | ||
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 319baa8b60dd..36d16bbf72c6 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,7 +130,7 @@ Linux kernel master tree: | |||
130 | ftp.??.kernel.org:/pub/linux/kernel/... | 130 | ftp.??.kernel.org:/pub/linux/kernel/... |
131 | ?? == your country code, such as "us", "uk", "fr", etc. | 131 | ?? == your country code, such as "us", "uk", "fr", etc. |
132 | 132 | ||
133 | http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git | 133 | http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git |
134 | 134 | ||
135 | Linux kernel mailing list: | 135 | Linux kernel mailing list: |
136 | linux-kernel@vger.kernel.org | 136 | linux-kernel@vger.kernel.org |
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 569f3532e138..4468ce24427c 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -303,7 +303,7 @@ patches that are being emailed around. | |||
303 | 303 | ||
304 | The sign-off is a simple line at the end of the explanation for the | 304 | The sign-off is a simple line at the end of the explanation for the |
305 | patch, which certifies that you wrote it or otherwise have the right to | 305 | patch, which certifies that you wrote it or otherwise have the right to |
306 | pass it on as a open-source patch. The rules are pretty simple: if you | 306 | pass it on as an open-source patch. The rules are pretty simple: if you |
307 | can certify the below: | 307 | can certify the below: |
308 | 308 | ||
309 | Developer's Certificate of Origin 1.1 | 309 | Developer's Certificate of Origin 1.1 |
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index e578feed6d81..6d670f570451 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -43,3 +43,74 @@ If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches | |||
43 | to IOPS mode and starts providing fairness in terms of number of requests | 43 | to IOPS mode and starts providing fairness in terms of number of requests |
44 | dispatched. Note that this mode switching takes effect only for group | 44 | dispatched. Note that this mode switching takes effect only for group |
45 | scheduling. For non-cgroup users nothing should change. | 45 | scheduling. For non-cgroup users nothing should change. |
46 | |||
47 | CFQ IO scheduler Idling Theory | ||
48 | =============================== | ||
49 | Idling on a queue is primarily about waiting for the next request to come | ||
50 | on the same queue after completion of a request. In this process CFQ will not | ||
51 | dispatch requests from other cfq queues even if requests are pending there. | ||
52 | |||
53 | The rationale behind idling is that it can cut down on the number of seeks | ||
54 | on rotational media. For example, if a process is doing dependent | ||
55 | sequential reads (the next read comes in only after completion of the | ||
56 | previous one), then not dispatching requests from other queues helps, as we | ||
57 | do not move the disk head and keep dispatching sequential IO from | ||
58 | one queue. | ||
59 | |||
60 | CFQ has the following service trees, and the various queues are put on these trees. | ||
61 | |||
62 | sync-idle sync-noidle async | ||
63 | |||
64 | All cfq queues doing synchronous sequential IO go onto the sync-idle tree. | ||
65 | On this tree we idle on each queue individually. | ||
66 | |||
67 | All synchronous non-sequential queues go on the sync-noidle tree, as does | ||
68 | any request that is marked with REQ_NOIDLE. On this | ||
69 | tree we do not idle on individual queues; instead, we idle on the whole group | ||
70 | of queues, i.e. the tree. So if there are 4 queues waiting for IO to dispatch, | ||
71 | we idle only once the last queue has dispatched its IO and there is | ||
72 | no more IO on this service tree. | ||
73 | |||
74 | All async writes go on the async service tree. There is no idling on async | ||
75 | queues. | ||
76 | |||
77 | CFQ has some optimizations for SSDs: if it detects non-rotational | ||
78 | media which can support a higher queue depth (multiple requests in | ||
79 | flight at a time), then it cuts down on idling of individual queues; | ||
80 | all the queues move to the sync-noidle tree and only tree idling remains. This | ||
81 | tree idling provides isolation from the buffered write queues on the async tree. | ||
82 | |||
83 | FAQ | ||
84 | === | ||
85 | Q1. Why idle at all on queues marked with REQ_NOIDLE? | ||
86 | |||
87 | A1. We only do tree idling (all queues on the sync-noidle tree) on queues marked | ||
88 | with REQ_NOIDLE. This helps provide isolation from the sync-idle | ||
89 | queues. Otherwise, in the presence of many sequential readers, other | ||
90 | synchronous IO might not get a fair share of the disk. | ||
91 | |||
92 | For example, suppose there are 10 sequential readers doing IO and each gets | ||
93 | 100ms. If a REQ_NOIDLE request comes in, it will be scheduled only | ||
94 | after roughly 1 second. If, after completion of the REQ_NOIDLE request, we | ||
95 | do not idle, and a couple of milliseconds later another REQ_NOIDLE | ||
96 | request comes in, it will again be scheduled about a second later. Repeat this | ||
97 | and notice how a workload can lose its disk share and suffer due to | ||
98 | multiple sequential readers. | ||
99 | |||
100 | fsync can generate dependent IO where a bunch of data is written in the | ||
101 | context of fsync, and later some journaling data is written. Journaling | ||
102 | data comes in only after fsync has finished its IO (at least for ext4 | ||
103 | that seemed to be the case). Now if one decides not to idle on the fsync | ||
104 | thread due to REQ_NOIDLE, then the next journaling write will not get | ||
105 | scheduled for another second. A process doing small fsyncs will suffer | ||
106 | badly in the presence of multiple sequential readers. | ||
107 | |||
108 | Hence, doing tree idling for threads that use the REQ_NOIDLE flag on requests | ||
109 | provides isolation from multiple sequential readers while, at the same | ||
110 | time, not idling on individual threads. | ||
111 | |||
112 | Q2. When should REQ_NOIDLE be specified? | ||
113 | A2. I would think that whenever one is doing a synchronous write and is not | ||
114 | expecting more writes to be dispatched from the same context soon, one | ||
115 | should be able to specify REQ_NOIDLE on the writes, and that should | ||
116 | probably work well for most cases. | ||
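As a rough illustration of A2, kernel code submitting a synchronous write that expects no immediate follow-up from the same context could tag it as below. The flag names and the submit_bio() signature are the ones used by block-layer code of this vintage, and the helper is made up; the bio is assumed to be fully set up by the caller.

  #include <linux/fs.h>
  #include <linux/bio.h>
  #include <linux/blk_types.h>

  static void foo_submit_sync_noidle_write(struct bio *bio)
  {
          /* REQ_NOIDLE: no further writes expected from this context soon,
           * so CFQ only idles on the sync-noidle tree as a whole. */
          submit_bio(WRITE | REQ_SYNC | REQ_NOIDLE, bio);
  }
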
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index a0b58e29f911..860c29a472ad 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -199,18 +199,16 @@ to coerce it into behaving. | |||
199 | 199 | ||
200 | To beat some sense out of the internal editor, do this: | 200 | To beat some sense out of the internal editor, do this: |
201 | 201 | ||
202 | - Under account settings, composition and addressing, uncheck "Compose | ||
203 | messages in HTML format". | ||
204 | |||
205 | - Edit your Thunderbird config settings so that it won't use format=flowed. | 202 | - Edit your Thunderbird config settings so that it won't use format=flowed. |
206 | Go to "edit->preferences->advanced->config editor" to bring up the | 203 | Go to "edit->preferences->advanced->config editor" to bring up the |
207 | thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to | 204 | thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to |
208 | "false". | 205 | "false". |
209 | 206 | ||
210 | - Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML | 207 | - Disable HTML Format: Set "mail.identity.id1.compose_html" to "false". |
211 | composer, select "Preformat" from the drop-down box just under the subject | 208 | |
212 | line, then close the message without saving. (This setting also applies to | 209 | - Enable "preformat" mode: Set "editor.quotesPreformatted" to "true". |
213 | the text composer, but the only control for it is in the HTML composer.) | 210 | |
211 | - Enable UTF8: Set "prefs.converted-to-utf8" to "true". | ||
214 | 212 | ||
215 | - Install the "toggle wordwrap" extension. Download the file from: | 213 | - Install the "toggle wordwrap" extension. Download the file from: |
216 | https://addons.mozilla.org/thunderbird/addon/2351/ | 214 | https://addons.mozilla.org/thunderbird/addon/2351/ |
diff --git a/Documentation/filesystems/befs.txt b/Documentation/filesystems/befs.txt
index 6e49c363938e..da45e6c842b8 100644
--- a/Documentation/filesystems/befs.txt
+++ b/Documentation/filesystems/befs.txt
@@ -27,7 +27,7 @@ His original code can still be found at: | |||
27 | Does anyone know of a more current email address for Makoto? He doesn't | 27 | Does anyone know of a more current email address for Makoto? He doesn't |
28 | respond to the address given above... | 28 | respond to the address given above... |
29 | 29 | ||
30 | Current maintainer: Sergey S. Kostyliov <rathamahata@php4.ru> | 30 | This filesystem doesn't have a maintainer. |
31 | 31 | ||
32 | WHAT IS THIS DRIVER? | 32 | WHAT IS THIS DRIVER? |
33 | ================== | 33 | ================== |
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 9a8674629a07..0e0734b509d8 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -620,17 +620,6 @@ | |||
620 | (including this document itself) have been moved there, and might | 620 | (including this document itself) have been moved there, and might |
621 | be more up to date than the web version. | 621 | be more up to date than the web version. |
622 | 622 | ||
623 | * Name: "Linux Source Driver" | ||
624 | URL: http://lsd.linux.cz | ||
625 | Keywords: Browsing source code. | ||
626 | Description: "Linux Source Driver (LSD) is an application, which | ||
627 | can make browsing source codes of Linux kernel easier than you can | ||
628 | imagine. You can select between multiple versions of kernel (e.g. | ||
629 | 0.01, 1.0.0, 2.0.33, 2.0.34pre13, 2.0.0, 2.1.101 etc.). With LSD | ||
630 | you can search Linux kernel (fulltext, macros, types, functions | ||
631 | and variables) and LSD can generate patches for you on the fly | ||
632 | (files, directories or kernel)". | ||
633 | |||
634 | * Name: "Linux Kernel Source Reference" | 623 | * Name: "Linux Kernel Source Reference" |
635 | Author: Thomas Graichen. | 624 | Author: Thomas Graichen. |
636 | URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4 | 625 | URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4 |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e279b7242912..614d0382e2cb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -40,6 +40,7 @@ parameter is applicable: | |||
40 | ALSA ALSA sound support is enabled. | 40 | ALSA ALSA sound support is enabled. |
41 | APIC APIC support is enabled. | 41 | APIC APIC support is enabled. |
42 | APM Advanced Power Management support is enabled. | 42 | APM Advanced Power Management support is enabled. |
43 | ARM ARM architecture is enabled. | ||
43 | AVR32 AVR32 architecture is enabled. | 44 | AVR32 AVR32 architecture is enabled. |
44 | AX25 Appropriate AX.25 support is enabled. | 45 | AX25 Appropriate AX.25 support is enabled. |
45 | BLACKFIN Blackfin architecture is enabled. | 46 | BLACKFIN Blackfin architecture is enabled. |
@@ -49,6 +50,7 @@ parameter is applicable: | |||
49 | EFI EFI Partitioning (GPT) is enabled | 50 | EFI EFI Partitioning (GPT) is enabled |
50 | EIDE EIDE/ATAPI support is enabled. | 51 | EIDE EIDE/ATAPI support is enabled. |
51 | FB The frame buffer device is enabled. | 52 | FB The frame buffer device is enabled. |
53 | FTRACE Function tracing enabled. | ||
52 | GCOV GCOV profiling is enabled. | 54 | GCOV GCOV profiling is enabled. |
53 | HW Appropriate hardware is enabled. | 55 | HW Appropriate hardware is enabled. |
54 | IA-64 IA-64 architecture is enabled. | 56 | IA-64 IA-64 architecture is enabled. |
@@ -69,6 +71,7 @@ parameter is applicable: | |||
69 | Documentation/m68k/kernel-options.txt. | 71 | Documentation/m68k/kernel-options.txt. |
70 | MCA MCA bus support is enabled. | 72 | MCA MCA bus support is enabled. |
71 | MDA MDA console support is enabled. | 73 | MDA MDA console support is enabled. |
74 | MIPS MIPS architecture is enabled. | ||
72 | MOUSE Appropriate mouse support is enabled. | 75 | MOUSE Appropriate mouse support is enabled. |
73 | MSI Message Signaled Interrupts (PCI). | 76 | MSI Message Signaled Interrupts (PCI). |
74 | MTD MTD (Memory Technology Device) support is enabled. | 77 | MTD MTD (Memory Technology Device) support is enabled. |
@@ -100,7 +103,6 @@ parameter is applicable: | |||
100 | SPARC Sparc architecture is enabled. | 103 | SPARC Sparc architecture is enabled. |
101 | SWSUSP Software suspend (hibernation) is enabled. | 104 | SWSUSP Software suspend (hibernation) is enabled. |
102 | SUSPEND System suspend states are enabled. | 105 | SUSPEND System suspend states are enabled. |
103 | FTRACE Function tracing enabled. | ||
104 | TPM TPM drivers are enabled. | 106 | TPM TPM drivers are enabled. |
105 | TS Appropriate touchscreen support is enabled. | 107 | TS Appropriate touchscreen support is enabled. |
106 | UMS USB Mass Storage support is enabled. | 108 | UMS USB Mass Storage support is enabled. |
@@ -115,7 +117,7 @@ parameter is applicable: | |||
115 | X86-64 X86-64 architecture is enabled. | 117 | X86-64 X86-64 architecture is enabled. |
116 | More X86-64 boot options can be found in | 118 | More X86-64 boot options can be found in |
117 | Documentation/x86/x86_64/boot-options.txt . | 119 | Documentation/x86/x86_64/boot-options.txt . |
118 | X86 Either 32bit or 64bit x86 (same as X86-32+X86-64) | 120 | X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64) |
119 | XEN Xen support is enabled | 121 | XEN Xen support is enabled |
120 | 122 | ||
121 | In addition, the following text indicates that the option: | 123 | In addition, the following text indicates that the option: |
@@ -376,7 +378,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
376 | atkbd.softrepeat= [HW] | 378 | atkbd.softrepeat= [HW] |
377 | Use software keyboard repeat | 379 | Use software keyboard repeat |
378 | 380 | ||
379 | autotest [IA64] | 381 | autotest [IA-64] |
380 | 382 | ||
381 | baycom_epp= [HW,AX25] | 383 | baycom_epp= [HW,AX25] |
382 | Format: <io>,<mode> | 384 | Format: <io>,<mode> |
@@ -681,8 +683,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
681 | uart[8250],mmio32,<addr>[,options] | 683 | uart[8250],mmio32,<addr>[,options] |
682 | Start an early, polled-mode console on the 8250/16550 | 684 | Start an early, polled-mode console on the 8250/16550 |
683 | UART at the specified I/O port or MMIO address. | 685 | UART at the specified I/O port or MMIO address. |
684 | MMIO inter-register address stride is either 8bit (mmio) | 686 | MMIO inter-register address stride is either 8-bit |
685 | or 32bit (mmio32). | 687 | (mmio) or 32-bit (mmio32). |
686 | The options are the same as for ttyS, above. | 688 | The options are the same as for ttyS, above. |
687 | 689 | ||
688 | earlyprintk= [X86,SH,BLACKFIN] | 690 | earlyprintk= [X86,SH,BLACKFIN] |
@@ -725,7 +727,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
725 | See Documentation/block/as-iosched.txt and | 727 | See Documentation/block/as-iosched.txt and |
726 | Documentation/block/deadline-iosched.txt for details. | 728 | Documentation/block/deadline-iosched.txt for details. |
727 | 729 | ||
728 | elfcorehdr= [IA64,PPC,SH,X86] | 730 | elfcorehdr= [IA-64,PPC,SH,X86] |
729 | Specifies physical address of start of kernel core | 731 | Specifies physical address of start of kernel core |
730 | image elf header. Generally kexec loader will | 732 | image elf header. Generally kexec loader will |
731 | pass this option to capture kernel. | 733 | pass this option to capture kernel. |
@@ -791,7 +793,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
791 | tracer at boot up. function-list is a comma separated | 793 | tracer at boot up. function-list is a comma separated |
792 | list of functions. This list can be changed at run | 794 | list of functions. This list can be changed at run |
793 | time by the set_ftrace_filter file in the debugfs | 795 | time by the set_ftrace_filter file in the debugfs |
794 | tracing directory. | 796 | tracing directory. |
795 | 797 | ||
796 | ftrace_notrace=[function-list] | 798 | ftrace_notrace=[function-list] |
797 | [FTRACE] Do not trace the functions specified in | 799 | [FTRACE] Do not trace the functions specified in |
@@ -829,7 +831,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
829 | 831 | ||
830 | hashdist= [KNL,NUMA] Large hashes allocated during boot | 832 | hashdist= [KNL,NUMA] Large hashes allocated during boot |
831 | are distributed across NUMA nodes. Defaults on | 833 | are distributed across NUMA nodes. Defaults on |
832 | for 64bit NUMA, off otherwise. | 834 | for 64-bit NUMA, off otherwise. |
833 | Format: 0 | 1 (for off | on) | 835 | Format: 0 | 1 (for off | on) |
834 | 836 | ||
835 | hcl= [IA-64] SGI's Hardware Graph compatibility layer | 837 | hcl= [IA-64] SGI's Hardware Graph compatibility layer |
@@ -998,10 +1000,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
998 | DMA. | 1000 | DMA. |
999 | forcedac [x86_64] | 1001 | forcedac [x86_64] |
1000 | With this option iommu will not optimize to look | 1002 | With this option iommu will not optimize to look |
1001 | for io virtual address below 32 bit forcing dual | 1003 | for io virtual address below 32-bit forcing dual |
1002 | address cycle on pci bus for cards supporting greater | 1004 | address cycle on pci bus for cards supporting greater |
1003 | than 32 bit addressing. The default is to look | 1005 | than 32-bit addressing. The default is to look |
1004 | for translation below 32 bit and if not available | 1006 | for translation below 32-bit and if not available |
1005 | then look in the higher range. | 1007 | then look in the higher range. |
1006 | strict [Default Off] | 1008 | strict [Default Off] |
1007 | With this option on every unmap_single operation will | 1009 | With this option on every unmap_single operation will |
@@ -1017,7 +1019,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1017 | off disable Interrupt Remapping | 1019 | off disable Interrupt Remapping |
1018 | nosid disable Source ID checking | 1020 | nosid disable Source ID checking |
1019 | 1021 | ||
1020 | inttest= [IA64] | 1022 | inttest= [IA-64] |
1021 | 1023 | ||
1022 | iomem= Disable strict checking of access to MMIO memory | 1024 | iomem= Disable strict checking of access to MMIO memory |
1023 | strict regions from userspace. | 1025 | strict regions from userspace. |
@@ -1034,7 +1036,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1034 | nomerge | 1036 | nomerge |
1035 | forcesac | 1037 | forcesac |
1036 | soft | 1038 | soft |
1037 | pt [x86, IA64] | 1039 | pt [x86, IA-64] |
1038 | 1040 | ||
1039 | io7= [HW] IO7 for Marvel based alpha systems | 1041 | io7= [HW] IO7 for Marvel based alpha systems |
1040 | See comment before marvel_specify_io7 in | 1042 | See comment before marvel_specify_io7 in |
@@ -1165,7 +1167,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1165 | 1167 | ||
1166 | kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU) | 1168 | kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU) |
1167 | for all guests. | 1169 | for all guests. |
1168 | Default is 1 (enabled) if in 64bit or 32bit-PAE mode | 1170 | Default is 1 (enabled) if in 64-bit or 32-bit PAE mode. |
1169 | 1171 | ||
1170 | kvm-intel.ept= [KVM,Intel] Disable extended page tables | 1172 | kvm-intel.ept= [KVM,Intel] Disable extended page tables |
1171 | (virtualized MMU) support on capable Intel chips. | 1173 | (virtualized MMU) support on capable Intel chips. |
@@ -1202,10 +1204,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1202 | libata.dma=0 Disable all PATA and SATA DMA | 1204 | libata.dma=0 Disable all PATA and SATA DMA |
1203 | libata.dma=1 PATA and SATA Disk DMA only | 1205 | libata.dma=1 PATA and SATA Disk DMA only |
1204 | libata.dma=2 ATAPI (CDROM) DMA only | 1206 | libata.dma=2 ATAPI (CDROM) DMA only |
1205 | libata.dma=4 Compact Flash DMA only | 1207 | libata.dma=4 Compact Flash DMA only |
1206 | Combinations also work, so libata.dma=3 enables DMA | 1208 | Combinations also work, so libata.dma=3 enables DMA |
1207 | for disks and CDROMs, but not CFs. | 1209 | for disks and CDROMs, but not CFs. |
1208 | 1210 | ||
1209 | libata.ignore_hpa= [LIBATA] Ignore HPA limit | 1211 | libata.ignore_hpa= [LIBATA] Ignore HPA limit |
1210 | libata.ignore_hpa=0 keep BIOS limits (default) | 1212 | libata.ignore_hpa=0 keep BIOS limits (default) |
1211 | libata.ignore_hpa=1 ignore limits, using full disk | 1213 | libata.ignore_hpa=1 ignore limits, using full disk |
@@ -1331,7 +1333,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1331 | ltpc= [NET] | 1333 | ltpc= [NET] |
1332 | Format: <io>,<irq>,<dma> | 1334 | Format: <io>,<irq>,<dma> |
1333 | 1335 | ||
1334 | machvec= [IA64] Force the use of a particular machine-vector | 1336 | machvec= [IA-64] Force the use of a particular machine-vector |
1335 | (machvec) in a generic kernel. | 1337 | (machvec) in a generic kernel. |
1336 | Example: machvec=hpzx1_swiotlb | 1338 | Example: machvec=hpzx1_swiotlb |
1337 | 1339 | ||
@@ -1348,9 +1350,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1348 | it is equivalent to "nosmp", which also disables | 1350 | it is equivalent to "nosmp", which also disables |
1349 | the IO APIC. | 1351 | the IO APIC. |
1350 | 1352 | ||
1351 | max_loop= [LOOP] Maximum number of loopback devices that can | 1353 | max_loop= [LOOP] The number of loop block devices that get |
1352 | be mounted | 1354 | (loop.max_loop) unconditionally pre-created at init time. The default |
1353 | Format: <1-256> | 1355 | number is configured by BLK_DEV_LOOP_MIN_COUNT. Instead |
1356 | of statically allocating a predefined number, loop | ||
1357 | devices can be requested on-demand with the | ||
1358 | /dev/loop-control interface. | ||
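A small userspace sketch of the on-demand interface mentioned above: ask /dev/loop-control for a free loop device with the LOOP_CTL_GET_FREE ioctl (error handling kept minimal).

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/loop.h>

  int main(void)
  {
          int devnr, ctl = open("/dev/loop-control", O_RDWR);

          if (ctl < 0) {
                  perror("open /dev/loop-control");
                  return 1;
          }
          devnr = ioctl(ctl, LOOP_CTL_GET_FREE);  /* e.g. 0 -> /dev/loop0 */
          if (devnr < 0)
                  perror("LOOP_CTL_GET_FREE");
          else
                  printf("first free loop device: /dev/loop%d\n", devnr);
          close(ctl);
          return devnr < 0;
  }
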
1354 | 1359 | ||
1355 | mcatest= [IA-64] | 1360 | mcatest= [IA-64] |
1356 | 1361 | ||
@@ -1734,7 +1739,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1734 | 1739 | ||
1735 | nointroute [IA-64] | 1740 | nointroute [IA-64] |
1736 | 1741 | ||
1737 | nojitter [IA64] Disables jitter checking for ITC timers. | 1742 | nojitter [IA-64] Disables jitter checking for ITC timers. |
1738 | 1743 | ||
1739 | no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver | 1744 | no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver |
1740 | 1745 | ||
@@ -1800,7 +1805,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1800 | 1805 | ||
1801 | nox2apic [X86-64,APIC] Do not enable x2APIC mode. | 1806 | nox2apic [X86-64,APIC] Do not enable x2APIC mode. |
1802 | 1807 | ||
1803 | nptcg= [IA64] Override max number of concurrent global TLB | 1808 | nptcg= [IA-64] Override max number of concurrent global TLB |
1804 | purges which is reported from either PAL_VM_SUMMARY or | 1809 | purges which is reported from either PAL_VM_SUMMARY or |
1805 | SAL PALO. | 1810 | SAL PALO. |
1806 | 1811 | ||
@@ -2077,7 +2082,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2077 | Format: { parport<nr> | timid | 0 } | 2082 | Format: { parport<nr> | timid | 0 } |
2078 | See also Documentation/parport.txt. | 2083 | See also Documentation/parport.txt. |
2079 | 2084 | ||
2080 | pmtmr= [X86] Manual setup of pmtmr I/O Port. | 2085 | pmtmr= [X86] Manual setup of pmtmr I/O Port. |
2081 | Override pmtimer IOPort with a hex value. | 2086 | Override pmtimer IOPort with a hex value. |
2082 | e.g. pmtmr=0x508 | 2087 | e.g. pmtmr=0x508 |
2083 | 2088 | ||
@@ -2635,6 +2640,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2635 | medium is write-protected). | 2640 | medium is write-protected). |
2636 | Example: quirks=0419:aaf5:rl,0421:0433:rc | 2641 | Example: quirks=0419:aaf5:rl,0421:0433:rc |
2637 | 2642 | ||
2643 | user_debug= [KNL,ARM] | ||
2644 | Format: <int> | ||
2645 | See arch/arm/Kconfig.debug help text. | ||
2646 | 1 - undefined instruction events | ||
2647 | 2 - system calls | ||
2648 | 4 - invalid data aborts | ||
2649 | 8 - SIGSEGV faults | ||
2650 | 16 - SIGBUS faults | ||
2651 | Example: user_debug=31 | ||
2652 | |||
2638 | userpte= | 2653 | userpte= |
2639 | [X86] Flags controlling user PTE allocations. | 2654 | [X86] Flags controlling user PTE allocations. |
2640 | 2655 | ||
@@ -2680,6 +2695,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2680 | vmpoff= [KNL,S390] Perform z/VM CP command after power off. | 2695 | vmpoff= [KNL,S390] Perform z/VM CP command after power off. |
2681 | Format: <command> | 2696 | Format: <command> |
2682 | 2697 | ||
2698 | vsyscall= [X86-64] | ||
2699 | Controls the behavior of vsyscalls (i.e. calls to | ||
2700 | fixed addresses of 0xffffffffff600x00 from legacy | ||
2701 | code). Most statically-linked binaries and older | ||
2702 | versions of glibc use these calls. Because these | ||
2703 | functions are at fixed addresses, they make nice | ||
2704 | targets for exploits that can control RIP. | ||
2705 | |||
2706 | emulate [default] Vsyscalls turn into traps and are | ||
2707 | emulated reasonably safely. | ||
2708 | |||
2709 | native Vsyscalls are native syscall instructions. | ||
2710 | This is a little bit faster than trapping | ||
2711 | and makes a few dynamic recompilers work | ||
2712 | better than they would in emulation mode. | ||
2713 | It also makes exploits much easier to write. | ||
2714 | |||
2715 | none Vsyscalls don't work at all. This makes | ||
2716 | them quite hard to use for exploits but | ||
2717 | might break your system. | ||
2718 | |||
2683 | vt.cur_default= [VT] Default cursor shape. | 2719 | vt.cur_default= [VT] Default cursor shape. |
2684 | Format: 0xCCBBAA, where AA, BB, and CC are the same as | 2720 | Format: 0xCCBBAA, where AA, BB, and CC are the same as |
2685 | the parameters of the <Esc>[?A;B;Cc escape sequence; | 2721 | the parameters of the <Esc>[?A;B;Cc escape sequence; |
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 4edd78dfb362..bbce1215434a 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -1,13 +1,21 @@ | |||
1 | 00-INDEX | 1 | 00-INDEX |
2 | - this file | 2 | - this file |
3 | 3c359.txt | ||
4 | - information on the 3Com TokenLink Velocity XL (3c359) driver. | ||
3 | 3c505.txt | 5 | 3c505.txt |
4 | - information on the 3Com EtherLink Plus (3c505) driver. | 6 | - information on the 3Com EtherLink Plus (3c505) driver. |
7 | 3c509.txt | ||
8 | - information on the 3Com Etherlink III Series Ethernet cards. | ||
5 | 6pack.txt | 9 | 6pack.txt |
6 | - info on the 6pack protocol, an alternative to KISS for AX.25 | 10 | - info on the 6pack protocol, an alternative to KISS for AX.25 |
7 | DLINK.txt | 11 | DLINK.txt |
8 | - info on the D-Link DE-600/DE-620 parallel port pocket adapters | 12 | - info on the D-Link DE-600/DE-620 parallel port pocket adapters |
9 | PLIP.txt | 13 | PLIP.txt |
10 | - PLIP: The Parallel Line Internet Protocol device driver | 14 | - PLIP: The Parallel Line Internet Protocol device driver |
15 | README.ipw2100 | ||
16 | - README for the Intel PRO/Wireless 2100 driver. | ||
17 | README.ipw2200 | ||
18 | - README for the Intel PRO/Wireless 2915ABG and 2200BG driver. | ||
11 | README.sb1000 | 19 | README.sb1000 |
12 | - info on General Instrument/NextLevel SURFboard1000 cable modem. | 20 | - info on General Instrument/NextLevel SURFboard1000 cable modem. |
13 | alias.txt | 21 | alias.txt |
@@ -20,8 +28,12 @@ atm.txt | |||
20 | - info on where to get ATM programs and support for Linux. | 28 | - info on where to get ATM programs and support for Linux. |
21 | ax25.txt | 29 | ax25.txt |
22 | - info on using AX.25 and NET/ROM code for Linux | 30 | - info on using AX.25 and NET/ROM code for Linux |
31 | batman-adv.txt | ||
32 | - B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames. | ||
23 | baycom.txt | 33 | baycom.txt |
24 | - info on the driver for Baycom style amateur radio modems | 34 | - info on the driver for Baycom style amateur radio modems |
35 | bonding.txt | ||
36 | - Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux. | ||
25 | bridge.txt | 37 | bridge.txt |
26 | - where to get user space programs for ethernet bridging with Linux. | 38 | - where to get user space programs for ethernet bridging with Linux. |
27 | can.txt | 39 | can.txt |
@@ -34,32 +46,60 @@ cxacru.txt | |||
34 | - Conexant AccessRunner USB ADSL Modem | 46 | - Conexant AccessRunner USB ADSL Modem |
35 | cxacru-cf.py | 47 | cxacru-cf.py |
36 | - Conexant AccessRunner USB ADSL Modem configuration file parser | 48 | - Conexant AccessRunner USB ADSL Modem configuration file parser |
49 | cxgb.txt | ||
50 | - Release Notes for the Chelsio N210 Linux device driver. | ||
51 | dccp.txt | ||
52 | - the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42). | ||
37 | de4x5.txt | 53 | de4x5.txt |
38 | - the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver | 54 | - the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver |
39 | decnet.txt | 55 | decnet.txt |
40 | - info on using the DECnet networking layer in Linux. | 56 | - info on using the DECnet networking layer in Linux. |
41 | depca.txt | 57 | depca.txt |
42 | - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver | 58 | - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver |
59 | dl2k.txt | ||
60 | - README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko). | ||
61 | dm9000.txt | ||
62 | - README for the Simtec DM9000 Network driver. | ||
43 | dmfe.txt | 63 | dmfe.txt |
44 | - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. | 64 | - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. |
65 | dns_resolver.txt | ||
66 | - The DNS resolver module allows kernel services to make DNS queries. | ||
67 | driver.txt | ||
68 | - Softnet driver issues. | ||
45 | e100.txt | 69 | e100.txt |
46 | - info on Intel's EtherExpress PRO/100 line of 10/100 boards | 70 | - info on Intel's EtherExpress PRO/100 line of 10/100 boards |
47 | e1000.txt | 71 | e1000.txt |
48 | - info on Intel's E1000 line of gigabit ethernet boards | 72 | - info on Intel's E1000 line of gigabit ethernet boards |
73 | e1000e.txt | ||
74 | - README for the Intel Gigabit Ethernet Driver (e1000e). | ||
49 | eql.txt | 75 | eql.txt |
50 | - serial IP load balancing | 76 | - serial IP load balancing |
51 | ewrk3.txt | 77 | ewrk3.txt |
52 | - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver | 78 | - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver |
79 | fib_trie.txt | ||
80 | - Level Compressed Trie (LC-trie) notes: a structure for routing. | ||
53 | filter.txt | 81 | filter.txt |
54 | - Linux Socket Filtering | 82 | - Linux Socket Filtering |
55 | fore200e.txt | 83 | fore200e.txt |
56 | - FORE Systems PCA-200E/SBA-200E ATM NIC driver info. | 84 | - FORE Systems PCA-200E/SBA-200E ATM NIC driver info. |
57 | framerelay.txt | 85 | framerelay.txt |
58 | - info on using Frame Relay/Data Link Connection Identifier (DLCI). | 86 | - info on using Frame Relay/Data Link Connection Identifier (DLCI). |
87 | gen_stats.txt | ||
88 | - Generic networking statistics for netlink users. | ||
89 | generic_hdlc.txt | ||
90 | - The generic High Level Data Link Control (HDLC) layer. | ||
59 | generic_netlink.txt | 91 | generic_netlink.txt |
60 | - info on Generic Netlink | 92 | - info on Generic Netlink |
93 | gianfar.txt | ||
94 | - Gianfar Ethernet Driver. | ||
61 | ieee802154.txt | 95 | ieee802154.txt |
62 | - Linux IEEE 802.15.4 implementation, API and drivers | 96 | - Linux IEEE 802.15.4 implementation, API and drivers |
97 | ifenslave.c | ||
98 | - Configure network interfaces for parallel routing (bonding). | ||
99 | igb.txt | ||
100 | - README for the Intel Gigabit Ethernet Driver (igb). | ||
101 | igbvf.txt | ||
102 | - README for the Intel Gigabit Ethernet Driver (igbvf). | ||
63 | ip-sysctl.txt | 103 | ip-sysctl.txt |
64 | - /proc/sys/net/ipv4/* variables | 104 | - /proc/sys/net/ipv4/* variables |
65 | ip_dynaddr.txt | 105 | ip_dynaddr.txt |
@@ -68,41 +108,117 @@ ipddp.txt | |||
68 | - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation | 108 | - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation |
69 | iphase.txt | 109 | iphase.txt |
70 | - Interphase PCI ATM (i)Chip IA Linux driver info. | 110 | - Interphase PCI ATM (i)Chip IA Linux driver info. |
111 | ipv6.txt | ||
112 | - Options to the ipv6 kernel module. | ||
113 | ipvs-sysctl.txt | ||
114 | - Per-inode explanation of the /proc/sys/net/ipv4/vs interface. | ||
71 | irda.txt | 115 | irda.txt |
72 | - where to get IrDA (infrared) utilities and info for Linux. | 116 | - where to get IrDA (infrared) utilities and info for Linux. |
117 | ixgb.txt | ||
118 | - README for the Intel 10 Gigabit Ethernet Driver (ixgb). | ||
119 | ixgbe.txt | ||
120 | - README for the Intel 10 Gigabit Ethernet Driver (ixgbe). | ||
121 | ixgbevf.txt | ||
122 | - README for the Intel Virtual Function (VF) Driver (ixgbevf). | ||
123 | l2tp.txt | ||
124 | - User guide to the L2TP tunnel protocol. | ||
73 | lapb-module.txt | 125 | lapb-module.txt |
74 | - programming information of the LAPB module. | 126 | - programming information of the LAPB module. |
75 | ltpc.txt | 127 | ltpc.txt |
76 | - the Apple or Farallon LocalTalk PC card driver | 128 | - the Apple or Farallon LocalTalk PC card driver |
129 | mac80211-injection.txt | ||
130 | - HOWTO use packet injection with mac80211 | ||
77 | multicast.txt | 131 | multicast.txt |
78 | - Behaviour of cards under Multicast | 132 | - Behaviour of cards under Multicast |
133 | multiqueue.txt | ||
134 | - HOWTO for multiqueue network device support. | ||
135 | netconsole.txt | ||
136 | - The network console module netconsole.ko: configuration and notes. | ||
137 | netdev-features.txt | ||
138 | - Network interface features API description. | ||
79 | netdevices.txt | 139 | netdevices.txt |
80 | - info on network device driver functions exported to the kernel. | 140 | - info on network device driver functions exported to the kernel. |
141 | netif-msg.txt | ||
142 | - Design of the network interface message level setting (NETIF_MSG_*). | ||
143 | nfc.txt | ||
144 | - The Linux Near Field Communication (NFC) subsystem. | ||
81 | olympic.txt | 145 | olympic.txt |
82 | - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info. | 146 | - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info. |
147 | operstates.txt | ||
148 | - Overview of network interface operational states. | ||
149 | packet_mmap.txt | ||
150 | - User guide to memory mapped packet socket rings (PACKET_[RT]X_RING). | ||
151 | phonet.txt | ||
152 | - The Phonet packet protocol used in Nokia cellular modems. | ||
153 | phy.txt | ||
154 | - The PHY abstraction layer. | ||
155 | pktgen.txt | ||
156 | - User guide to the kernel packet generator (pktgen.ko). | ||
83 | policy-routing.txt | 157 | policy-routing.txt |
84 | - IP policy-based routing | 158 | - IP policy-based routing |
159 | ppp_generic.txt | ||
160 | - Information about the generic PPP driver. | ||
161 | proc_net_tcp.txt | ||
162 | - Per inode overview of the /proc/net/tcp and /proc/net/tcp6 interfaces. | ||
163 | radiotap-headers.txt | ||
164 | - Background on radiotap headers. | ||
85 | ray_cs.txt | 165 | ray_cs.txt |
86 | - Raylink Wireless LAN card driver info. | 166 | - Raylink Wireless LAN card driver info. |
167 | rds.txt | ||
168 | - Background on the reliable, ordered datagram delivery method RDS. | ||
169 | regulatory.txt | ||
170 | - Overview of the Linux wireless regulatory infrastructure. | ||
171 | rxrpc.txt | ||
172 | - Guide to the RxRPC protocol. | ||
173 | s2io.txt | ||
174 | - Release notes for Neterion Xframe I/II 10GbE driver. | ||
175 | scaling.txt | ||
176 | - Explanation of network scaling techniques: RSS, RPS, RFS, aRFS, XPS. | ||
177 | sctp.txt | ||
178 | - Notes on the Linux kernel implementation of the SCTP protocol. | ||
179 | secid.txt | ||
180 | - Explanation of the secid member in flow structures. | ||
87 | skfp.txt | 181 | skfp.txt |
88 | - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. | 182 | - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. |
89 | smc9.txt | 183 | smc9.txt |
90 | - the driver for SMC's 9000 series of Ethernet cards | 184 | - the driver for SMC's 9000 series of Ethernet cards |
91 | smctr.txt | 185 | smctr.txt |
92 | - SMC TokenCard TokenRing Linux driver info. | 186 | - SMC TokenCard TokenRing Linux driver info. |
187 | spider-net.txt | ||
188 | - README for the Spidernet Driver (as found in PS3 / Cell BE). | ||
189 | stmmac.txt | ||
190 | - README for the STMicro Synopsys Ethernet driver. | ||
191 | tc-actions-env-rules.txt | ||
192 | - rules for traffic control (tc) actions. | ||
193 | timestamping.txt | ||
194 | - overview of network packet timestamping variants. | ||
93 | tcp.txt | 195 | tcp.txt |
94 | - short blurb on how TCP output takes place. | 196 | - short blurb on how TCP output takes place. |
197 | tcp-thin.txt | ||
198 | - kernel tuning options for low rate 'thin' TCP streams. | ||
95 | tlan.txt | 199 | tlan.txt |
96 | - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. | 200 | - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. |
97 | tms380tr.txt | 201 | tms380tr.txt |
98 | - SysKonnect Token Ring ISA/PCI adapter driver info. | 202 | - SysKonnect Token Ring ISA/PCI adapter driver info. |
203 | tproxy.txt | ||
204 | - Transparent proxy support user guide. | ||
99 | tuntap.txt | 205 | tuntap.txt |
100 | - TUN/TAP device driver, allowing user space Rx/Tx of packets. | 206 | - TUN/TAP device driver, allowing user space Rx/Tx of packets. |
207 | udplite.txt | ||
208 | - UDP-Lite protocol (RFC 3828) introduction. | ||
101 | vortex.txt | 209 | vortex.txt |
102 | - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. | 210 | - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. |
211 | vxge.txt | ||
212 | - README for the Neterion X3100 PCIe Server Adapter. | ||
103 | x25.txt | 213 | x25.txt |
104 | - general info on X.25 development. | 214 | - general info on X.25 development. |
105 | x25-iface.txt | 215 | x25-iface.txt |
106 | - description of the X.25 Packet Layer to LAPB device interface. | 216 | - description of the X.25 Packet Layer to LAPB device interface. |
217 | xfrm_proc.txt | ||
218 | - description of the statistics package for XFRM. | ||
219 | xfrm_sync.txt | ||
220 | - sync patches for XFRM enable migration of an SA between hosts. | ||
221 | xfrm_sysctl.txt | ||
222 | - description of the XFRM configuration options. | ||
107 | z8530drv.txt | 223 | z8530drv.txt |
108 | - info about Linux driver for Z8530 based HDLC cards for AX.25 | 224 | - info about Linux driver for Z8530 based HDLC cards for AX.25 |
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 5dd960d75174..91df678fb7f8 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt | |||
@@ -238,6 +238,18 @@ ad_select | |||
238 | 238 | ||
239 | This option was added in bonding version 3.4.0. | 239 | This option was added in bonding version 3.4.0. |
240 | 240 | ||
241 | all_slaves_active | ||
242 | |||
243 | Specifies that duplicate frames (received on inactive ports) should be | ||
244 | dropped (0) or delivered (1). | ||
245 | |||
246 | Normally, bonding will drop duplicate frames (received on inactive | ||
247 | ports), which is desirable for most users. However, there are times | ||
248 | when it is useful to have duplicate frames delivered. | ||
249 | |||
250 | The default value is 0 (drop duplicate frames received on inactive | ||
251 | ports). | ||
252 | |||
241 | arp_interval | 253 | arp_interval |
242 | 254 | ||
243 | Specifies the ARP link monitoring frequency in milliseconds. | 255 | Specifies the ARP link monitoring frequency in milliseconds. |
@@ -433,6 +445,23 @@ miimon | |||
433 | determined. See the High Availability section for additional | 445 | determined. See the High Availability section for additional |
434 | information. The default value is 0. | 446 | information. The default value is 0. |
435 | 447 | ||
448 | min_links | ||
449 | |||
450 | Specifies the minimum number of links that must be active before | ||
451 | asserting carrier. It is similar to the Cisco EtherChannel min-links | ||
452 | feature. This allows setting the minimum number of member ports that | ||
453 | must be up (link-up state) before marking the bond device as up | ||
454 | (carrier on). This is useful for situations where higher level services | ||
455 | such as clustering want to ensure a minimum number of low bandwidth | ||
456 | links are active before switchover. This option only affects 802.3ad | ||
457 | mode. | ||
458 | |||
459 | The default value is 0. This will cause carrier to be asserted (for | ||
460 | 802.3ad mode) whenever there is an active aggregator, regardless of the | ||
461 | number of available links in that aggregator. Note that, because an | ||
462 | aggregator cannot be active without at least one available link, | ||
463 | setting this option to 0 or to 1 has the exact same effect. | ||
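
As a hedged illustration only (the option values are arbitrary and the file path is a common convention rather than a requirement), min_links can be set together with the other 802.3ad options at module load time, for example via a modprobe options line in /etc/modprobe.d/bonding.conf:

options bonding mode=802.3ad miimon=100 min_links=2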
464 | |||
436 | mode | 465 | mode |
437 | 466 | ||
438 | Specifies one of the bonding policies. The default is | 467 | Specifies one of the bonding policies. The default is |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index db2a4067013c..81546990f41c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -992,7 +992,7 @@ bindv6only - BOOLEAN | |||
992 | TRUE: disable IPv4-mapped address feature | 992 | TRUE: disable IPv4-mapped address feature |
993 | FALSE: enable IPv4-mapped address feature | 993 | FALSE: enable IPv4-mapped address feature |
994 | 994 | ||
995 | Default: FALSE (as specified in RFC2553bis) | 995 | Default: FALSE (as specified in RFC3493) |
996 | 996 | ||
997 | IPv6 Fragmentation: | 997 | IPv6 Fragmentation: |
998 | 998 | ||
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt new file mode 100644 index 000000000000..58fd7414e6c0 --- /dev/null +++ b/Documentation/networking/scaling.txt | |||
@@ -0,0 +1,378 @@ | |||
1 | Scaling in the Linux Networking Stack | ||
2 | |||
3 | |||
4 | Introduction | ||
5 | ============ | ||
6 | |||
7 | This document describes a set of complementary techniques in the Linux | ||
8 | networking stack to increase parallelism and improve performance for | ||
9 | multi-processor systems. | ||
10 | |||
11 | The following technologies are described: | ||
12 | |||
13 | RSS: Receive Side Scaling | ||
14 | RPS: Receive Packet Steering | ||
15 | RFS: Receive Flow Steering | ||
16 | Accelerated Receive Flow Steering | ||
17 | XPS: Transmit Packet Steering | ||
18 | |||
19 | |||
20 | RSS: Receive Side Scaling | ||
21 | ========================= | ||
22 | |||
23 | Contemporary NICs support multiple receive and transmit descriptor queues | ||
24 | (multi-queue). On reception, a NIC can send different packets to different | ||
25 | queues to distribute processing among CPUs. The NIC distributes packets by | ||
26 | applying a filter to each packet that assigns it to one of a small number | ||
27 | of logical flows. Packets for each flow are steered to a separate receive | ||
28 | queue, which in turn can be processed by separate CPUs. This mechanism is | ||
29 | generally known as “Receive-side Scaling” (RSS). The goal of RSS and | ||
30 | the other scaling techniques is to increase performance uniformly. | ||
31 | Multi-queue distribution can also be used for traffic prioritization, but | ||
32 | that is not the focus of these techniques. | ||
33 | |||
34 | The filter used in RSS is typically a hash function over the network | ||
35 | and/or transport layer headers-- for example, a 4-tuple hash over | ||
36 | IP addresses and TCP ports of a packet. The most common hardware | ||
37 | implementation of RSS uses a 128-entry indirection table where each entry | ||
38 | stores a queue number. The receive queue for a packet is determined | ||
39 | by masking out the low order seven bits of the computed hash for the | ||
40 | packet (usually a Toeplitz hash), taking this number as a key into the | ||
41 | indirection table and reading the corresponding value. | ||
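
As a rough illustration of the masked-hash lookup described above (the table size, queue count and hash value below are made up, and real hardware computes the Toeplitz hash itself), a user-space C sketch might look like:

/* Sketch of RSS queue selection: the low-order seven bits of the
 * flow hash index a 128-entry indirection table of queue numbers. */
#include <stdint.h>
#include <stdio.h>

#define RSS_INDIR_SIZE 128

static uint8_t indir_table[RSS_INDIR_SIZE];

static unsigned int rss_queue_for_hash(uint32_t hash)
{
        return indir_table[hash & (RSS_INDIR_SIZE - 1)];
}

int main(void)
{
        unsigned int i, nr_queues = 4;    /* hypothetical queue count */

        /* Default mapping: distribute the queues evenly in the table. */
        for (i = 0; i < RSS_INDIR_SIZE; i++)
                indir_table[i] = i % nr_queues;

        printf("hash 0x12345678 -> queue %u\n",
               rss_queue_for_hash(0x12345678));
        return 0;
}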
42 | |||
43 | Some advanced NICs allow steering packets to queues based on | ||
44 | programmable filters. For example, webserver bound TCP port 80 packets | ||
45 | can be directed to their own receive queue. Such “n-tuple” filters can | ||
46 | be configured from ethtool (--config-ntuple). | ||
47 | |||
48 | ==== RSS Configuration | ||
49 | |||
50 | The driver for a multi-queue capable NIC typically provides a kernel | ||
51 | module parameter for specifying the number of hardware queues to | ||
52 | configure. In the bnx2x driver, for instance, this parameter is called | ||
53 | num_queues. A typical RSS configuration would be to have one receive queue | ||
54 | for each CPU if the device supports enough queues, or otherwise at least | ||
55 | one for each memory domain, where a memory domain is a set of CPUs that | ||
56 | share a particular memory level (L1, L2, NUMA node, etc.). | ||
57 | |||
58 | The indirection table of an RSS device, which resolves a queue by masked | ||
59 | hash, is usually programmed by the driver at initialization. The | ||
60 | default mapping is to distribute the queues evenly in the table, but the | ||
61 | indirection table can be retrieved and modified at runtime using ethtool | ||
62 | commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the | ||
63 | indirection table could be done to give different queues different | ||
64 | relative weights. | ||
65 | |||
66 | == RSS IRQ Configuration | ||
67 | |||
68 | Each receive queue has a separate IRQ associated with it. The NIC triggers | ||
69 | this to notify a CPU when new packets arrive on the given queue. The | ||
70 | signaling path for PCIe devices uses message signaled interrupts (MSI-X), | ||
71 | which can route each interrupt to a particular CPU. The active mapping | ||
72 | of queues to IRQs can be determined from /proc/interrupts. By default, | ||
73 | an IRQ may be handled on any CPU. Because a non-negligible part of packet | ||
74 | processing takes place in receive interrupt handling, it is advantageous | ||
75 | to spread receive interrupts between CPUs. To manually adjust the IRQ | ||
76 | affinity of each interrupt see Documentation/IRQ-affinity.txt. Some systems | ||
77 | will be running irqbalance, a daemon that dynamically optimizes IRQ | ||
78 | assignments and as a result may override any manual settings. | ||
79 | |||
80 | == Suggested Configuration | ||
81 | |||
82 | RSS should be enabled when latency is a concern or whenever receive | ||
83 | interrupt processing forms a bottleneck. Spreading load between CPUs | ||
84 | decreases queue length. For low latency networking, the optimal setting | ||
85 | is to allocate as many queues as there are CPUs in the system (or the | ||
86 | NIC maximum, if lower). The most efficient high-rate configuration | ||
87 | is likely the one with the smallest number of receive queues where no | ||
88 | receive queue overflows due to a saturated CPU, because in default | ||
89 | mode with interrupt coalescing enabled, the aggregate number of | ||
90 | interrupts (and thus work) grows with each additional queue. | ||
91 | |||
92 | Per-cpu load can be observed using the mpstat utility, but note that on | ||
93 | processors with hyperthreading (HT), each hyperthread is represented as | ||
94 | a separate CPU. For interrupt handling, HT has shown no benefit in | ||
95 | initial tests, so limit the number of queues to the number of CPU cores | ||
96 | in the system. | ||
97 | |||
98 | |||
99 | RPS: Receive Packet Steering | ||
100 | ============================ | ||
101 | |||
102 | Receive Packet Steering (RPS) is logically a software implementation of | ||
103 | RSS. Being in software, it is necessarily called later in the datapath. | ||
104 | Whereas RSS selects the queue and hence CPU that will run the hardware | ||
105 | interrupt handler, RPS selects the CPU to perform protocol processing | ||
106 | above the interrupt handler. This is accomplished by placing the packet | ||
107 | on the desired CPU’s backlog queue and waking up the CPU for processing. | ||
108 | RPS has some advantages over RSS: 1) it can be used with any NIC, | ||
109 | 2) software filters can easily be added to hash over new protocols, | ||
110 | 3) it does not increase hardware device interrupt rate (although it does | ||
111 | introduce inter-processor interrupts (IPIs)). | ||
112 | |||
113 | RPS is called during the bottom half of the receive interrupt handler, when | ||
114 | a driver sends a packet up the network stack with netif_rx() or | ||
115 | netif_receive_skb(). These call the get_rps_cpu() function, which | ||
116 | selects the queue that should process a packet. | ||
117 | |||
118 | The first step in determining the target CPU for RPS is to calculate a | ||
119 | flow hash over the packet’s addresses or ports (2-tuple or 4-tuple hash | ||
120 | depending on the protocol). This serves as a consistent hash of the | ||
121 | associated flow of the packet. The hash is either provided by hardware | ||
122 | or will be computed in the stack. Capable hardware can pass the hash in | ||
123 | the receive descriptor for the packet; this would usually be the same | ||
124 | hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in | ||
125 | skb->rx_hash and can be used elsewhere in the stack as a hash of the | ||
126 | packet’s flow. | ||
127 | |||
128 | Each receive hardware queue has an associated list of CPUs to which | ||
129 | RPS may enqueue packets for processing. For each received packet, | ||
130 | an index into the list is computed from the flow hash modulo the size | ||
131 | of the list. The indexed CPU is the target for processing the packet, | ||
132 | and the packet is queued to the tail of that CPU’s backlog queue. At | ||
133 | the end of the bottom half routine, IPIs are sent to any CPUs for which | ||
134 | packets have been queued to their backlog queue. The IPI wakes backlog | ||
135 | processing on the remote CPU, and any queued packets are then processed | ||
136 | up the networking stack. | ||
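
The selection described above boils down to a modulo operation over the configured CPU list. A hedged, user-space approximation (the CPU list and hash are invented; the kernel works on its own per-queue map structures instead):

/* Sketch of RPS target CPU selection: flow hash modulo the number of
 * CPUs configured for the receive queue indexes the CPU list. */
#include <stdint.h>
#include <stdio.h>

static int rps_target_cpu(const int *cpus, unsigned int nr_cpus, uint32_t hash)
{
        if (nr_cpus == 0)
                return -1;      /* RPS disabled: stay on interrupting CPU */
        return cpus[hash % nr_cpus];
}

int main(void)
{
        int rps_cpus[] = { 2, 3, 6, 7 };  /* example list of candidate CPUs */
        uint32_t hash = 0x9e3779b9;       /* hash from hardware or stack */

        printf("packet steered to CPU %d\n",
               rps_target_cpu(rps_cpus, 4, hash));
        return 0;
}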
137 | |||
138 | ==== RPS Configuration | ||
139 | |||
140 | RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on | ||
141 | by default for SMP). Even when compiled in, RPS remains disabled until | ||
142 | explicitly configured. The list of CPUs to which RPS may forward traffic | ||
143 | can be configured for each receive queue using a sysfs file entry: | ||
144 | |||
145 | /sys/class/net/<dev>/queues/rx-<n>/rps_cpus | ||
146 | |||
147 | This file implements a bitmap of CPUs. RPS is disabled when it is zero | ||
148 | (the default), in which case packets are processed on the interrupting | ||
149 | CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to | ||
150 | the bitmap. | ||
151 | |||
152 | == Suggested Configuration | ||
153 | |||
154 | For a single queue device, a typical RPS configuration would be to set | ||
155 | the rps_cpus to the CPUs in the same memory domain of the interrupting | ||
156 | CPU. If NUMA locality is not an issue, this could also be all CPUs in | ||
157 | the system. At high interrupt rate, it might be wise to exclude the | ||
158 | interrupting CPU from the map since that already performs much work. | ||
159 | |||
160 | For a multi-queue system, if RSS is configured so that a hardware | ||
161 | receive queue is mapped to each CPU, then RPS is probably redundant | ||
162 | and unnecessary. If there are fewer hardware queues than CPUs, then | ||
163 | RPS might be beneficial if the rps_cpus for each queue are the ones that | ||
164 | share the same memory domain as the interrupting CPU for that queue. | ||
165 | |||
166 | |||
167 | RFS: Receive Flow Steering | ||
168 | ========================== | ||
169 | |||
170 | While RPS steers packets solely based on hash, and thus generally | ||
171 | provides good load distribution, it does not take into account | ||
172 | application locality. Receive Flow Steering (RFS) addresses this. The | ||
173 | goal of RFS is to increase the data cache hit rate by steering | ||
174 | kernel processing of packets to the CPU where the application thread | ||
175 | consuming the packet is running. RFS relies on the same RPS mechanisms | ||
176 | to enqueue packets onto the backlog of another CPU and to wake up that | ||
177 | CPU. | ||
178 | |||
179 | In RFS, packets are not forwarded directly by the value of their hash, | ||
180 | but the hash is used as index into a flow lookup table. This table maps | ||
181 | flows to the CPUs where those flows are being processed. The flow hash | ||
182 | (see RPS section above) is used to calculate the index into this table. | ||
183 | The CPU recorded in each entry is the one which last processed the flow. | ||
184 | If an entry does not hold a valid CPU, then packets mapped to that entry | ||
185 | are steered using plain RPS. Multiple table entries may point to the | ||
186 | same CPU. Indeed, with many flows and few CPUs, it is very likely that | ||
187 | a single application thread handles flows with many different flow hashes. | ||
188 | |||
189 | rps_sock_flow_table is a global flow table that contains the *desired* CPU for | ||
190 | flows: the CPU that is currently processing the flow in userspace. Each | ||
191 | table value is a CPU index that is updated during calls to recvmsg and | ||
192 | sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() | ||
193 | and tcp_splice_read()). | ||
194 | |||
195 | When the scheduler moves a thread to a new CPU while it has outstanding | ||
196 | receive packets on the old CPU, packets may arrive out of order. To | ||
197 | avoid this, RFS uses a second flow table to track outstanding packets | ||
198 | for each flow: rps_dev_flow_table is a table specific to each hardware | ||
199 | receive queue of each device. Each table value stores a CPU index and a | ||
200 | counter. The CPU index represents the *current* CPU onto which packets | ||
201 | for this flow are enqueued for further kernel processing. Ideally, kernel | ||
202 | and userspace processing occur on the same CPU, and hence the CPU index | ||
203 | in both tables is identical. This is likely false if the scheduler has | ||
204 | recently migrated a userspace thread while the kernel still has packets | ||
205 | enqueued for kernel processing on the old CPU. | ||
206 | |||
207 | The counter in rps_dev_flow_table values records the length of the current | ||
208 | CPU's backlog when a packet in this flow was last enqueued. Each backlog | ||
209 | queue has a head counter that is incremented on dequeue. A tail counter | ||
210 | is computed as head counter + queue length. In other words, the counter | ||
211 | in rps_dev_flow_table[i] records the last element in flow i that has | ||
212 | been enqueued onto the currently designated CPU for flow i (of course, | ||
213 | entry i is actually selected by hash and multiple flows may hash to the | ||
214 | same entry i). | ||
215 | |||
216 | And now the trick for avoiding out of order packets: when selecting the | ||
217 | CPU for packet processing (from get_rps_cpu()) the rps_sock_flow table | ||
218 | and the rps_dev_flow table of the queue that the packet was received on | ||
219 | are compared. If the desired CPU for the flow (found in the | ||
220 | rps_sock_flow table) matches the current CPU (found in the rps_dev_flow | ||
221 | table), the packet is enqueued onto that CPU’s backlog. If they differ, | ||
222 | the current CPU is updated to match the desired CPU if one of the | ||
223 | following is true: | ||
224 | |||
225 | - The current CPU's queue head counter >= the recorded tail counter | ||
226 | value in rps_dev_flow[i] | ||
227 | - The current CPU is unset (equal to NR_CPUS) | ||
228 | - The current CPU is offline | ||
229 | |||
230 | After this check, the packet is sent to the (possibly updated) current | ||
231 | CPU. These rules aim to ensure that a flow only moves to a new CPU when | ||
232 | there are no packets outstanding on the old CPU, as the outstanding | ||
233 | packets could arrive later than those about to be processed on the new | ||
234 | CPU. | ||
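
To make these rules concrete, here is a small stand-alone sketch of the CPU-switch decision (the struct name, the cpu_is_online() helper and NR_CPUS_UNSET are placeholders, not the kernel's actual data structures):

/* Sketch: move a flow to the desired CPU only when no packets of the
 * flow can still be queued on the old CPU's backlog. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_UNSET 64        /* stands in for "no CPU recorded" */

struct dev_flow_entry {
        unsigned int cpu;        /* current CPU for kernel processing */
        unsigned int last_qtail; /* old CPU's backlog tail at last enqueue */
};

static bool cpu_is_online(unsigned int cpu)
{
        return cpu < 8;          /* pretend CPUs 0-7 are online */
}

static unsigned int rfs_select_cpu(struct dev_flow_entry *e,
                                   unsigned int desired_cpu,
                                   unsigned int old_cpu_head_counter)
{
        if (e->cpu != desired_cpu &&
            (e->cpu >= NR_CPUS_UNSET ||                 /* current CPU unset */
             !cpu_is_online(e->cpu) ||                  /* current CPU offline */
             old_cpu_head_counter >= e->last_qtail))    /* old backlog drained */
                e->cpu = desired_cpu;
        return e->cpu;
}

int main(void)
{
        struct dev_flow_entry e = { .cpu = 2, .last_qtail = 100 };

        /* Head counter 90 < 100: packets outstanding, stay on CPU 2. */
        printf("cpu = %u\n", rfs_select_cpu(&e, 5, 90));
        /* Head counter 120 >= 100: safe to follow the thread to CPU 5. */
        printf("cpu = %u\n", rfs_select_cpu(&e, 5, 120));
        return 0;
}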
235 | |||
236 | ==== RFS Configuration | ||
237 | |||
238 | RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on | ||
239 | by default for SMP). The functionality remains disabled until explicitly | ||
240 | configured. The number of entries in the global flow table is set through: | ||
241 | |||
242 | /proc/sys/net/core/rps_sock_flow_entries | ||
243 | |||
244 | The number of entries in the per-queue flow table is set through: | ||
245 | |||
246 | /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt | ||
247 | |||
248 | == Suggested Configuration | ||
249 | |||
250 | Both of these need to be set before RFS is enabled for a receive queue. | ||
251 | Values for both are rounded up to the nearest power of two. The | ||
252 | suggested flow count depends on the expected number of active connections | ||
253 | at any given time, which may be significantly less than the number of open | ||
254 | connections. We have found that a value of 32768 for rps_sock_flow_entries | ||
255 | works fairly well on a moderately loaded server. | ||
256 | |||
257 | For a single queue device, the rps_flow_cnt value for the single queue | ||
258 | would normally be configured to the same value as rps_sock_flow_entries. | ||
259 | For a multi-queue device, the rps_flow_cnt for each queue might be | ||
260 | configured as rps_sock_flow_entries / N, where N is the number of | ||
261 | queues. So for instance, if rps_sock_flow_entries is set to 32768 and there | ||
262 | are 16 configured receive queues, rps_flow_cnt for each queue might be | ||
263 | configured as 2048. | ||
264 | |||
265 | |||
266 | Accelerated RFS | ||
267 | =============== | ||
268 | |||
269 | Accelerated RFS is to RFS what RSS is to RPS: a hardware-accelerated load | ||
270 | balancing mechanism that uses soft state to steer flows based on where | ||
271 | the application thread consuming the packets of each flow is running. | ||
272 | Accelerated RFS should perform better than RFS since packets are sent | ||
273 | directly to a CPU local to the thread consuming the data. The target CPU | ||
274 | will either be the same CPU where the application runs, or at least a CPU | ||
275 | which is local to the application thread’s CPU in the cache hierarchy. | ||
276 | |||
277 | To enable accelerated RFS, the networking stack calls the | ||
278 | ndo_rx_flow_steer driver function to communicate the desired hardware | ||
279 | queue for packets matching a particular flow. The network stack | ||
280 | automatically calls this function every time a flow entry in | ||
281 | rps_dev_flow_table is updated. The driver in turn uses a device specific | ||
282 | method to program the NIC to steer the packets. | ||
283 | |||
284 | The hardware queue for a flow is derived from the CPU recorded in | ||
285 | rps_dev_flow_table. The stack consults a CPU to hardware queue map which | ||
286 | is maintained by the NIC driver. This is an auto-generated reverse map of | ||
287 | the IRQ affinity table shown by /proc/interrupts. Drivers can use | ||
288 | functions in the cpu_rmap (“CPU affinity reverse map”) kernel library | ||
289 | to populate the map. For each CPU, the corresponding queue in the map is | ||
290 | set to be one whose processing CPU is closest in cache locality. | ||
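
A purely illustrative sketch of building such a CPU-to-queue reverse map from per-queue IRQ affinities (real drivers use the kernel's cpu_rmap library and also fall back to the topologically closest queue, which this toy version does not):

/* Sketch: derive a per-CPU receive queue from each queue's IRQ
 * affinity mask; CPUs with no matching queue fall back to queue 0. */
#include <stdio.h>

#define NR_CPUS   8
#define NR_QUEUES 4

int main(void)
{
        /* irq_affinity[q]: bitmask of CPUs handling queue q's interrupt */
        unsigned int irq_affinity[NR_QUEUES] = { 0x03, 0x0c, 0x30, 0xc0 };
        int cpu, q, cpu_to_queue[NR_CPUS];

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpu_to_queue[cpu] = 0;                  /* fallback queue */
                for (q = 0; q < NR_QUEUES; q++) {
                        if (irq_affinity[q] & (1u << cpu)) {
                                cpu_to_queue[cpu] = q;
                                break;
                        }
                }
                printf("CPU %d -> queue %d\n", cpu, cpu_to_queue[cpu]);
        }
        return 0;
}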
291 | |||
292 | ==== Accelerated RFS Configuration | ||
293 | |||
294 | Accelerated RFS is only available if the kernel is compiled with | ||
295 | CONFIG_RFS_ACCEL and support is provided by the NIC device and driver. | ||
296 | It also requires that ntuple filtering is enabled via ethtool. The map | ||
297 | of CPU to queues is automatically deduced from the IRQ affinities | ||
298 | configured for each receive queue by the driver, so no additional | ||
299 | configuration should be necessary. | ||
300 | |||
301 | == Suggested Configuration | ||
302 | |||
303 | This technique should be enabled whenever one wants to use RFS and the | ||
304 | NIC supports hardware acceleration. | ||
305 | |||
306 | XPS: Transmit Packet Steering | ||
307 | ============================= | ||
308 | |||
309 | Transmit Packet Steering is a mechanism for intelligently selecting | ||
310 | which transmit queue to use when transmitting a packet on a multi-queue | ||
311 | device. To accomplish this, a mapping from CPU to hardware queue(s) is | ||
312 | recorded. The goal of this mapping is usually to assign queues | ||
313 | exclusively to a subset of CPUs, where the transmit completions for | ||
314 | these queues are processed on a CPU within this set. This choice | ||
315 | provides two benefits. First, contention on the device queue lock is | ||
316 | significantly reduced since fewer CPUs contend for the same queue | ||
317 | (contention can be eliminated completely if each CPU has its own | ||
318 | transmit queue). Secondly, cache miss rate on transmit completion is | ||
319 | reduced, in particular for data cache lines that hold the sk_buff | ||
320 | structures. | ||
321 | |||
322 | XPS is configured per transmit queue by setting a bitmap of CPUs that | ||
323 | may use that queue to transmit. The reverse mapping, from CPUs to | ||
324 | transmit queues, is computed and maintained for each network device. | ||
325 | When transmitting the first packet in a flow, the function | ||
326 | get_xps_queue() is called to select a queue. This function uses the ID | ||
327 | of the running CPU as a key into the CPU-to-queue lookup table. If the | ||
328 | ID matches a single queue, that is used for transmission. If multiple | ||
329 | queues match, one is selected by using the flow hash to compute an index | ||
330 | into the set. | ||
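
A hedged sketch of this selection (the struct and function names are invented; the kernel's actual per-device maps and locking are more involved):

/* Sketch of XPS queue selection: a single mapped queue is used
 * directly; with several candidates the flow hash picks one. */
#include <stdint.h>
#include <stdio.h>

struct cpu_tx_map {
        unsigned int len;        /* number of queues mapped to this CPU */
        unsigned int queues[4];
};

static int xps_pick_queue(const struct cpu_tx_map *map, uint32_t flow_hash)
{
        if (map->len == 0)
                return -1;       /* no XPS mapping: use another selector */
        if (map->len == 1)
                return map->queues[0];
        return map->queues[flow_hash % map->len];
}

int main(void)
{
        struct cpu_tx_map map = { .len = 2, .queues = { 4, 5 } };

        printf("queue = %d\n", xps_pick_queue(&map, 0xdeadbeef));
        return 0;
}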
331 | |||
332 | The queue chosen for transmitting a particular flow is saved in the | ||
333 | corresponding socket structure for the flow (e.g. a TCP connection). | ||
334 | This transmit queue is used for subsequent packets sent on the flow to | ||
335 | prevent out of order (ooo) packets. The choice also amortizes the cost | ||
336 | of calling get_xps_queues() over all packets in the flow. To avoid | ||
337 | ooo packets, the queue for a flow can subsequently only be changed if | ||
338 | skb->ooo_okay is set for a packet in the flow. This flag indicates that | ||
339 | there are no outstanding packets in the flow, so the transmit queue can | ||
340 | change without the risk of generating out of order packets. The | ||
341 | transport layer is responsible for setting ooo_okay appropriately. TCP, | ||
342 | for instance, sets the flag when all data for a connection has been | ||
343 | acknowledged. | ||
344 | |||
345 | ==== XPS Configuration | ||
346 | |||
347 | XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by | ||
348 | default for SMP). The functionality remains disabled until explicitly | ||
349 | configured. To enable XPS, the bitmap of CPUs that may use a transmit | ||
350 | queue is configured using the sysfs file entry: | ||
351 | |||
352 | /sys/class/net/<dev>/queues/tx-<n>/xps_cpus | ||
353 | |||
354 | == Suggested Configuration | ||
355 | |||
356 | For a network device with a single transmission queue, XPS configuration | ||
357 | has no effect, since there is no choice in this case. In a multi-queue | ||
358 | system, XPS is preferably configured so that each CPU maps onto one queue. | ||
359 | If there are as many queues as there are CPUs in the system, then each | ||
360 | queue can also map onto one CPU, resulting in exclusive pairings that | ||
361 | experience no contention. If there are fewer queues than CPUs, then the | ||
362 | best CPUs to share a given queue are probably those that share the cache | ||
363 | with the CPU that processes transmit completions for that queue | ||
364 | (transmit interrupts). | ||
365 | |||
366 | |||
367 | Further Information | ||
368 | =================== | ||
369 | RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into | ||
370 | 2.6.38. Original patches were submitted by Tom Herbert | ||
371 | (therbert@google.com) | ||
372 | |||
373 | Accelerated RFS was introduced in 2.6.35. Original patches were | ||
374 | submitted by Ben Hutchings (bhutchings@solarflare.com) | ||
375 | |||
376 | Authors: | ||
377 | Tom Herbert (therbert@google.com) | ||
378 | Willem de Bruijn (willemb@google.com) | ||
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt new file mode 100644 index 000000000000..8fb1ba7fe7bf --- /dev/null +++ b/Documentation/ramoops.txt | |||
@@ -0,0 +1,76 @@ | |||
1 | Ramoops oops/panic logger | ||
2 | ========================= | ||
3 | |||
4 | Sergiu Iordache <sergiu@chromium.org> | ||
5 | |||
6 | Updated: 8 August 2011 | ||
7 | |||
8 | 0. Introduction | ||
9 | |||
10 | Ramoops is an oops/panic logger that writes its logs to RAM before the system | ||
11 | crashes. It works by logging oopses and panics in a circular buffer. Ramoops | ||
12 | needs a system with persistent RAM so that the content of that area can | ||
13 | survive after a restart. | ||
14 | |||
15 | 1. Ramoops concepts | ||
16 | |||
17 | Ramoops uses a predefined memory area to store the dump. The start and size of | ||
18 | the memory area are set using two variables: | ||
19 | * "mem_address" for the start | ||
20 | * "mem_size" for the size. The memory size will be rounded down to a | ||
21 | power of two. | ||
22 | |||
23 | The memory area is divided into "record_size" chunks (also rounded down to | ||
24 | a power of two) and each oops/panic writes a "record_size" chunk of | ||
25 | information. | ||
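
To illustrate the rounding only (the sizes below are invented and the kernel performs the equivalent arithmetic internally), a small C sketch:

/* Sketch: how the rounded-down sizes determine the record count. */
#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long x)
{
        unsigned long p = 1;

        while (p * 2 <= x)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned long mem_size = 3 * 1024 * 1024;       /* e.g. 3 MiB */
        unsigned long record_size = 12 * 1024;          /* e.g. 12 KiB */

        mem_size = rounddown_pow_of_two(mem_size);       /* -> 2 MiB */
        record_size = rounddown_pow_of_two(record_size); /* -> 8 KiB */

        printf("%lu records of %lu bytes each\n",
               mem_size / record_size, record_size);
        return 0;
}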
26 | |||
27 | Setting the "dump_oops" variable to 1 dumps both oopses and panics, while | ||
28 | setting it to 0 dumps only the panics. | ||
29 | |||
30 | The module uses a counter to record multiple dumps but the counter gets reset | ||
31 | on restart (i.e. new dumps after the restart will overwrite old ones). | ||
32 | |||
33 | 2. Setting the parameters | ||
34 | |||
35 | Setting the ramoops parameters can be done in two different ways: | ||
36 | 1. Use the module parameters (which have the names of the variables described | ||
37 | above). | ||
38 | 2. Use a platform device and set the platform data. The parameters can then | ||
39 | be set through that platform data. An example of doing that is: | ||
40 | |||
41 | #include <linux/ramoops.h> | ||
42 | [...] | ||
43 | |||
44 | static struct ramoops_platform_data ramoops_data = { | ||
45 | .mem_size = <...>, | ||
46 | .mem_address = <...>, | ||
47 | .record_size = <...>, | ||
48 | .dump_oops = <...>, | ||
49 | }; | ||
50 | |||
51 | static struct platform_device ramoops_dev = { | ||
52 | .name = "ramoops", | ||
53 | .dev = { | ||
54 | .platform_data = &ramoops_data, | ||
55 | }, | ||
56 | }; | ||
57 | |||
58 | [... inside a function ...] | ||
59 | int ret; | ||
60 | |||
61 | ret = platform_device_register(&ramoops_dev); | ||
62 | if (ret) { | ||
63 | printk(KERN_ERR "unable to register platform device\n"); | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | 3. Dump format | ||
68 | |||
69 | The data dump begins with a header, currently defined as "====" followed by a | ||
70 | timestamp and a new line. The dump then continues with the actual data. | ||
71 | |||
72 | 4. Reading the data | ||
73 | |||
74 | The dump data can be read from memory (through /dev/mem or other means). | ||
75 | Getting the module parameters, which are needed in order to parse the data, can | ||
76 | be done through /sys/module/ramoops/parameters/* . | ||
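
A rough user-space reader sketch (mem_address, record_size and the record count are placeholders that would come from the module parameters above; error handling is minimal and root access to /dev/mem is assumed):

/* Sketch: scan the ramoops area via /dev/mem for "====" record headers. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        off_t mem_address = 0x8000000;   /* placeholder platform value */
        size_t record_size = 4096;       /* placeholder platform value */
        int records = 4, i, fd;
        char *buf = malloc(record_size + 1);

        fd = open("/dev/mem", O_RDONLY);
        if (fd < 0 || !buf)
                return 1;

        for (i = 0; i < records; i++) {
                ssize_t n = pread(fd, buf, record_size,
                                  mem_address + (off_t)i * record_size);
                if (n <= 0)
                        break;
                buf[n] = '\0';
                if (strncmp(buf, "====", 4) == 0)     /* record header */
                        printf("record %d:\n%s\n", i, buf);
        }
        close(fd);
        free(buf);
        return 0;
}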
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX index fe0251c4cfb7..8e601991d91c 100644 --- a/Documentation/virtual/00-INDEX +++ b/Documentation/virtual/00-INDEX | |||
@@ -8,3 +8,6 @@ lguest/ | |||
8 | - Extremely simple hypervisor for experimental/educational use. | 8 | - Extremely simple hypervisor for experimental/educational use. |
9 | uml/ | 9 | uml/ |
10 | - User Mode Linux, builds/runs Linux kernel as a userspace program. | 10 | - User Mode Linux, builds/runs Linux kernel as a userspace program. |
11 | virtio.txt | ||
12 | - Text version of draft virtio spec. | ||
13 | See http://ozlabs.org/~rusty/virtio-spec | ||
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c index 043bd7df3139..d928c134dee6 100644 --- a/Documentation/virtual/lguest/lguest.c +++ b/Documentation/virtual/lguest/lguest.c | |||
@@ -1996,6 +1996,9 @@ int main(int argc, char *argv[]) | |||
1996 | /* We use a simple helper to copy the arguments separated by spaces. */ | 1996 | /* We use a simple helper to copy the arguments separated by spaces. */ |
1997 | concat((char *)(boot + 1), argv+optind+2); | 1997 | concat((char *)(boot + 1), argv+optind+2); |
1998 | 1998 | ||
1999 | /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */ | ||
2000 | boot->hdr.kernel_alignment = 0x1000000; | ||
2001 | |||
1999 | /* Boot protocol version: 2.07 supports the fields for lguest. */ | 2002 | /* Boot protocol version: 2.07 supports the fields for lguest. */ |
2000 | boot->hdr.version = 0x207; | 2003 | boot->hdr.version = 0x207; |
2001 | 2004 | ||
diff --git a/Documentation/virtual/virtio-spec.txt b/Documentation/virtual/virtio-spec.txt new file mode 100644 index 000000000000..a350ae135b8c --- /dev/null +++ b/Documentation/virtual/virtio-spec.txt | |||
@@ -0,0 +1,2200 @@ | |||
1 | [Generated file: see http://ozlabs.org/~rusty/virtio-spec/] | ||
2 | Virtio PCI Card Specification | ||
3 | v0.9.1 DRAFT | ||
4 | - | ||
5 | |||
6 | Rusty Russell <rusty@rustcorp.com.au>IBM Corporation (Editor) | ||
7 | |||
8 | 2011 August 1. | ||
9 | |||
10 | Purpose and Description | ||
11 | |||
12 | This document describes the specifications of the “virtio” family | ||
13 | of PCI devices. These devices are | ||
14 | found in virtual environments, | ||
15 | yet by design they are not all that different from physical PCI | ||
16 | devices, and this document treats them as such. This allows the | ||
17 | guest to use standard PCI drivers and discovery mechanisms. | ||
18 | |||
19 | The purpose of virtio and this specification is that virtual | ||
20 | environments and guests should have a straightforward, efficient, | ||
21 | standard and extensible mechanism for virtual devices, rather | ||
22 | than boutique per-environment or per-OS mechanisms. | ||
23 | |||
24 | Straightforward: Virtio PCI devices use normal PCI mechanisms | ||
25 | of interrupts and DMA which should be familiar to any device | ||
26 | driver author. There is no exotic page-flipping or COW | ||
27 | mechanism: it's just a PCI device.[footnote: | ||
28 | This lack of page-sharing implies that the implementation of the | ||
29 | device (e.g. the hypervisor or host) needs full access to the | ||
30 | guest memory. Communication with untrusted parties (i.e. | ||
31 | inter-guest communication) requires copying. | ||
32 | ] | ||
33 | |||
34 | Efficient: Virtio PCI devices consist of rings of descriptors | ||
35 | for input and output, which are neatly separated to avoid cache | ||
36 | effects from both guest and device writing to the same cache | ||
37 | lines. | ||
38 | |||
39 | Standard: Virtio PCI makes no assumptions about the environment | ||
40 | in which it operates, beyond supporting PCI. In fact the virtio | ||
41 | devices specified in the appendices do not require PCI at all: | ||
42 | they have been implemented on non-PCI buses.[footnote: | ||
43 | The Linux implementation further separates the PCI virtio code | ||
44 | from the specific virtio drivers: these drivers are shared with | ||
45 | the non-PCI implementations (currently lguest and S/390). | ||
46 | ] | ||
47 | |||
48 | Extensible: Virtio PCI devices contain feature bits which are | ||
49 | acknowledged by the guest operating system during device setup. | ||
50 | This allows forwards and backwards compatibility: the device | ||
51 | offers all the features it knows about, and the driver | ||
52 | acknowledges those it understands and wishes to use. | ||
53 | |||
54 | Virtqueues | ||
55 | |||
56 | The mechanism for bulk data transport on virtio PCI devices is | ||
57 | pretentiously called a virtqueue. Each device can have zero or | ||
58 | more virtqueues: for example, the network device has one for | ||
59 | transmit and one for receive. | ||
60 | |||
61 | Each virtqueue occupies two or more physically-contiguous pages | ||
62 | (defined, for the purposes of this specification, as 4096 bytes), | ||
63 | and consists of three parts: | ||
64 | |||
65 | |||
66 | +-------------------+-----------------------------------+-----------+ | ||
67 | | Descriptor Table | Available Ring (padding) | Used Ring | | ||
68 | +-------------------+-----------------------------------+-----------+ | ||
69 | |||
70 | |||
71 | When the driver wants to send buffers to the device, it puts them | ||
72 | in one or more slots in the descriptor table, and writes the | ||
73 | descriptor indices into the available ring. It then notifies the | ||
74 | device. When the device has finished with the buffers, it writes | ||
75 | the descriptors into the used ring, and sends an interrupt. | ||
76 | |||
77 | Specification | ||
78 | |||
79 | PCI Discovery | ||
80 | |||
81 | Any PCI device with Vendor ID 0x1AF4, and Device ID 0x1000 | ||
82 | through 0x103F inclusive is a virtio device[footnote: | ||
83 | The actual value within this range is ignored | ||
84 | ]. The device must also have a Revision ID of 0 to match this | ||
85 | specification. | ||
86 | |||
87 | The Subsystem Device ID indicates which virtio device is | ||
88 | supported by the device. The Subsystem Vendor ID should reflect | ||
89 | the PCI Vendor ID of the environment (it's currently only used | ||
90 | for informational purposes by the guest). | ||
91 | |||
92 | |||
93 | +----------------------+--------------------+---------------+ | ||
94 | | Subsystem Device ID | Virtio Device | Specification | | ||
95 | +----------------------+--------------------+---------------+ | ||
96 | +----------------------+--------------------+---------------+ | ||
97 | | 1 | network card | Appendix C | | ||
98 | +----------------------+--------------------+---------------+ | ||
99 | | 2 | block device | Appendix D | | ||
100 | +----------------------+--------------------+---------------+ | ||
101 | | 3 | console | Appendix E | | ||
102 | +----------------------+--------------------+---------------+ | ||
103 | | 4 | entropy source | Appendix F | | ||
104 | +----------------------+--------------------+---------------+ | ||
105 | | 5 | memory ballooning | Appendix G | | ||
106 | +----------------------+--------------------+---------------+ | ||
107 | | 6 | ioMemory | - | | ||
108 | +----------------------+--------------------+---------------+ | ||
109 | | 9 | 9P transport | - | | ||
110 | +----------------------+--------------------+---------------+ | ||
111 | |||
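
Not part of the specification, just a restatement of the discovery rule above in C (the IDs are the ones given in the text):

/* Sketch: does a (vendor, device, revision) triple denote a virtio
 * device under this draft specification? */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_virtio_pci_device(uint16_t vendor, uint16_t device,
                                 uint8_t revision)
{
        return vendor == 0x1AF4 &&
               device >= 0x1000 && device <= 0x103F &&
               revision == 0;
}

int main(void)
{
        /* A network card would additionally have Subsystem Device ID 1. */
        printf("%s\n", is_virtio_pci_device(0x1AF4, 0x1000, 0) ?
               "virtio device" : "not virtio");
        return 0;
}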
112 | |||
113 | Device Configuration | ||
114 | |||
115 | To configure the device, we use the first I/O region of the PCI | ||
116 | device. This contains a virtio header followed by a | ||
117 | device-specific region. | ||
118 | |||
119 | There may be different widths of accesses to the I/O region; the “ | ||
120 | natural” access method for each field in the virtio header must | ||
121 | be used (i.e. 32-bit accesses for 32-bit fields, etc), but the | ||
122 | device-specific region can be accessed using any width accesses, | ||
123 | and should obtain the same results. | ||
124 | |||
125 | Note that this is possible because while the virtio header is PCI | ||
126 | (i.e. little) endian, the device-specific region is encoded in | ||
127 | the native endian of the guest (where such distinction is | ||
128 | applicable). | ||
129 | |||
130 | Device Initialization Sequence | ||
131 | |||
132 | We start with an overview of device initialization, then expand | ||
133 | on the details of the device and how each step is performed. | ||
134 | |||
135 | Reset the device. This is not required on initial start up. | ||
136 | |||
137 | The ACKNOWLEDGE status bit is set: we have noticed the device. | ||
138 | |||
139 | The DRIVER status bit is set: we know how to drive the device. | ||
140 | |||
141 | Device-specific setup, including reading the Device Feature | ||
142 | Bits, discovery of virtqueues for the device, optional MSI-X | ||
143 | setup, and reading and possibly writing the virtio | ||
144 | configuration space. | ||
145 | |||
146 | The subset of Device Feature Bits understood by the driver is | ||
147 | written to the device. | ||
148 | |||
149 | The DRIVER_OK status bit is set. | ||
150 | |||
151 | The device can now be used (ie. buffers added to the | ||
152 | virtqueues)[footnote: | ||
153 | Historically, drivers have used the device before steps 5 and 6. | ||
154 | This is only allowed if the driver does not use any features | ||
155 | which would alter this early use of the device. | ||
156 | ] | ||
157 | |||
158 | If any of these steps go irrecoverably wrong, the guest should | ||
159 | set the FAILED status bit to indicate that it has given up on the | ||
160 | device (it can reset the device later to restart if desired). | ||
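
The sequence above, compressed into a sketch. The write_status() and write_guest_features() helpers are stand-ins for accesses to the I/O region described later, and the numeric status values follow the Linux virtio_config.h definitions:

/* Sketch of the driver-side initialization handshake. */
#include <stdint.h>
#include <stdio.h>

#define STATUS_ACKNOWLEDGE 1
#define STATUS_DRIVER      2
#define STATUS_DRIVER_OK   4
#define STATUS_FAILED      128

static uint8_t  status;                       /* stand-in Device Status field */
static uint32_t device_features = 0x00010003; /* stand-in Device Features */

static void write_status(uint8_t bits)
{
        status |= bits;
        printf("status = 0x%02x\n", status);
}

static void write_guest_features(uint32_t f)
{
        printf("guest features = 0x%08x\n", f);
}

int main(void)
{
        uint32_t driver_features = 0x00000003;  /* what this driver supports */

        status = 0;                             /* reset the device */
        write_status(STATUS_ACKNOWLEDGE);       /* we noticed the device */
        write_status(STATUS_DRIVER);            /* we know how to drive it */
        /* ... device-specific setup: virtqueues, optional MSI-X ... */
        write_guest_features(device_features & driver_features);
        write_status(STATUS_DRIVER_OK);         /* device may now be used */
        return 0;
}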
161 | |||
162 | We now cover the fields required for general setup in detail. | ||
163 | |||
164 | Virtio Header | ||
165 | |||
166 | The virtio header looks as follows: | ||
167 | |||
168 | |||
169 | +------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+ | ||
170 | | Bits || 32 | 32 | 32 | 16 | 16 | 16 | 8 | 8 | | ||
171 | +------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+ | ||
172 | | Read/Write || R | R+W | R+W | R | R+W | R+W | R+W | R | | ||
173 | +------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+ | ||
174 | | Purpose || Device | Guest | Queue | Queue | Queue | Queue | Device | ISR | | ||
175 | | || Features bits 0:31 | Features bits 0:31 | Address | Size | Select | Notify | Status | Status | | ||
176 | +------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+ | ||
177 | |||
178 | |||
179 | If MSI-X is enabled for the device, two additional fields | ||
180 | immediately follow this header: | ||
181 | |||
182 | |||
183 | +------------++----------------+--------+ | ||
184 | | Bits || 16 | 16 | | ||
185 | +----------------+--------+ | ||
186 | +------------++----------------+--------+ | ||
187 | | Read/Write || R+W | R+W | | ||
188 | +------------++----------------+--------+ | ||
189 | | Purpose || Configuration | Queue | | ||
190 | | (MSI-X) || Vector | Vector | | ||
191 | +------------++----------------+--------+ | ||
192 | |||
193 | |||
194 | Finally, if the VIRTIO_F_FEATURES_HI feature bit is set, this is | ||
195 | immediately followed by two additional fields: | ||
196 | |||
197 | |||
198 | +------------++----------------------+----------------------+ | ||
199 | | Bits || 32 | 32 | | ||
200 | +------------++----------------------+----------------------+ | ||
201 | | Read/Write || R | R+W | | ||
202 | +------------++----------------------+----------------------+ | ||
203 | | Purpose || Device | Guest | | ||
204 | | || Features bits 32:63 | Features bits 32:63 | | ||
205 | +------------++----------------------+----------------------+ | ||
206 | |||
207 | |||
208 | Immediately following these general headers, there may be | ||
209 | device-specific headers: | ||
210 | |||
211 | |||
212 | +------------++--------------------+ | ||
213 | | Bits || Device Specific | | ||
214 | +--------------------+ | ||
215 | +------------++--------------------+ | ||
216 | | Read/Write || Device Specific | | ||
217 | +------------++--------------------+ | ||
218 | | Purpose || Device Specific... | | ||
219 | | || | | ||
220 | +------------++--------------------+ | ||
221 | |||
222 | |||
223 | Device Status | ||
224 | |||
225 | The Device Status field is updated by the guest to indicate its | ||
226 | progress. This provides a simple low-level diagnostic: it's most | ||
227 | useful to imagine them hooked up to traffic lights on the console | ||
228 | indicating the status of each device. | ||
229 | |||
230 | The device can be reset by writing a 0 to this field, otherwise | ||
231 | at least one bit should be set: | ||
232 | |||
233 | ACKNOWLEDGE (1) Indicates that the guest OS has found the | ||
234 | device and recognized it as a valid virtio device. | ||
235 | |||
236 | DRIVER (2) Indicates that the guest OS knows how to drive the | ||
237 | device. Under Linux, drivers can be loadable modules so there | ||
238 | may be a significant (or infinite) delay before setting this | ||
239 | bit. | ||
240 | |||
241 | DRIVER_OK (4) Indicates that the driver is set up and ready to | ||
242 | drive the device. | ||
243 | |||
244 | FAILED (128) Indicates that something went wrong in the guest, | ||
245 | and it has given up on the device. This could be an internal | ||
246 | error, or the driver didn't like the device for some reason, or | ||
247 | even a fatal error during device operation. The device must be | ||
248 | reset before attempting to re-initialize. | ||
249 | |||
250 | Feature Bits | ||
251 | |||
252 | The least significant 31 bits of the first configuration field | ||
253 | indicate the features that the device supports (the high bit is | ||
254 | reserved, and will be used to indicate the presence of future | ||
255 | feature bits elsewhere). If more than 31 feature bits are | ||
256 | supported, the device indicates so by setting feature bit 31 (see | ||
257 | [cha:Reserved-Feature-Bits]). The bits are allocated as follows: | ||
258 | |||
259 | 0 to 23 Feature bits for the specific device type | ||
260 | |||
261 | 24 to 40 Feature bits reserved for extensions to the queue and | ||
262 | feature negotiation mechanisms | ||
263 | |||
264 | 41 to 63 Feature bits reserved for future extensions | ||
265 | |||
266 | For example, feature bit 0 for a network device (i.e. Subsystem | ||
267 | Device ID 1) indicates that the device supports checksumming of | ||
268 | packets. | ||
269 | |||
270 | The feature bits are negotiated: the device lists all the | ||
271 | features it understands in the Device Features field, and the | ||
272 | guest writes the subset that it understands into the Guest | ||
273 | Features field. The only way to renegotiate is to reset the | ||
274 | device. | ||
275 | |||
276 | In particular, new fields in the device configuration header are | ||
277 | indicated by offering a feature bit, so the guest can check | ||
278 | before accessing that part of the configuration space. | ||
279 | |||
280 | This allows for forwards and backwards compatibility: if the | ||
281 | device is enhanced with a new feature bit, older guests will not | ||
282 | write that feature bit back to the Guest Features field and it | ||
283 | can go into backwards compatibility mode. Similarly, if a guest | ||
284 | is enhanced with a feature that the device doesn't support, it | ||
285 | will not see that feature bit in the Device Features field and | ||
286 | can go into backwards compatibility mode (or, for poor | ||
287 | implementations, set the FAILED Device Status bit). | ||
288 | |||
289 | Access to feature bits 32 to 63 is enabled by Guest by setting | ||
290 | feature bit 31. If this bit is unset, Device must assume that all | ||
291 | feature bits > 31 are unset. | ||
292 | |||
293 | Configuration/Queue Vectors | ||
294 | |||
295 | When MSI-X capability is present and enabled in the device | ||
296 | (through standard PCI configuration space) 4 bytes at byte offset | ||
297 | 20 are used to map configuration change and queue interrupts to | ||
298 | MSI-X vectors. In this case, the ISR Status field is unused, and | ||
299 | device specific configuration starts at byte offset 24 in virtio | ||
300 | header structure. When MSI-X capability is not enabled, device | ||
301 | specific configuration starts at byte offset 20 in virtio header. | ||
302 | |||
303 | Writing a valid MSI-X Table entry number, 0 to 0x7FF, to one of | ||
304 | Configuration/Queue Vector registers, maps interrupts triggered | ||
305 | by the configuration change/selected queue events respectively to | ||
306 | the corresponding MSI-X vector. To disable interrupts for a | ||
307 | specific event type, unmap it by writing a special NO_VECTOR | ||
308 | value: | ||
309 | |||
310 | /* Vector value used to disable MSI for queue */ | ||
311 | |||
312 | #define VIRTIO_MSI_NO_VECTOR 0xffff | ||
313 | |||
314 | Reading these registers returns vector mapped to a given event, | ||
315 | or NO_VECTOR if unmapped. All queue and configuration change | ||
316 | events are unmapped by default. | ||
317 | |||
318 | Note that mapping an event to vector might require allocating | ||
319 | internal device resources, and might fail. Devices report such | ||
320 | failures by returning the NO_VECTOR value when the relevant | ||
321 | Vector field is read. After mapping an event to vector, the | ||
322 | driver must verify success by reading the Vector field value: on | ||
323 | success, the previously written value is returned, and on | ||
324 | failure, NO_VECTOR is returned. If a mapping failure is detected, | ||
325 | the driver can retry mapping with fewer vectors, or disable MSI-X. | ||
326 | |||
327 | Virtqueue Configuration | ||
328 | |||
329 | As a device can have zero or more virtqueues for bulk data | ||
330 | transport (for example, the network driver has two), the driver | ||
331 | needs to configure them as part of the device-specific | ||
332 | configuration. | ||
333 | |||
334 | This is done as follows, for each virtqueue a device has: | ||
335 | |||
336 | Write the virtqueue index (first queue is 0) to the Queue | ||
337 | Select field. | ||
338 | |||
339 | Read the virtqueue size from the Queue Size field, which is | ||
340 | always a power of 2. This controls how big the virtqueue is | ||
341 | (see below). If this field is 0, the virtqueue does not exist. | ||
342 | |||
343 | Allocate and zero the virtqueue in contiguous physical memory, on a | ||
344 | 4096 byte alignment. Write the physical address, divided by | ||
345 | 4096, to the Queue Address field.[footnote: | ||
346 | The 4096 is based on the x86 page size, but it's also large | ||
347 | enough to ensure that the separate parts of the virtqueue are on | ||
348 | separate cache lines. | ||
349 | ] | ||
350 | |||
351 | Optionally, if MSI-X capability is present and enabled on the | ||
352 | device, select a vector to use to request interrupts triggered | ||
353 | by virtqueue events. Write the MSI-X Table entry number | ||
354 | corresponding to this vector in Queue Vector field. Read the | ||
355 | Queue Vector field: on success, previously written value is | ||
356 | returned; on failure, NO_VECTOR value is returned. | ||
357 | |||
358 | The Queue Size field controls the total number of bytes required | ||
359 | for the virtqueue according to the following formula: | ||
360 | |||
361 | #define ALIGN(x) (((x) + 4095) & ~4095) | ||
362 | |||
363 | static inline unsigned vring_size(unsigned int qsz) | ||
364 | |||
365 | { | ||
366 | |||
367 | return ALIGN(sizeof(struct vring_desc)*qsz + sizeof(u16)*(2 | ||
368 | + qsz)) | ||
369 | |||
370 | + ALIGN(sizeof(struct vring_used_elem)*qsz); | ||
371 | |||
372 | } | ||
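
As a worked example of this formula (assuming the usual 16-byte struct vring_desc and 8-byte struct vring_used_elem): for a Queue Size of 256, the first term is ALIGN(16*256 + 2*(2+256)) = ALIGN(4612) = 8192 and the second is ALIGN(8*256) = ALIGN(2048) = 4096, so the virtqueue occupies 12288 bytes, i.e. three 4096-byte pages.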
373 | |||
374 | This currently wastes some space with padding, but also allows | ||
375 | future extensions. The virtqueue layout structure looks like this | ||
376 | (qsz is the Queue Size field, which is a variable, so this code | ||
377 | won't compile): | ||
378 | |||
379 | struct vring { | ||
380 | |||
381 | /* The actual descriptors (16 bytes each) */ | ||
382 | |||
383 | struct vring_desc desc[qsz]; | ||
384 | |||
385 | |||
386 | |||
387 | /* A ring of available descriptor heads with free-running | ||
388 | index. */ | ||
389 | |||
390 | struct vring_avail avail; | ||
391 | |||
392 | |||
393 | |||
394 | // Padding to the next 4096 boundary. | ||
395 | |||
396 | char pad[]; | ||
397 | |||
398 | |||
399 | |||
400 | // A ring of used descriptor heads with free-running index. | ||
401 | |||
402 | struct vring_used used; | ||
403 | |||
404 | }; | ||
405 | |||
406 | A Note on Virtqueue Endianness | ||
407 | |||
408 | Note that the endian of these fields and everything else in the | ||
409 | virtqueue is the native endian of the guest, not little-endian as | ||
410 | PCI normally is. This makes for simpler guest code, and it is | ||
411 | assumed that the host already has to be deeply aware of the guest | ||
412 | endian so such an “endian-aware” device is not a significant | ||
413 | issue. | ||
414 | |||
415 | Descriptor Table | ||
416 | |||
417 | The descriptor table refers to the buffers the guest is using for | ||
418 | the device. The addresses are physical addresses, and the buffers | ||
419 | can be chained via the next field. Each descriptor describes a | ||
420 | buffer which is read-only or write-only, but a chain of | ||
421 | descriptors can contain both read-only and write-only buffers. | ||
422 | |||
423 | No descriptor chain may be more than 2^32 bytes long in total. | ||
424 | struct vring_desc { | ||
425 | /* Address (guest-physical). */ | ||
426 | |||
427 | u64 addr; | ||
428 | |||
429 | /* Length. */ | ||
430 | |||
431 | u32 len; | ||
432 | |||
433 | /* This marks a buffer as continuing via the next field. */ | ||
434 | |||
435 | #define VRING_DESC_F_NEXT 1 | ||
436 | |||
437 | /* This marks a buffer as write-only (otherwise read-only). */ | ||
438 | |||
439 | #define VRING_DESC_F_WRITE 2 | ||
440 | |||
441 | /* This means the buffer contains a list of buffer descriptors. | ||
442 | */ | ||
443 | |||
444 | #define VRING_DESC_F_INDIRECT 4 | ||
445 | |||
446 | /* The flags as indicated above. */ | ||
447 | |||
448 | u16 flags; | ||
449 | |||
450 | /* Next field if flags & NEXT */ | ||
451 | |||
452 | u16 next; | ||
453 | |||
454 | }; | ||
455 | |||
456 | The number of descriptors in the table is specified by the Queue | ||
457 | Size field for this virtqueue. | ||
458 | |||
459 | <sub:Indirect-Descriptors>Indirect Descriptors | ||
460 | |||
461 | Some devices benefit by concurrently dispatching a large number | ||
462 | of large requests. The VIRTIO_RING_F_INDIRECT_DESC feature can be | ||
463 | used to allow this (see [cha:Reserved-Feature-Bits]). To increase | ||
464 | ring capacity it is possible to store a table of indirect | ||
465 | descriptors anywhere in memory, and insert a descriptor in main | ||
466 | virtqueue (with flags&INDIRECT on) that refers to memory buffer | ||
467 | containing this indirect descriptor table; fields addr and len | ||
468 | refer to the indirect table address and length in bytes, | ||
469 | respectively. The indirect table layout structure looks like this | ||
470 | (len is the length of the descriptor that refers to this table, | ||
471 | which is a variable, so this code won't compile): | ||
472 | |||
473 | struct indirect_descriptor_table { | ||
474 | |||
475 | /* The actual descriptors (16 bytes each) */ | ||
476 | |||
477 | struct vring_desc desc[len / 16]; | ||
478 | |||
479 | }; | ||
480 | |||
481 | The first indirect descriptor is located at start of the indirect | ||
482 | descriptor table (index 0), additional indirect descriptors are | ||
483 | chained by next field. An indirect descriptor without next field | ||
484 | (with flags&NEXT off) signals the end of the indirect descriptor | ||
485 | table, and transfers control back to the main virtqueue. An | ||
486 | indirect descriptor can not refer to another indirect descriptor | ||
487 | table (flags&INDIRECT must be off). A single indirect descriptor | ||
488 | table can include both read-only and write-only descriptors; | ||
489 | write-only flag (flags&WRITE) in the descriptor that refers to it | ||
490 | is ignored. | ||
491 | |||
492 | Available Ring | ||
493 | |||
The available ring indicates which descriptors we are offering the
device: each entry refers to the head of a descriptor chain. The
“flags” field is currently 0 or 1: 1 indicates that we do not need
an interrupt when the device consumes a descriptor from the
available ring. Alternatively, the guest can ask the device to
delay interrupts until an entry with the index specified by the “
used_event” field is written in the used ring (equivalently,
until the idx field in the used ring reaches the value
used_event + 1). The method employed by the device is controlled
by the VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits]
). This interrupt suppression is merely an optimization; it may
not suppress interrupts entirely.
506 | |||
507 | The “idx” field indicates where we would put the next descriptor | ||
508 | entry (modulo the ring size). This starts at 0, and increases. | ||
509 | |||
510 | struct vring_avail { | ||
511 | |||
512 | #define VRING_AVAIL_F_NO_INTERRUPT 1 | ||
513 | |||
514 | u16 flags; | ||
515 | |||
516 | u16 idx; | ||
517 | |||
518 | u16 ring[qsz]; /* qsz is the Queue Size field read from device | ||
519 | */ | ||
520 | |||
521 | u16 used_event; | ||
522 | |||
523 | }; | ||
524 | |||
525 | Used Ring | ||
526 | |||
527 | The used ring is where the device returns buffers once it is done | ||
528 | with them. The flags field can be used by the device to hint that | ||
529 | no notification is necessary when the guest adds to the available | ||
530 | ring. Alternatively, the “avail_event” field can be used by the | ||
531 | device to hint that no notification is necessary until an entry | ||
532 | with an index specified by the “avail_event” is written in the | ||
533 | available ring (equivalently, until the idx field in the | ||
534 | available ring will reach the value avail_event + 1). The method | ||
535 | employed by the device is controlled by the guest through the | ||
536 | VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits] | ||
537 | ). [footnote: | ||
538 | These fields are kept here because this is the only part of the | ||
539 | virtqueue written by the device | ||
540 | ]. | ||
541 | |||
542 | Each entry in the ring is a pair: the head entry of the | ||
543 | descriptor chain describing the buffer (this matches an entry | ||
placed in the available ring by the guest earlier), and the total
number of bytes written into the buffer. The latter is extremely useful
546 | for guests using untrusted buffers: if you do not know exactly | ||
547 | how much has been written by the device, you usually have to zero | ||
548 | the buffer to ensure no data leakage occurs. | ||
549 | |||
550 | /* u32 is used here for ids for padding reasons. */ | ||
551 | |||
552 | struct vring_used_elem { | ||
553 | |||
554 | /* Index of start of used descriptor chain. */ | ||
555 | |||
556 | u32 id; | ||
557 | |||
558 | /* Total length of the descriptor chain which was used | ||
559 | (written to) */ | ||
560 | |||
561 | u32 len; | ||
562 | |||
563 | }; | ||
564 | |||
565 | |||
566 | |||
567 | struct vring_used { | ||
568 | |||
569 | #define VRING_USED_F_NO_NOTIFY 1 | ||
570 | |||
571 | u16 flags; | ||
572 | |||
573 | u16 idx; | ||
574 | |||
575 | struct vring_used_elem ring[qsz]; | ||
576 | |||
577 | u16 avail_event; | ||
578 | |||
579 | }; | ||
580 | |||
581 | Helpers for Managing Virtqueues | ||
582 | |||
The Linux kernel source code contains the definitions above and
584 | helper routines in a more usable form, in | ||
585 | include/linux/virtio_ring.h. This was explicitly licensed by IBM | ||
586 | and Red Hat under the (3-clause) BSD license so that it can be | ||
587 | freely used by all other projects, and is reproduced (with slight | ||
588 | variation to remove Linux assumptions) in Appendix A. | ||
589 | |||
590 | Device Operation | ||
591 | |||
592 | There are two parts to device operation: supplying new buffers to | ||
593 | the device, and processing used buffers from the device. As an | ||
594 | example, the virtio network device has two virtqueues: the | ||
595 | transmit virtqueue and the receive virtqueue. The driver adds | ||
596 | outgoing (read-only) packets to the transmit virtqueue, and then | ||
597 | frees them after they are used. Similarly, incoming (write-only) | ||
598 | buffers are added to the receive virtqueue, and processed after | ||
599 | they are used. | ||
600 | |||
601 | Supplying Buffers to The Device | ||
602 | |||
603 | Actual transfer of buffers from the guest OS to the device | ||
604 | operates as follows: | ||
605 | |||
1. Place the buffer(s) into free descriptor(s).

   If there are no free descriptors, the guest may choose to
   notify the device even if notifications are suppressed (to
   reduce latency).[footnote:
   The Linux drivers do this only for read-only buffers: for
   write-only buffers, it is assumed that the driver is merely
   trying to keep the receive buffer ring full, and no notification
   of this expected condition is necessary.
   ]

2. Place the id of the buffer in the next ring entry of the
   available ring.

3. The steps (1) and (2) may be performed repeatedly if batching
   is possible.

4. A memory barrier should be executed to ensure the device sees
   the updated descriptor table and available ring before the next
   step.

5. The available “idx” field should be increased by the number of
   entries added to the available ring.

6. A memory barrier should be executed to ensure that we update
   the idx field before checking for notification suppression.

7. If notifications are not suppressed, the device should be
   notified of the new buffers.
635 | |||
Note that the above steps do not take precautions against the
available ring buffer wrapping around: this is not possible since
the ring is the same size as the descriptor table, so step
(1) will prevent such a condition.
640 | |||
641 | In addition, the maximum queue size is 32768 (it must be a power | ||
642 | of 2 which fits in 16 bits), so the 16-bit “idx” value can always | ||
643 | distinguish between a full and empty buffer. | ||
644 | |||
645 | Here is a description of each stage in more detail. | ||
646 | |||
647 | Placing Buffers Into The Descriptor Table | ||
648 | |||
649 | A buffer consists of zero or more read-only physically-contiguous | ||
650 | elements followed by zero or more physically-contiguous | ||
651 | write-only elements (it must have at least one element). This | ||
652 | algorithm maps it into the descriptor table: | ||
653 | |||
654 | for each buffer element, b: | ||
655 | |||
656 | Get the next free descriptor table entry, d | ||
657 | |||
658 | Set d.addr to the physical address of the start of b | ||
659 | |||
660 | Set d.len to the length of b. | ||
661 | |||
662 | If b is write-only, set d.flags to VRING_DESC_F_WRITE, | ||
663 | otherwise 0. | ||
664 | |||
665 | If there is a buffer element after this: | ||
666 | |||
667 | Set d.next to the index of the next free descriptor element. | ||
668 | |||
669 | Set the VRING_DESC_F_NEXT bit in d.flags. | ||
670 | |||
671 | In practice, the d.next fields are usually used to chain free | ||
672 | descriptors, and a separate count kept to check there are enough | ||
673 | free descriptors before beginning the mappings. | ||
674 | |||
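A minimal sketch of the algorithm above, assuming the struct vring_desc layout and VRING_DESC_F_* flags defined earlier, a free chain linked through the next fields, and a hypothetical buf_elem array describing the buffer's physical segments. The caller is assumed to have already checked that enough free descriptors exist.

#include <stdint.h>

struct buf_elem {
        uint64_t phys;      /* guest-physical address of this segment */
        uint32_t len;       /* length of this segment */
        int      writable;  /* non-zero for device-writable segments */
};

/* Map n buffer elements into the descriptor table; descriptors are taken
 * from a free chain linked through the next fields, starting at *free_head.
 * Returns the index of the head descriptor of the new chain. */
static uint16_t add_buf(struct vring_desc *desc, uint16_t *free_head,
                        const struct buf_elem *elem, unsigned int n)
{
        uint16_t head = *free_head, d = head;
        unsigned int i;

        for (i = 0; i < n; i++) {
                uint16_t next_free = desc[d].next;  /* follow the free chain */

                desc[d].addr  = elem[i].phys;
                desc[d].len   = elem[i].len;
                desc[d].flags = elem[i].writable ? VRING_DESC_F_WRITE : 0;

                if (i + 1 < n) {
                        /* Chain to the next free descriptor. */
                        desc[d].next   = next_free;
                        desc[d].flags |= VRING_DESC_F_NEXT;
                }
                d = next_free;
        }
        *free_head = d;      /* what remains of the free chain */
        return head;
}
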
675 | Updating The Available Ring | ||
676 | |||
677 | The head of the buffer we mapped is the first d in the algorithm | ||
678 | above. A naive implementation would do the following: | ||
679 | |||
680 | avail->ring[avail->idx % qsz] = head; | ||
681 | |||
682 | However, in general we can add many descriptors before we update | ||
683 | the “idx” field (at which point they become visible to the | ||
684 | device), so we keep a counter of how many we've added: | ||
685 | |||
686 | avail->ring[(avail->idx + added++) % qsz] = head; | ||
687 | |||
688 | Updating The Index Field | ||
689 | |||
690 | Once the idx field of the virtqueue is updated, the device will | ||
691 | be able to access the descriptor entries we've created and the | ||
memory they refer to. This is why a memory barrier is generally
used before the idx update, to ensure the device sees the most
up-to-date copy.
695 | |||
696 | The idx field always increments, and we let it wrap naturally at | ||
697 | 65536: | ||
698 | |||
699 | avail->idx += added; | ||
700 | |||
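Putting the last few steps together, a sketch assuming the struct vring from Appendix A and a platform write memory barrier macro wmb() (not defined by this specification); heads[] holds the chain heads produced while placing buffers into descriptors.

/* Publish 'added' chain heads, already stored in heads[], to the device. */
static void publish_avail(struct vring *vr, const uint16_t *heads,
                          uint16_t added)
{
        uint16_t i;

        for (i = 0; i < added; i++)
                vr->avail->ring[(vr->avail->idx + i) % vr->num] = heads[i];

        wmb();                   /* expose descriptors and ring entries first ... */
        vr->avail->idx += added; /* ... then the index update makes them visible  */
}
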
701 | <sub:Notifying-The-Device>Notifying The Device | ||
702 | |||
703 | Device notification occurs by writing the 16-bit virtqueue index | ||
704 | of this virtqueue to the Queue Notify field of the virtio header | ||
705 | in the first I/O region of the PCI device. This can be expensive, | ||
706 | however, so the device can suppress such notifications if it | ||
707 | doesn't need them. We have to be careful to expose the new idx | ||
708 | value before checking the suppression flag: it's OK to notify | ||
709 | gratuitously, but not to omit a required notification. So again, | ||
710 | we use a memory barrier here before reading the flags or the | ||
711 | avail_event field. | ||
712 | |||
If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated, and if
the VRING_USED_F_NO_NOTIFY flag is not set, we go ahead and write
the virtqueue index to the Queue Notify field.

If the VIRTIO_F_RING_EVENT_IDX feature is negotiated, we read the
avail_event field in the used ring structure. If the
available index has crossed the avail_event field value since the
last notification, we go ahead and write the virtqueue index to
the Queue Notify field. The avail_event field wraps naturally at
65536 as well:
722 | |||
723 | (u16)(new_idx - avail_event - 1) < (u16)(new_idx - old_idx) | ||
724 | |||
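A sketch of this decision, assuming the struct vring layout of Appendix A (with avail_event stored immediately after the used ring entries), the vring_need_event() helper from Appendix A, a platform memory barrier macro mb(), and a hypothetical notify_queue() routine that writes the virtqueue index to the Queue Notify field.

#include <stdint.h>

extern void notify_queue(uint16_t queue_index);   /* hypothetical */

/* old_idx is avail->idx before the batch was added, new_idx after it. */
static void maybe_notify(struct vring *vr, int event_idx_negotiated,
                         uint16_t old_idx, uint16_t new_idx,
                         uint16_t queue_index)
{
        mb();   /* expose the new avail->idx before reading the device's hints */

        if (event_idx_negotiated) {
                /* avail_event is the 16-bit word after the used ring entries. */
                uint16_t avail_event = *(uint16_t *)&vr->used->ring[vr->num];

                if (!vring_need_event(avail_event, new_idx, old_idx))
                        return;
        } else if (vr->used->flags & VRING_USED_F_NO_NOTIFY) {
                return;
        }
        notify_queue(queue_index);
}
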
725 | <sub:Receiving-Used-Buffers>Receiving Used Buffers From The | ||
726 | Device | ||
727 | |||
728 | Once the device has used a buffer (read from or written to it, or | ||
729 | parts of both, depending on the nature of the virtqueue and the | ||
730 | device), it sends an interrupt, following an algorithm very | ||
731 | similar to the algorithm used for the driver to send the device a | ||
732 | buffer: | ||
733 | |||
Write the head descriptor number to the next entry in the used
ring.
736 | |||
737 | Update the used ring idx. | ||
738 | |||
739 | Determine whether an interrupt is necessary: | ||
740 | |||
If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated: check
that the VRING_AVAIL_F_NO_INTERRUPT flag is not set in
avail->flags
744 | |||
If the VIRTIO_F_RING_EVENT_IDX feature is negotiated: check
whether the used index has crossed the used_event field value
since the last update. The used_event field wraps naturally
at 65536 as well (a sketch of this check in C follows the list
below):

(u16)(new_idx - used_event - 1) < (u16)(new_idx - old_idx)
749 | |||
750 | If an interrupt is necessary: | ||
751 | |||
752 | If MSI-X capability is disabled: | ||
753 | |||
754 | Set the lower bit of the ISR Status field for the device. | ||
755 | |||
756 | Send the appropriate PCI interrupt for the device. | ||
757 | |||
758 | If MSI-X capability is enabled: | ||
759 | |||
Request the appropriate MSI-X interrupt message for the
device; the Queue Vector field specifies the MSI-X Table entry
number to use.
763 | |||
764 | If Queue Vector field value is NO_VECTOR, no interrupt | ||
765 | message is requested for this event. | ||
766 | |||
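The device-side check mirrors the driver's notification check. A minimal sketch, again assuming the struct vring and vring_need_event() helper from Appendix A, with used_event taken from the 16-bit word that follows the available ring entries.

#include <stdint.h>

/* old_idx is used->idx before this batch of used entries, new_idx after it.
 * Returns non-zero if an interrupt should be sent. */
static int need_interrupt(struct vring *vr, int event_idx_negotiated,
                          uint16_t old_idx, uint16_t new_idx)
{
        if (!event_idx_negotiated)
                return !(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);

        /* used_event is published by the driver after the available ring. */
        uint16_t used_event = vr->avail->ring[vr->num];

        return vring_need_event(used_event, new_idx, old_idx);
}
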
767 | The guest interrupt handler should: | ||
768 | |||
769 | If MSI-X capability is disabled: read the ISR Status field, | ||
770 | which will reset it to zero. If the lower bit is zero, the | ||
771 | interrupt was not for this device. Otherwise, the guest driver | ||
772 | should look through the used rings of each virtqueue for the | ||
773 | device, to see if any progress has been made by the device | ||
774 | which requires servicing. | ||
775 | |||
776 | If MSI-X capability is enabled: look through the used rings of | ||
777 | each virtqueue mapped to the specific MSI-X vector for the | ||
778 | device, to see if any progress has been made by the device | ||
779 | which requires servicing. | ||
780 | |||
For each ring, the guest should then disable interrupts, if
required, by setting the VRING_AVAIL_F_NO_INTERRUPT flag in the
avail structure. It can then process the used ring entries,
finally re-enabling interrupts by clearing the
VRING_AVAIL_F_NO_INTERRUPT flag or by updating the used_event
field in the avail structure. The guest should then execute a
memory barrier and recheck whether the ring is empty. This is
necessary to handle the case where, after the last check and
before interrupts were re-enabled, an interrupt has been
suppressed by the device:
790 | |||
vring_disable_interrupts(vq);

for (;;) {
        if (vq->last_seen_used == vring.used->idx) {
                /* The ring looks empty: re-enable interrupts ... */
                vring_enable_interrupts(vq);
                mb();
                /* ... and recheck; if it is still empty, we are done. */
                if (vq->last_seen_used == vring.used->idx)
                        break;
        }

        struct vring_used_elem *e =
                &vring.used->ring[vq->last_seen_used % qsz];
        process_buffer(e);
        vq->last_seen_used++;
}
815 | |||
816 | Dealing With Configuration Changes | ||
817 | |||
818 | Some virtio PCI devices can change the device configuration | ||
819 | state, as reflected in the virtio header in the PCI configuration | ||
820 | space. In this case: | ||
821 | |||
If MSI-X capability is disabled: an interrupt is delivered and
the second least significant bit is set in the ISR Status field
to indicate that the driver should re-examine the configuration
space. Note that a single interrupt can indicate both that one
or more virtqueues have been used and that the configuration
space has changed: even if the config bit is set, virtqueues
must be scanned.
829 | |||
830 | If MSI-X capability is enabled: an interrupt message is | ||
831 | requested. The Configuration Vector field sets the MSI-X Table | ||
832 | entry number to use. If Configuration Vector field value is | ||
833 | NO_VECTOR, no interrupt message is requested for this event. | ||
834 | |||
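A sketch of a legacy (non-MSI-X) interrupt handler combining the two cases above. The bit values follow the description above (bit 0 for virtqueue activity, the next bit for configuration changes); the macro names and the helper routines are hypothetical, with isr_status_read() standing in for the read-to-clear access to the ISR Status field.

#include <stdint.h>

#define ISR_QUEUE_BIT   0x1   /* one or more virtqueues have been used */
#define ISR_CONFIG_BIT  0x2   /* configuration space has changed */

extern uint8_t isr_status_read(void *dev);    /* hypothetical read-to-clear */
extern void scan_used_rings(void *dev);       /* hypothetical: service all queues */
extern void reread_config_space(void *dev);   /* hypothetical */

static int virtio_legacy_isr(void *dev)
{
        uint8_t isr = isr_status_read(dev);   /* reading resets it to zero */

        if (!isr)
                return 0;                     /* the interrupt was not for us */

        if (isr & ISR_CONFIG_BIT)
                reread_config_space(dev);

        /* Even if only the config bit is set, the virtqueues must be scanned. */
        scan_used_rings(dev);
        return 1;
}
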
835 | Creating New Device Types | ||
836 | |||
837 | Various considerations are necessary when creating a new device | ||
838 | type: | ||
839 | |||
840 | How Many Virtqueues? | ||
841 | |||
842 | It is possible that a very simple device will operate entirely | ||
843 | through its configuration space, but most will need at least one | ||
virtqueue in which it will place requests. A device with both
input and output (eg. the console and network devices described
here) needs two queues: one which the driver fills with buffers to
receive input, and one into which the driver places buffers to
transmit output.
849 | |||
850 | What Configuration Space Layout? | ||
851 | |||
852 | Configuration space is generally used for rarely-changing or | ||
853 | initialization-time parameters. But it is a limited resource, so | ||
854 | it might be better to use a virtqueue to update configuration | ||
855 | information (the network device does this for filtering, | ||
856 | otherwise the table in the config space could potentially be very | ||
857 | large). | ||
858 | |||
859 | Note that this space is generally the guest's native endian, | ||
860 | rather than PCI's little-endian. | ||
861 | |||
862 | What Device Number? | ||
863 | |||
864 | Currently device numbers are assigned quite freely: a simple | ||
865 | request mail to the author of this document or the Linux | ||
866 | virtualization mailing list[footnote: | ||
867 | |||
868 | https://lists.linux-foundation.org/mailman/listinfo/virtualization | ||
869 | ] will be sufficient to secure a unique one. | ||
870 | |||
871 | Meanwhile for experimental drivers, use 65535 and work backwards. | ||
872 | |||
873 | How many MSI-X vectors? | ||
874 | |||
Using the optional MSI-X capability, devices can speed up
interrupt processing by removing the need for the guest driver to
read the ISR Status register (which might be an expensive
operation), by reducing interrupt sharing between devices and
queues within the device, and by handling interrupts on multiple
CPUs. However, some systems impose a limit (which might be as low
as 256) on the total number of MSI-X vectors that can be allocated
to all devices. Devices and/or device drivers should take this
into account, limiting the number of vectors used unless the
device is expected to cause a high volume of interrupts. Devices
can control the number of vectors used by limiting the MSI-X Table
Size or by not presenting the MSI-X capability in PCI
configuration space. Drivers can control this by mapping events to
as small a number of vectors as possible, or by disabling the
MSI-X capability altogether.
890 | |||
891 | Message Framing | ||
892 | |||
The descriptors used for a buffer should not affect the semantics
of the message, except for the total length of the buffer. For
895 | example, a network buffer consists of a 10 byte header followed | ||
896 | by the network packet. Whether this is presented in the ring | ||
897 | descriptor chain as (say) a 10 byte buffer and a 1514 byte | ||
898 | buffer, or a single 1524 byte buffer, or even three buffers, | ||
899 | should have no effect. | ||
900 | |||
901 | In particular, no implementation should use the descriptor | ||
902 | boundaries to determine the size of any header in a request.[footnote: | ||
903 | The current qemu device implementations mistakenly insist that | ||
904 | the first descriptor cover the header in these cases exactly, so | ||
905 | a cautious driver should arrange it so. | ||
906 | ] | ||
907 | |||
908 | Device Improvements | ||
909 | |||
910 | Any change to configuration space, or new virtqueues, or | ||
911 | behavioural changes, should be indicated by negotiation of a new | ||
912 | feature bit. This establishes clarity[footnote: | ||
913 | Even if it does mean documenting design or implementation | ||
914 | mistakes! | ||
915 | ] and avoids future expansion problems. | ||
916 | |||
917 | Clusters of functionality which are always implemented together | ||
918 | can use a single bit, but if one feature makes sense without the | ||
919 | others they should not be gratuitously grouped together to | ||
920 | conserve feature bits. We can always extend the spec when the | ||
921 | first person needs more than 24 feature bits for their device. | ||
922 | |||
923 | [LaTeX Command: printnomenclature] | ||
924 | |||
925 | Appendix A: virtio_ring.h | ||
926 | |||
927 | #ifndef VIRTIO_RING_H | ||
928 | |||
929 | #define VIRTIO_RING_H | ||
930 | |||
931 | /* An interface for efficient virtio implementation. | ||
932 | |||
933 | * | ||
934 | |||
935 | * This header is BSD licensed so anyone can use the definitions | ||
936 | |||
937 | * to implement compatible drivers/servers. | ||
938 | |||
939 | * | ||
940 | |||
941 | * Copyright 2007, 2009, IBM Corporation | ||
942 | |||
943 | * Copyright 2011, Red Hat, Inc | ||
944 | |||
945 | * All rights reserved. | ||
946 | |||
947 | * | ||
948 | |||
949 | * Redistribution and use in source and binary forms, with or | ||
950 | without | ||
951 | |||
952 | * modification, are permitted provided that the following | ||
953 | conditions | ||
954 | |||
955 | * are met: | ||
956 | |||
957 | * 1. Redistributions of source code must retain the above | ||
958 | copyright | ||
959 | |||
960 | * notice, this list of conditions and the following | ||
961 | disclaimer. | ||
962 | |||
963 | * 2. Redistributions in binary form must reproduce the above | ||
964 | copyright | ||
965 | |||
966 | * notice, this list of conditions and the following | ||
967 | disclaimer in the | ||
968 | |||
969 | * documentation and/or other materials provided with the | ||
970 | distribution. | ||
971 | |||
972 | * 3. Neither the name of IBM nor the names of its contributors | ||
973 | |||
974 | * may be used to endorse or promote products derived from | ||
975 | this software | ||
976 | |||
977 | * without specific prior written permission. | ||
978 | |||
979 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND | ||
980 | CONTRIBUTORS ``AS IS'' AND | ||
981 | |||
982 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
983 | TO, THE | ||
984 | |||
985 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
986 | PARTICULAR PURPOSE | ||
987 | |||
988 | * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE | ||
989 | LIABLE | ||
990 | |||
991 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
992 | CONSEQUENTIAL | ||
993 | |||
994 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
995 | SUBSTITUTE GOODS | ||
996 | |||
997 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
998 | INTERRUPTION) | ||
999 | |||
1000 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
1001 | CONTRACT, STRICT | ||
1002 | |||
1003 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
1004 | IN ANY WAY | ||
1005 | |||
1006 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
1007 | POSSIBILITY OF | ||
1008 | |||
1009 | * SUCH DAMAGE. | ||
1010 | |||
1011 | */ | ||
1012 | |||
1013 | |||
1014 | |||
1015 | /* This marks a buffer as continuing via the next field. */ | ||
1016 | |||
1017 | #define VRING_DESC_F_NEXT 1 | ||
1018 | |||
1019 | /* This marks a buffer as write-only (otherwise read-only). */ | ||
1020 | |||
1021 | #define VRING_DESC_F_WRITE 2 | ||
1022 | |||
1023 | |||
1024 | |||
1025 | /* The Host uses this in used->flags to advise the Guest: don't | ||
1026 | kick me | ||
1027 | |||
1028 | * when you add a buffer. It's unreliable, so it's simply an | ||
1029 | |||
1030 | * optimization. Guest will still kick if it's out of buffers. | ||
1031 | */ | ||
1032 | |||
1033 | #define VRING_USED_F_NO_NOTIFY 1 | ||
1034 | |||
1035 | /* The Guest uses this in avail->flags to advise the Host: don't | ||
1036 | |||
1037 | * interrupt me when you consume a buffer. It's unreliable, so | ||
1038 | it's | ||
1039 | |||
1040 | * simply an optimization. */ | ||
1041 | |||
1042 | #define VRING_AVAIL_F_NO_INTERRUPT 1 | ||
1043 | |||
1044 | |||
1045 | |||
1046 | /* Virtio ring descriptors: 16 bytes. | ||
1047 | |||
1048 | * These can chain together via "next". */ | ||
1049 | |||
1050 | struct vring_desc { | ||
1051 | |||
1052 | /* Address (guest-physical). */ | ||
1053 | |||
1054 | uint64_t addr; | ||
1055 | |||
1056 | /* Length. */ | ||
1057 | |||
1058 | uint32_t len; | ||
1059 | |||
1060 | /* The flags as indicated above. */ | ||
1061 | |||
1062 | uint16_t flags; | ||
1063 | |||
1064 | /* We chain unused descriptors via this, too */ | ||
1065 | |||
1066 | uint16_t next; | ||
1067 | |||
1068 | }; | ||
1069 | |||
1070 | |||
1071 | |||
1072 | struct vring_avail { | ||
1073 | |||
1074 | uint16_t flags; | ||
1075 | |||
1076 | uint16_t idx; | ||
1077 | |||
1078 | uint16_t ring[]; | ||
1079 | |||
1080 | uint16_t used_event; | ||
1081 | |||
1082 | }; | ||
1083 | |||
1084 | |||
1085 | |||
1086 | /* u32 is used here for ids for padding reasons. */ | ||
1087 | |||
1088 | struct vring_used_elem { | ||
1089 | |||
1090 | /* Index of start of used descriptor chain. */ | ||
1091 | |||
1092 | uint32_t id; | ||
1093 | |||
1094 | /* Total length of the descriptor chain which was written | ||
1095 | to. */ | ||
1096 | |||
1097 | uint32_t len; | ||
1098 | |||
1099 | }; | ||
1100 | |||
1101 | |||
1102 | |||
1103 | struct vring_used { | ||
1104 | |||
1105 | uint16_t flags; | ||
1106 | |||
1107 | uint16_t idx; | ||
1108 | |||
1109 | struct vring_used_elem ring[]; | ||
1110 | |||
1111 | uint16_t avail_event; | ||
1112 | |||
1113 | }; | ||
1114 | |||
1115 | |||
1116 | |||
1117 | struct vring { | ||
1118 | |||
1119 | unsigned int num; | ||
1120 | |||
1121 | |||
1122 | |||
1123 | struct vring_desc *desc; | ||
1124 | |||
1125 | struct vring_avail *avail; | ||
1126 | |||
1127 | struct vring_used *used; | ||
1128 | |||
1129 | }; | ||
1130 | |||
1131 | |||
1132 | |||
1133 | /* The standard layout for the ring is a continuous chunk of | ||
1134 | memory which | ||
1135 | |||
1136 | * looks like this. We assume num is a power of 2. | ||
1137 | |||
1138 | * | ||
1139 | |||
1140 | * struct vring { | ||
1141 | |||
1142 | * // The actual descriptors (16 bytes each) | ||
1143 | |||
1144 | * struct vring_desc desc[num]; | ||
1145 | |||
1146 | * | ||
1147 | |||
1148 | * // A ring of available descriptor heads with free-running | ||
1149 | index. | ||
1150 | |||
1151 | * __u16 avail_flags; | ||
1152 | |||
1153 | * __u16 avail_idx; | ||
1154 | |||
1155 | * __u16 available[num]; | ||
1156 | |||
1157 | * | ||
1158 | |||
1159 | * // Padding to the next align boundary. | ||
1160 | |||
1161 | * char pad[]; | ||
1162 | |||
1163 | * | ||
1164 | |||
1165 | * // A ring of used descriptor heads with free-running | ||
1166 | index. | ||
1167 | |||
1168 | * __u16 used_flags; | ||
1169 | |||
1170 | * __u16 EVENT_IDX; | ||
1171 | |||
1172 | * struct vring_used_elem used[num]; | ||
1173 | |||
1174 | * }; | ||
1175 | |||
1176 | * Note: for virtio PCI, align is 4096. | ||
1177 | |||
1178 | */ | ||
1179 | |||
1180 | static inline void vring_init(struct vring *vr, unsigned int num, | ||
1181 | void *p, | ||
1182 | |||
1183 | unsigned long align) | ||
1184 | |||
1185 | { | ||
1186 | |||
1187 | vr->num = num; | ||
1188 | |||
1189 | vr->desc = p; | ||
1190 | |||
1191 | vr->avail = p + num*sizeof(struct vring_desc); | ||
1192 | |||
1193 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] | ||
1194 | |||
1195 | + align-1) | ||
1196 | |||
1197 | & ~(align - 1)); | ||
1198 | |||
1199 | } | ||
1200 | |||
1201 | |||
1202 | |||
1203 | static inline unsigned vring_size(unsigned int num, unsigned long | ||
1204 | align) | ||
1205 | |||
1206 | { | ||
1207 | |||
1208 | return ((sizeof(struct vring_desc)*num + | ||
1209 | sizeof(uint16_t)*(2+num) | ||
1210 | |||
1211 | + align - 1) & ~(align - 1)) | ||
1212 | |||
1213 | + sizeof(uint16_t)*3 + sizeof(struct | ||
1214 | vring_used_elem)*num; | ||
1215 | |||
1216 | } | ||
1217 | |||
1218 | |||
1219 | |||
1220 | static inline int vring_need_event(uint16_t event_idx, uint16_t | ||
1221 | new_idx, uint16_t old_idx) | ||
1222 | |||
1223 | { | ||
1224 | |||
1225 | return (uint16_t)(new_idx - event_idx - 1) < | ||
1226 | (uint16_t)(new_idx - old_idx); | ||
1227 | |||
1228 | } | ||
1229 | |||
1230 | #endif /* VIRTIO_RING_H */ | ||
1231 | |||
1232 | <cha:Reserved-Feature-Bits>Appendix B: Reserved Feature Bits | ||
1233 | |||
1234 | Currently there are five device-independent feature bits defined: | ||
1235 | |||
1236 | VIRTIO_F_NOTIFY_ON_EMPTY (24) Negotiating this feature | ||
1237 | indicates that the driver wants an interrupt if the device runs | ||
1238 | out of available descriptors on a virtqueue, even though | ||
1239 | interrupts are suppressed using the VRING_AVAIL_F_NO_INTERRUPT | ||
1240 | flag or the used_event field. An example of this is the | ||
1241 | networking driver: it doesn't need to know every time a packet | ||
1242 | is transmitted, but it does need to free the transmitted | ||
1243 | packets a finite time after they are transmitted. It can avoid | ||
1244 | using a timer if the device interrupts it when all the packets | ||
1245 | are transmitted. | ||
1246 | |||
1247 | VIRTIO_F_RING_INDIRECT_DESC (28) Negotiating this feature | ||
1248 | indicates that the driver can use descriptors with the | ||
1249 | VRING_DESC_F_INDIRECT flag set, as described in [sub:Indirect-Descriptors] | ||
1250 | . | ||
1251 | |||
1252 | VIRTIO_F_RING_EVENT_IDX(29) This feature enables the used_event | ||
1253 | and the avail_event fields. If set, it indicates that the | ||
1254 | device should ignore the flags field in the available ring | ||
structure. Instead, the used_event field in this structure is
used by the guest to suppress device interrupts. Further, the
driver should ignore the flags field in the used ring
structure. Instead, the avail_event field in this structure is
used by the device to suppress notifications. If unset, the
driver should ignore the used_event field, the device should
ignore the avail_event field, and the flags fields are used
instead.
1262 | |||
1263 | VIRTIO_F_BAD_FEATURE(30) This feature should never be | ||
1264 | negotiated by the guest; doing so is an indication that the | ||
1265 | guest is faulty[footnote: | ||
1266 | An experimental virtio PCI driver contained in Linux version | ||
1267 | 2.6.25 had this problem, and this feature bit can be used to | ||
1268 | detect it. | ||
1269 | ] | ||
1270 | |||
1271 | VIRTIO_F_FEATURES_HIGH(31) This feature indicates that the | ||
1272 | device supports feature bits 32:63. If unset, feature bits | ||
1273 | 32:63 are unset. | ||
1274 | |||
1275 | Appendix C: Network Device | ||
1276 | |||
1277 | The virtio network device is a virtual ethernet card, and is the | ||
most complex of the devices supported so far by virtio. It has
been enhanced rapidly and demonstrates clearly how support for new
1280 | features should be added to an existing device. Empty buffers are | ||
1281 | placed in one virtqueue for receiving packets, and outgoing | ||
1282 | packets are enqueued into another for transmission in that order. | ||
1283 | A third command queue is used to control advanced filtering | ||
1284 | features. | ||
1285 | |||
1286 | Configuration | ||
1287 | |||
1288 | Subsystem Device ID 1 | ||
1289 | |||
1290 | Virtqueues 0:receiveq. 1:transmitq. 2:controlq[footnote: | ||
1291 | Only if VIRTIO_NET_F_CTRL_VQ set | ||
1292 | ] | ||
1293 | |||
1294 | Feature bits | ||
1295 | |||
1296 | VIRTIO_NET_F_CSUM (0) Device handles packets with partial | ||
1297 | checksum | ||
1298 | |||
1299 | VIRTIO_NET_F_GUEST_CSUM (1) Guest handles packets with partial | ||
1300 | checksum | ||
1301 | |||
1302 | VIRTIO_NET_F_MAC (5) Device has given MAC address. | ||
1303 | |||
1304 | VIRTIO_NET_F_GSO (6) (Deprecated) device handles packets with | ||
1305 | any GSO type.[footnote: | ||
1306 | It was supposed to indicate segmentation offload support, but | ||
1307 | upon further investigation it became clear that multiple bits | ||
1308 | were required. | ||
1309 | ] | ||
1310 | |||
1311 | VIRTIO_NET_F_GUEST_TSO4 (7) Guest can receive TSOv4. | ||
1312 | |||
1313 | VIRTIO_NET_F_GUEST_TSO6 (8) Guest can receive TSOv6. | ||
1314 | |||
1315 | VIRTIO_NET_F_GUEST_ECN (9) Guest can receive TSO with ECN. | ||
1316 | |||
1317 | VIRTIO_NET_F_GUEST_UFO (10) Guest can receive UFO. | ||
1318 | |||
1319 | VIRTIO_NET_F_HOST_TSO4 (11) Device can receive TSOv4. | ||
1320 | |||
1321 | VIRTIO_NET_F_HOST_TSO6 (12) Device can receive TSOv6. | ||
1322 | |||
1323 | VIRTIO_NET_F_HOST_ECN (13) Device can receive TSO with ECN. | ||
1324 | |||
1325 | VIRTIO_NET_F_HOST_UFO (14) Device can receive UFO. | ||
1326 | |||
1327 | VIRTIO_NET_F_MRG_RXBUF (15) Guest can merge receive buffers. | ||
1328 | |||
1329 | VIRTIO_NET_F_STATUS (16) Configuration status field is | ||
1330 | available. | ||
1331 | |||
1332 | VIRTIO_NET_F_CTRL_VQ (17) Control channel is available. | ||
1333 | |||
1334 | VIRTIO_NET_F_CTRL_RX (18) Control channel RX mode support. | ||
1335 | |||
1336 | VIRTIO_NET_F_CTRL_VLAN (19) Control channel VLAN filtering. | ||
1337 | |||
1338 | Device configuration layout Two configuration fields are | ||
1339 | currently defined. The mac address field always exists (though | ||
1340 | is only valid if VIRTIO_NET_F_MAC is set), and the status field | ||
1341 | only exists if VIRTIO_NET_F_STATUS is set. Only one bit is | ||
currently defined for the status field: VIRTIO_NET_S_LINK_UP.

#define VIRTIO_NET_S_LINK_UP 1
1343 | |||
1344 | |||
1345 | |||
1346 | struct virtio_net_config { | ||
1347 | |||
1348 | u8 mac[6]; | ||
1349 | |||
1350 | u16 status; | ||
1351 | |||
1352 | }; | ||
1353 | |||
1354 | Device Initialization | ||
1355 | |||
1356 | The initialization routine should identify the receive and | ||
1357 | transmission virtqueues. | ||
1358 | |||
1359 | If the VIRTIO_NET_F_MAC feature bit is set, the configuration | ||
space “mac” entry indicates the “physical” address of the
1361 | network card, otherwise a private MAC address should be | ||
1362 | assigned. All guests are expected to negotiate this feature if | ||
1363 | it is set. | ||
1364 | |||
1365 | If the VIRTIO_NET_F_CTRL_VQ feature bit is negotiated, identify | ||
1366 | the control virtqueue. | ||
1367 | |||
1368 | If the VIRTIO_NET_F_STATUS feature bit is negotiated, the link | ||
1369 | status can be read from the bottom bit of the “status” config | ||
1370 | field. Otherwise, the link should be assumed active. | ||
1371 | |||
1372 | The receive virtqueue should be filled with receive buffers. | ||
1373 | This is described in detail below in “Setting Up Receive | ||
1374 | Buffers”. | ||
1375 | |||
1376 | A driver can indicate that it will generate checksumless | ||
packets by negotiating the VIRTIO_NET_F_CSUM feature. This “
1378 | checksum offload” is a common feature on modern network cards. | ||
1379 | |||
1380 | If that feature is negotiated, a driver can use TCP or UDP | ||
1381 | segmentation offload by negotiating the VIRTIO_NET_F_HOST_TSO4 | ||
1382 | (IPv4 TCP), VIRTIO_NET_F_HOST_TSO6 (IPv6 TCP) and | ||
1383 | VIRTIO_NET_F_HOST_UFO (UDP fragmentation) features. It should | ||
1384 | not send TCP packets requiring segmentation offload which have | ||
1385 | the Explicit Congestion Notification bit set, unless the | ||
1386 | VIRTIO_NET_F_HOST_ECN feature is negotiated.[footnote: | ||
1387 | This is a common restriction in real, older network cards. | ||
1388 | ] | ||
1389 | |||
1390 | The converse features are also available: a driver can save the | ||
1391 | virtual device some work by negotiating these features.[footnote: | ||
1392 | For example, a network packet transported between two guests on | ||
1393 | the same system may not require checksumming at all, nor | ||
1394 | segmentation, if both guests are amenable. | ||
1395 | ] The VIRTIO_NET_F_GUEST_CSUM feature indicates that partially | ||
1396 | checksummed packets can be received, and if it can do that then | ||
1397 | the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | ||
1398 | VIRTIO_NET_F_GUEST_UFO and VIRTIO_NET_F_GUEST_ECN are the input | ||
1399 | equivalents of the features described above. See “Receiving | ||
1400 | Packets” below. | ||
1401 | |||
1402 | Device Operation | ||
1403 | |||
1404 | Packets are transmitted by placing them in the transmitq, and | ||
1405 | buffers for incoming packets are placed in the receiveq. In each | ||
case, the packet itself is preceded by a header:
1407 | |||
1408 | struct virtio_net_hdr { | ||
1409 | |||
1410 | #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 | ||
1411 | |||
1412 | u8 flags; | ||
1413 | |||
1414 | #define VIRTIO_NET_HDR_GSO_NONE 0 | ||
1415 | |||
1416 | #define VIRTIO_NET_HDR_GSO_TCPV4 1 | ||
1417 | |||
1418 | #define VIRTIO_NET_HDR_GSO_UDP 3 | ||
1419 | |||
1420 | #define VIRTIO_NET_HDR_GSO_TCPV6 4 | ||
1421 | |||
1422 | #define VIRTIO_NET_HDR_GSO_ECN 0x80 | ||
1423 | |||
1424 | u8 gso_type; | ||
1425 | |||
1426 | u16 hdr_len; | ||
1427 | |||
1428 | u16 gso_size; | ||
1429 | |||
1430 | u16 csum_start; | ||
1431 | |||
1432 | u16 csum_offset; | ||
1433 | |||
1434 | /* Only if VIRTIO_NET_F_MRG_RXBUF: */ | ||
1435 | |||
u16 num_buffers;
1437 | |||
1438 | }; | ||
1439 | |||
1440 | The controlq is used to control device features such as | ||
1441 | filtering. | ||
1442 | |||
1443 | Packet Transmission | ||
1444 | |||
Transmitting a single packet is simple, but varies depending on
the different features the driver negotiated; a short sketch in C
follows these steps.
1447 | |||
1448 | If the driver negotiated VIRTIO_NET_F_CSUM, and the packet has | ||
1449 | not been fully checksummed, then the virtio_net_hdr's fields | ||
1450 | are set as follows. Otherwise, the packet must be fully | ||
1451 | checksummed, and flags is zero. | ||
1452 | |||
1453 | flags has the VIRTIO_NET_HDR_F_NEEDS_CSUM set, | ||
1454 | |||
1455 | <ite:csum_start-is-set>csum_start is set to the offset within | ||
1456 | the packet to begin checksumming, and | ||
1457 | |||
1458 | csum_offset indicates how many bytes after the csum_start the | ||
1459 | new (16 bit ones' complement) checksum should be placed.[footnote: | ||
1460 | For example, consider a partially checksummed TCP (IPv4) packet. | ||
1461 | It will have a 14 byte ethernet header and 20 byte IP header | ||
1462 | followed by the TCP header (with the TCP checksum field 16 bytes | ||
1463 | into that header). csum_start will be 14+20 = 34 (the TCP | ||
1464 | checksum includes the header), and csum_offset will be 16. The | ||
1465 | value in the TCP checksum field will be the sum of the TCP pseudo | ||
1466 | header, so that replacing it by the ones' complement checksum of | ||
1467 | the TCP header and body will give the correct result. | ||
1468 | ] | ||
1469 | |||
1470 | <enu:If-the-driver>If the driver negotiated | ||
1471 | VIRTIO_NET_F_HOST_TSO4, TSO6 or UFO, and the packet requires | ||
1472 | TCP segmentation or UDP fragmentation, then the “gso_type” | ||
1473 | field is set to VIRTIO_NET_HDR_GSO_TCPV4, TCPV6 or UDP. | ||
1474 | (Otherwise, it is set to VIRTIO_NET_HDR_GSO_NONE). In this | ||
1475 | case, packets larger than 1514 bytes can be transmitted: the | ||
1476 | metadata indicates how to replicate the packet header to cut it | ||
1477 | into smaller packets. The other gso fields are set: | ||
1478 | |||
1479 | hdr_len is a hint to the device as to how much of the header | ||
1480 | needs to be kept to copy into each packet, usually set to the | ||
1481 | length of the headers, including the transport header.[footnote: | ||
1482 | Due to various bugs in implementations, this field is not useful | ||
1483 | as a guarantee of the transport header size. | ||
1484 | ] | ||
1485 | |||
1486 | gso_size is the size of the packet beyond that header (ie. | ||
1487 | MSS). | ||
1488 | |||
1489 | If the driver negotiated the VIRTIO_NET_F_HOST_ECN feature, the | ||
1490 | VIRTIO_NET_HDR_GSO_ECN bit may be set in “gso_type” as well, | ||
1491 | indicating that the TCP packet has the ECN bit set.[footnote: | ||
1492 | This case is not handled by some older hardware, so is called out | ||
1493 | specifically in the protocol. | ||
1494 | ] | ||
1495 | |||
1496 | If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature, | ||
1497 | the num_buffers field is set to zero. | ||
1498 | |||
1499 | The header and packet are added as one output buffer to the | ||
1500 | transmitq, and the device is notified of the new entry (see [sub:Notifying-The-Device] | ||
1501 | ).[footnote: | ||
1502 | Note that the header will be two bytes longer for the | ||
1503 | VIRTIO_NET_F_MRG_RXBUF case. | ||
1504 | ] | ||
1505 | |||
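As a sketch of the header set-up in the steps above, here are the two common cases: a fully checksummed packet, and a partially checksummed TCP/IPv4 packet using the example offsets from the footnote. The struct is the virtio_net_hdr defined earlier; the helper names are only illustrative.

#include <string.h>

/* Fill the header for a packet that is already fully checksummed. */
static void hdr_fully_checksummed(struct virtio_net_hdr *h)
{
        memset(h, 0, sizeof(*h));
        h->flags    = 0;
        h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
}

/* Fill the header for a partially checksummed TCP/IPv4 packet:
 * 14-byte ethernet header + 20-byte IP header, with the checksum field
 * 16 bytes into the TCP header (the example from the footnote above). */
static void hdr_partial_csum_tcpv4(struct virtio_net_hdr *h)
{
        memset(h, 0, sizeof(*h));
        h->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
        h->gso_type    = VIRTIO_NET_HDR_GSO_NONE;  /* no segmentation needed */
        h->csum_start  = 14 + 20;                  /* checksum from the TCP header on */
        h->csum_offset = 16;                       /* place the result 16 bytes in */
}
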
1506 | Packet Transmission Interrupt | ||
1507 | |||
1508 | Often a driver will suppress transmission interrupts using the | ||
1509 | VRING_AVAIL_F_NO_INTERRUPT flag (see [sub:Receiving-Used-Buffers] | ||
1510 | ) and check for used packets in the transmit path of following | ||
1511 | packets. However, it will still receive interrupts if the | ||
1512 | VIRTIO_F_NOTIFY_ON_EMPTY feature is negotiated, indicating that | ||
1513 | the transmission queue is completely emptied. | ||
1514 | |||
The normal behavior in this interrupt handler is to retrieve any
new descriptors from the used ring and free the corresponding
headers and packets.
1518 | |||
1519 | Setting Up Receive Buffers | ||
1520 | |||
1521 | It is generally a good idea to keep the receive virtqueue as | ||
1522 | fully populated as possible: if it runs out, network performance | ||
1523 | will suffer. | ||
1524 | |||
1525 | If the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6 or | ||
1526 | VIRTIO_NET_F_GUEST_UFO features are used, the Guest will need to | ||
accept packets up to 65550 bytes long (the maximum size of a
1528 | TCP or UDP packet, plus the 14 byte ethernet header), otherwise | ||
1529 | 1514 bytes. So unless VIRTIO_NET_F_MRG_RXBUF is negotiated, every | ||
1530 | buffer in the receive queue needs to be at least this length [footnote: | ||
1531 | Obviously each one can be split across multiple descriptor | ||
1532 | elements. | ||
1533 | ]. | ||
1534 | |||
1535 | If VIRTIO_NET_F_MRG_RXBUF is negotiated, each buffer must be at | ||
1536 | least the size of the struct virtio_net_hdr. | ||
1537 | |||
1538 | Packet Receive Interrupt | ||
1539 | |||
1540 | When a packet is copied into a buffer in the receiveq, the | ||
1541 | optimal path is to disable further interrupts for the receiveq | ||
1542 | (see [sub:Receiving-Used-Buffers]) and process packets until no | ||
1543 | more are found, then re-enable them. | ||
1544 | |||
Processing a packet involves:
1546 | |||
1547 | If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature, | ||
1548 | then the “num_buffers” field indicates how many descriptors | ||
1549 | this packet is spread over (including this one). This allows | ||
1550 | receipt of large packets without having to allocate large | ||
buffers. In this case, there will be at least “num_buffers”
entries in the used ring, and they should be chained together to form a
1553 | single packet. The other buffers will not begin with a struct | ||
1554 | virtio_net_hdr. | ||
1555 | |||
1556 | If the VIRTIO_NET_F_MRG_RXBUF feature was not negotiated, or | ||
1557 | the “num_buffers” field is one, then the entire packet will be | ||
1558 | contained within this buffer, immediately following the struct | ||
1559 | virtio_net_hdr. | ||
1560 | |||
1561 | If the VIRTIO_NET_F_GUEST_CSUM feature was negotiated, the | ||
1562 | VIRTIO_NET_HDR_F_NEEDS_CSUM bit in the “flags” field may be | ||
1563 | set: if so, the checksum on the packet is incomplete and the “ | ||
1564 | csum_start” and “csum_offset” fields indicate how to calculate | ||
1565 | it (see [ite:csum_start-is-set]). | ||
1566 | |||
1567 | If the VIRTIO_NET_F_GUEST_TSO4, TSO6 or UFO options were | ||
1568 | negotiated, then the “gso_type” may be something other than | ||
1569 | VIRTIO_NET_HDR_GSO_NONE, and the “gso_size” field indicates the | ||
desired MSS (see [enu:If-the-driver]).

Control Virtqueue
1571 | |||
The driver uses the control virtqueue (if VIRTIO_NET_F_CTRL_VQ is
1573 | negotiated) to send commands to manipulate various features of | ||
1574 | the device which would not easily map into the configuration | ||
1575 | space. | ||
1576 | |||
1577 | All commands are of the following form: | ||
1578 | |||
1579 | struct virtio_net_ctrl { | ||
1580 | |||
1581 | u8 class; | ||
1582 | |||
1583 | u8 command; | ||
1584 | |||
1585 | u8 command-specific-data[]; | ||
1586 | |||
1587 | u8 ack; | ||
1588 | |||
1589 | }; | ||
1590 | |||
1591 | |||
1592 | |||
1593 | /* ack values */ | ||
1594 | |||
1595 | #define VIRTIO_NET_OK 0 | ||
1596 | |||
1597 | #define VIRTIO_NET_ERR 1 | ||
1598 | |||
1599 | The class, command and command-specific-data are set by the | ||
driver, and the device sets the ack byte. There is little the
driver can do except issue a diagnostic if the ack byte is not
VIRTIO_NET_OK.
1603 | |||
1604 | Packet Receive Filtering | ||
1605 | |||
1606 | If the VIRTIO_NET_F_CTRL_RX feature is negotiated, the driver can | ||
1607 | send control commands for promiscuous mode, multicast receiving, | ||
1608 | and filtering of MAC addresses. | ||
1609 | |||
1610 | Note that in general, these commands are best-effort: unwanted | ||
1611 | packets may still arrive. | ||
1612 | |||
1613 | Setting Promiscuous Mode | ||
1614 | |||
1615 | #define VIRTIO_NET_CTRL_RX 0 | ||
1616 | |||
1617 | #define VIRTIO_NET_CTRL_RX_PROMISC 0 | ||
1618 | |||
1619 | #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 | ||
1620 | |||
1621 | The class VIRTIO_NET_CTRL_RX has two commands: | ||
1622 | VIRTIO_NET_CTRL_RX_PROMISC turns promiscuous mode on and off, and | ||
1623 | VIRTIO_NET_CTRL_RX_ALLMULTI turns all-multicast receive on and | ||
1624 | off. The command-specific-data is one byte containing 0 (off) or | ||
1625 | 1 (on). | ||
1626 | |||
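A sketch of the buffer layout for such a command, sent on the control virtqueue as described above. The add_buf_to_controlq() routine is a hypothetical stand-in for whatever the driver uses to add the buffers, notify the device and wait for the device to use them.

#include <stdint.h>

/* Hypothetical: add a read-only and a write-only buffer to the controlq,
 * kick the device and wait for the buffers to be used. */
extern void add_buf_to_controlq(const void *out, unsigned int out_len,
                                void *in, unsigned int in_len);

/* Turn promiscuous mode on (on_off = 1) or off (on_off = 0).
 * Returns 0 on VIRTIO_NET_OK, -1 otherwise. */
static int set_promiscuous(uint8_t on_off)
{
        struct {
                uint8_t class;
                uint8_t command;
                uint8_t on_off;   /* command-specific-data: one byte, 0 or 1 */
        } cmd = {
                .class   = VIRTIO_NET_CTRL_RX,
                .command = VIRTIO_NET_CTRL_RX_PROMISC,
                .on_off  = on_off,
        };
        uint8_t ack = VIRTIO_NET_ERR;

        /* The command is read-only for the device, the ack byte write-only. */
        add_buf_to_controlq(&cmd, sizeof(cmd), &ack, sizeof(ack));

        return ack == VIRTIO_NET_OK ? 0 : -1;
}
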
1627 | Setting MAC Address Filtering | ||
1628 | |||
1629 | struct virtio_net_ctrl_mac { | ||
1630 | |||
1631 | u32 entries; | ||
1632 | |||
1633 | u8 macs[entries][ETH_ALEN]; | ||
1634 | |||
1635 | }; | ||
1636 | |||
1637 | |||
1638 | |||
1639 | #define VIRTIO_NET_CTRL_MAC 1 | ||
1640 | |||
1641 | #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 | ||
1642 | |||
1643 | The device can filter incoming packets by any number of | ||
1644 | destination MAC addresses.[footnote: | ||
Since there are no guarantees, it can use a hash filter or
silently switch to allmulti or promiscuous mode if it is given
1647 | too many addresses. | ||
1648 | ] This table is set using the class VIRTIO_NET_CTRL_MAC and the | ||
1649 | command VIRTIO_NET_CTRL_MAC_TABLE_SET. The command-specific-data | ||
1650 | is two variable length tables of 6-byte MAC addresses. The first | ||
1651 | table contains unicast addresses, and the second contains | ||
1652 | multicast addresses. | ||
1653 | |||
1654 | VLAN Filtering | ||
1655 | |||
If the driver negotiates the VIRTIO_NET_F_CTRL_VLAN feature, it
1657 | can control a VLAN filter table in the device. | ||
1658 | |||
1659 | #define VIRTIO_NET_CTRL_VLAN 2 | ||
1660 | |||
1661 | #define VIRTIO_NET_CTRL_VLAN_ADD 0 | ||
1662 | |||
1663 | #define VIRTIO_NET_CTRL_VLAN_DEL 1 | ||
1664 | |||
1665 | Both the VIRTIO_NET_CTRL_VLAN_ADD and VIRTIO_NET_CTRL_VLAN_DEL | ||
commands take a 16-bit VLAN id as the command-specific-data.
1667 | |||
1668 | Appendix D: Block Device | ||
1669 | |||
1670 | The virtio block device is a simple virtual block device (ie. | ||
1671 | disk). Read and write requests (and other exotic requests) are | ||
1672 | placed in the queue, and serviced (probably out of order) by the | ||
1673 | device except where noted. | ||
1674 | |||
1675 | Configuration | ||
1676 | |||
1677 | Subsystem Device ID 2 | ||
1678 | |||
1679 | Virtqueues 0:requestq. | ||
1680 | |||
1681 | Feature bits | ||
1682 | |||
1683 | VIRTIO_BLK_F_BARRIER (0) Host supports request barriers. | ||
1684 | |||
1685 | VIRTIO_BLK_F_SIZE_MAX (1) Maximum size of any single segment is | ||
1686 | in “size_max”. | ||
1687 | |||
1688 | VIRTIO_BLK_F_SEG_MAX (2) Maximum number of segments in a | ||
1689 | request is in “seg_max”. | ||
1690 | |||
1691 | VIRTIO_BLK_F_GEOMETRY (4) Disk-style geometry specified in “ | ||
1692 | geometry”. | ||
1693 | |||
1694 | VIRTIO_BLK_F_RO (5) Device is read-only. | ||
1695 | |||
1696 | VIRTIO_BLK_F_BLK_SIZE (6) Block size of disk is in “blk_size”. | ||
1697 | |||
1698 | VIRTIO_BLK_F_SCSI (7) Device supports scsi packet commands. | ||
1699 | |||
1700 | VIRTIO_BLK_F_FLUSH (9) Cache flush command support. | ||
1701 | |||
1702 | |||
1703 | |||
1704 | Device configuration layout The capacity of the device | ||
1705 | (expressed in 512-byte sectors) is always present. The | ||
1706 | availability of the others all depend on various feature bits | ||
as indicated above.

struct virtio_blk_config {
1708 | |||
1709 | u64 capacity; | ||
1710 | |||
1711 | u32 size_max; | ||
1712 | |||
1713 | u32 seg_max; | ||
1714 | |||
1715 | struct virtio_blk_geometry { | ||
1716 | |||
1717 | u16 cylinders; | ||
1718 | |||
1719 | u8 heads; | ||
1720 | |||
1721 | u8 sectors; | ||
1722 | |||
1723 | } geometry; | ||
1724 | |||
1725 | u32 blk_size; | ||
1726 | |||
1727 | |||
1728 | |||
1729 | }; | ||
1730 | |||
1731 | Device Initialization | ||
1732 | |||
1733 | The device size should be read from the “capacity” | ||
configuration field. No requests should be submitted which go
beyond this limit.
1736 | |||
1737 | If the VIRTIO_BLK_F_BLK_SIZE feature is negotiated, the | ||
1738 | blk_size field can be read to determine the optimal sector size | ||
for the driver to use. This does not affect the units used in
the protocol (always 512 bytes), but awareness of the correct
value can affect performance.
1742 | |||
1743 | If the VIRTIO_BLK_F_RO feature is set by the device, any write | ||
1744 | requests will fail. | ||
1745 | |||
1746 | |||
1747 | |||
1748 | Device Operation | ||
1749 | |||
1750 | The driver queues requests to the virtqueue, and they are used by | ||
the device (not necessarily in order). Each request is of the form:
1752 | |||
1753 | struct virtio_blk_req { | ||
1754 | |||
1755 | |||
1756 | |||
1757 | u32 type; | ||
1758 | |||
1759 | u32 ioprio; | ||
1760 | |||
1761 | u64 sector; | ||
1762 | |||
1763 | char data[][512]; | ||
1764 | |||
1765 | u8 status; | ||
1766 | |||
1767 | }; | ||
1768 | |||
If the device has the VIRTIO_BLK_F_SCSI feature, it can also
support scsi packet command requests. Each of these requests is of
the form:

struct virtio_scsi_pc_req {
1771 | |||
1772 | u32 type; | ||
1773 | |||
1774 | u32 ioprio; | ||
1775 | |||
1776 | u64 sector; | ||
1777 | |||
1778 | char cmd[]; | ||
1779 | |||
1780 | char data[][512]; | ||
1781 | |||
1782 | #define SCSI_SENSE_BUFFERSIZE 96 | ||
1783 | |||
1784 | u8 sense[SCSI_SENSE_BUFFERSIZE]; | ||
1785 | |||
1786 | u32 errors; | ||
1787 | |||
1788 | u32 data_len; | ||
1789 | |||
1790 | u32 sense_len; | ||
1791 | |||
1792 | u32 residual; | ||
1793 | |||
1794 | u8 status; | ||
1795 | |||
1796 | }; | ||
1797 | |||
1798 | The type of the request is either a read (VIRTIO_BLK_T_IN), a | ||
1799 | write (VIRTIO_BLK_T_OUT), a scsi packet command | ||
1800 | (VIRTIO_BLK_T_SCSI_CMD or VIRTIO_BLK_T_SCSI_CMD_OUT[footnote: | ||
1801 | the SCSI_CMD and SCSI_CMD_OUT types are equivalent, the device | ||
1802 | does not distinguish between them | ||
1803 | ]) or a flush (VIRTIO_BLK_T_FLUSH or VIRTIO_BLK_T_FLUSH_OUT[footnote: | ||
1804 | the FLUSH and FLUSH_OUT types are equivalent, the device does not | ||
1805 | distinguish between them | ||
]). If the device has the VIRTIO_BLK_F_BARRIER feature, the high
bit (VIRTIO_BLK_T_BARRIER) indicates that this request acts as a
barrier: all preceding requests must be complete before this one,
and all following requests must not be started until this one is
complete. Note that a barrier does not flush caches in the
underlying backend device in the host, and thus does not serve as
a data consistency guarantee. The driver must use a FLUSH request
to flush the host cache.
1814 | |||
1815 | #define VIRTIO_BLK_T_IN 0 | ||
1816 | |||
1817 | #define VIRTIO_BLK_T_OUT 1 | ||
1818 | |||
1819 | #define VIRTIO_BLK_T_SCSI_CMD 2 | ||
1820 | |||
1821 | #define VIRTIO_BLK_T_SCSI_CMD_OUT 3 | ||
1822 | |||
1823 | #define VIRTIO_BLK_T_FLUSH 4 | ||
1824 | |||
1825 | #define VIRTIO_BLK_T_FLUSH_OUT 5 | ||
1826 | |||
1827 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | ||
1828 | |||
1829 | The ioprio field is a hint about the relative priorities of | ||
1830 | requests to the device: higher numbers indicate more important | ||
1831 | requests. | ||
1832 | |||
1833 | The sector number indicates the offset (multiplied by 512) where | ||
1834 | the read or write is to occur. This field is unused and set to 0 | ||
1835 | for scsi packet commands and for flush commands. | ||
1836 | |||
1837 | The cmd field is only present for scsi packet command requests, | ||
1838 | and indicates the command to perform. This field must reside in a | ||
1839 | single, separate read-only buffer; command length can be derived | ||
1840 | from the length of this buffer. | ||
1841 | |||
1842 | Note that these first three (four for scsi packet commands) | ||
1843 | fields are always read-only: the data field is either read-only | ||
1844 | or write-only, depending on the request. The size of the read or | ||
1845 | write can be derived from the total size of the request buffers. | ||
1846 | |||
1847 | The sense field is only present for scsi packet command requests, | ||
1848 | and indicates the buffer for scsi sense data. | ||
1849 | |||
The data_len field is only present for scsi packet command
requests; it is deprecated and should be ignored by the
driver. Historically, devices copied the data length there.
1853 | |||
1854 | The sense_len field is only present for scsi packet command | ||
1855 | requests and indicates the number of bytes actually written to | ||
1856 | the sense buffer. | ||
1857 | |||
1858 | The residual field is only present for scsi packet command | ||
1859 | requests and indicates the residual size, calculated as data | ||
1860 | length - number of bytes actually transferred. | ||
1861 | |||
1862 | The final status byte is written by the device: either | ||
1863 | VIRTIO_BLK_S_OK for success, VIRTIO_BLK_S_IOERR for host or guest | ||
error, or VIRTIO_BLK_S_UNSUPP for a request unsupported by the host:

#define VIRTIO_BLK_S_OK 0
1865 | |||
1866 | #define VIRTIO_BLK_S_IOERR 1 | ||
1867 | |||
1868 | #define VIRTIO_BLK_S_UNSUPP 2 | ||
1869 | |||
Historically, devices assumed that the fields type, ioprio and
sector reside in a single, separate read-only buffer; that the
fields errors, data_len, sense_len and residual reside in a
single, separate write-only buffer; that the sense field resides
in a separate write-only buffer of 96 bytes, by itself; and that
the status field is a separate write-only buffer of size 1 byte,
by itself.
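
As an illustration of the layout described above, the following sketch builds a read request as a three-descriptor chain (read-only header, write-only data, write-only status), using the historic framing just described. The virtio_blk_outhdr name for the first three fields, the guest_phys() helper and the descriptor indices are assumptions of the example; the flags and type values are those defined in this appendix.

#include <stdint.h>

extern uint64_t guest_phys(void *p);   /* hypothetical address translation */

struct virtio_blk_outhdr {             /* the first three request fields */
        uint32_t type;
        uint32_t ioprio;
        uint64_t sector;
};

/* Describe a VIRTIO_BLK_T_IN (read) of 'sectors' 512-byte sectors into buf,
 * using three free descriptors d[0..2] assumed to be chosen by the caller. */
static void fill_read_request(struct vring_desc d[3],
                              struct virtio_blk_outhdr *hdr,
                              void *buf, uint32_t sectors,
                              uint64_t sector, uint8_t *status)
{
        hdr->type   = VIRTIO_BLK_T_IN;
        hdr->ioprio = 0;
        hdr->sector = sector;

        d[0].addr  = guest_phys(hdr);     /* header: the device reads it */
        d[0].len   = sizeof(*hdr);
        d[0].flags = VRING_DESC_F_NEXT;
        d[0].next  = 1;

        d[1].addr  = guest_phys(buf);     /* data: the device writes it (a read) */
        d[1].len   = sectors * 512;
        d[1].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        d[1].next  = 2;

        d[2].addr  = guest_phys(status);  /* status byte: the device writes it */
        d[2].len   = 1;
        d[2].flags = VRING_DESC_F_WRITE;
        d[2].next  = 0;
}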
1878 | |||
1879 | Appendix E: Console Device | ||
1880 | |||
1881 | The virtio console device is a simple device for data input and | ||
1882 | output. A device may have one or more ports. Each port has a pair | ||
1883 | of input and output virtqueues. Moreover, a device has a pair of | ||
1884 | control IO virtqueues. The control virtqueues are used to | ||
1885 | communicate information between the device and the driver about | ||
1886 | ports being opened and closed on either side of the connection, | ||
1887 | indication from the host about whether a particular port is a | ||
1888 | console port, adding new ports, port hot-plug/unplug, etc., and | ||
1889 | indication from the guest about whether a port or a device was | ||
successfully added, port open/close, etc. For data IO, one or
1891 | more empty buffers are placed in the receive queue for incoming | ||
1892 | data and outgoing characters are placed in the transmit queue. | ||
1893 | |||
1894 | Configuration | ||
1895 | |||
1896 | Subsystem Device ID 3 | ||
1897 | |||
1898 | Virtqueues 0:receiveq(port0). 1:transmitq(port0), 2:control | ||
1899 | receiveq[footnote: | ||
1900 | Ports 2 onwards only if VIRTIO_CONSOLE_F_MULTIPORT is set | ||
1901 | ], 3:control transmitq, 4:receiveq(port1), 5:transmitq(port1), | ||
1902 | ... | ||
1903 | |||
1904 | Feature bits | ||
1905 | |||
1906 | VIRTIO_CONSOLE_F_SIZE (0) Configuration cols and rows fields | ||
1907 | are valid. | ||
1908 | |||
1909 | VIRTIO_CONSOLE_F_MULTIPORT(1) Device has support for multiple | ||
1910 | ports; configuration fields nr_ports and max_nr_ports are | ||
1911 | valid and control virtqueues will be used. | ||
1912 | |||
1913 | Device configuration layout The size of the console is supplied | ||
1914 | in the configuration space if the VIRTIO_CONSOLE_F_SIZE feature | ||
1915 | is set. Furthermore, if the VIRTIO_CONSOLE_F_MULTIPORT feature | ||
1916 | is set, the maximum number of ports supported by the device can | ||
be fetched.

struct virtio_console_config {
1918 | |||
1919 | u16 cols; | ||
1920 | |||
1921 | u16 rows; | ||
1922 | |||
1923 | |||
1924 | |||
1925 | u32 max_nr_ports; | ||
1926 | |||
1927 | }; | ||
1928 | |||
1929 | Device Initialization | ||
1930 | |||
1931 | If the VIRTIO_CONSOLE_F_SIZE feature is negotiated, the driver | ||
1932 | can read the console dimensions from the configuration fields. | ||
1933 | |||
1934 | If the VIRTIO_CONSOLE_F_MULTIPORT feature is negotiated, the | ||
1935 | driver can spawn multiple ports, not all of which may be | ||
1936 | attached to a console. Some could be generic ports. In this | ||
case, the control virtqueues are enabled and, according to the
max_nr_ports configuration-space value, the appropriate number
of virtqueues is created. A control message indicating the
1940 | driver is ready is sent to the host. The host can then send | ||
1941 | control messages for adding new ports to the device. After | ||
1942 | creating and initializing each port, a | ||
1943 | VIRTIO_CONSOLE_PORT_READY control message is sent to the host | ||
1944 | for that port so the host can inform the driver of any | ||
1945 | additional configuration options set for that port. | ||
1946 | |||
1947 | The receiveq for each port is populated with one or more | ||
1948 | receive buffers. | ||
1949 | |||
1950 | Device Operation | ||
1951 | |||
1952 | For output, a buffer containing the characters is placed in the | ||
1953 | port's transmitq.[footnote: | ||
1954 | Because this is of high importance and low bandwidth, the current | ||
1955 | Linux implementation polls for the buffer to be used, rather than | ||
1956 | waiting for an interrupt, simplifying the implementation | ||
1957 | significantly. However, for generic serial ports with the | ||
1958 | O_NONBLOCK flag set, the polling limitation is relaxed and the | ||
1959 | consumed buffers are freed upon the next write or poll call or | ||
1960 | when a port is closed or hot-unplugged. | ||
1961 | ] | ||
1962 | |||
1963 | When a buffer is used in the receiveq (signalled by an | ||
1964 | interrupt), its contents are the input to the port associated | ||
1965 | with the virtqueue for which the notification was received. | ||
1966 | |||
1967 | If the driver negotiated the VIRTIO_CONSOLE_F_SIZE feature, a | ||
1968 | configuration change interrupt may occur. The updated size can | ||
1969 | be read from the configuration fields. | ||
1970 | |||
1971 | If the driver negotiated the VIRTIO_CONSOLE_F_MULTIPORT | ||
1972 | feature, active ports are announced by the host using the | ||
1973 | VIRTIO_CONSOLE_PORT_ADD control message. The same message is | ||
1974 | used for port hot-plug as well. | ||
1975 | |||
1976 | If the host specified a port `name', a sysfs attribute is | ||
1977 | created with the name filled in, so that udev rules can be | ||
1978 | written that can create a symlink from the port's name to the | ||
1979 | char device for port discovery by applications in the guest. | ||
1980 | |||
1981 | Changes to ports' state are effected by control messages. | ||
1982 | Appropriate action is taken on the port indicated in the | ||
1983 | control message. The layout of the control buffer structure and | ||
1984 | the events associated with it are: | ||
1985 | |||
1986 | struct virtio_console_control { | ||
1987 | 	uint32_t id; /* Port number */ | ||
1988 | 	uint16_t event; /* The kind of control event */ | ||
1989 | 	uint16_t value; /* Extra information for the event */ | ||
1990 | }; | ||
1993 | |||
1994 | |||
1995 | |||
1996 | /* Some events for the internal messages (control packets) */ | ||
1997 | |||
1998 | #define VIRTIO_CONSOLE_DEVICE_READY 0 | ||
1999 | #define VIRTIO_CONSOLE_PORT_ADD 1 | ||
2000 | #define VIRTIO_CONSOLE_PORT_REMOVE 2 | ||
2001 | #define VIRTIO_CONSOLE_PORT_READY 3 | ||
2002 | #define VIRTIO_CONSOLE_CONSOLE_PORT 4 | ||
2003 | #define VIRTIO_CONSOLE_RESIZE 5 | ||
2004 | #define VIRTIO_CONSOLE_PORT_OPEN 6 | ||
2005 | #define VIRTIO_CONSOLE_PORT_NAME 7 | ||
2015 | |||
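As a hedged sketch (not the in-tree driver, which lives in
drivers/char/virtio_console.c), a driver might dispatch these events when a
buffer arrives on the control receiveq roughly as follows:

	/* Sketch: act on one control message received from the host. */
	static void example_handle_control(struct virtio_console_control *cpkt)
	{
		switch (cpkt->event) {
		case VIRTIO_CONSOLE_PORT_ADD:
			/* Create port cpkt->id, then answer with a
			 * VIRTIO_CONSOLE_PORT_READY message for it. */
			break;
		case VIRTIO_CONSOLE_PORT_REMOVE:
			/* Hot-unplug: tear down port cpkt->id. */
			break;
		case VIRTIO_CONSOLE_CONSOLE_PORT:
			/* A non-zero cpkt->value marks port cpkt->id as a
			 * console port. */
			break;
		case VIRTIO_CONSOLE_RESIZE:
			/* Console size changed; re-read rows/cols. */
			break;
		case VIRTIO_CONSOLE_PORT_OPEN:
			/* cpkt->value says whether the host side opened or
			 * closed port cpkt->id. */
			break;
		case VIRTIO_CONSOLE_PORT_NAME:
			/* The port name follows this header in the buffer. */
			break;
		}
	}
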
2016 | Appendix F: Entropy Device | ||
2017 | |||
2018 | The virtio entropy device supplies high-quality randomness for | ||
2019 | guest use. | ||
2020 | |||
2021 | Configuration | ||
2022 | |||
2023 | Subsystem Device ID 4 | ||
2024 | |||
2025 | Virtqueues 0:requestq. | ||
2026 | |||
2027 | Feature bits None currently defined | ||
2028 | |||
2029 | Device configuration layout None currently defined. | ||
2030 | |||
2031 | Device Initialization | ||
2032 | |||
2033 | The virtqueue is initialized. | ||
2034 | |||
2035 | Device Operation | ||
2036 | |||
2037 | When the driver requires random bytes, it places the descriptor | ||
2038 | of one or more buffers in the queue. Each buffer is filled | ||
2039 | completely with random data by the device. | ||
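
For illustration only (assuming the kernel virtqueue API of this period; the
exact virtqueue_add_buf() signature differs between kernel versions, and the
function name below is made up), queueing such a request could look like:

	/* Sketch: ask the device to fill buf with len random bytes. */
	static void example_request_entropy(struct virtqueue *vq,
					    void *buf, unsigned int len)
	{
		struct scatterlist sg;

		sg_init_one(&sg, buf, len);
		/* One device-writable buffer, no device-readable buffers. */
		if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
			return;
		virtqueue_kick(vq);
		/* The device returns the filled buffer on the used ring. */
	}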
2040 | |||
2041 | Appendix G: Memory Balloon Device | ||
2042 | |||
2043 | The virtio memory balloon device is a primitive device for | ||
2044 | managing guest memory: the device asks for a certain amount of | ||
2045 | memory, and the guest supplies it (or withdraws it, if the device | ||
2046 | has more than it asks for). This allows the guest to adapt to | ||
2047 | changes in allowance of underlying physical memory. If the | ||
2048 | VIRTIO_BALLOON_F_STATS_VQ feature is negotiated, the device can | ||
2049 | also be used to communicate guest memory statistics to the host. | ||
2050 | |||
2051 | Configuration | ||
2052 | |||
2053 | Subsystem Device ID 5 | ||
2054 | |||
2055 | Virtqueues 0:inflateq. 1:deflateq. 2:statsq.[footnote: | ||
2056 | Only if VIRTIO_BALLOON_F_STATS_VQ is set | ||
2057 | ] | ||
2058 | |||
2059 | Feature bits | ||
2060 | |||
2061 | VIRTIO_BALLOON_F_MUST_TELL_HOST (0) Host must be told before | ||
2062 | pages from the balloon are used. | ||
2063 | |||
2064 | VIRTIO_BALLOON_F_STATS_VQ (1) A virtqueue for reporting guest | ||
2065 | memory statistics is present. | ||
2066 | |||
2067 | Device configuration layout Both fields of this configuration | ||
2068 | are always available. Note that they are little endian, despite | ||
2069 | the convention that device fields are guest endian: | ||
2070 | |||
2071 | struct virtio_balloon_config { | ||
2072 | 	u32 num_pages; | ||
2073 | 	u32 actual; | ||
2074 | }; | ||
2076 | |||
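Because these two fields break the usual guest-endian convention, a driver
has to convert them explicitly. A minimal sketch (assuming the Linux le32
helpers and the vdev->config accessors; the function names are made up):

	/* Sketch: the balloon config fields are little endian on the wire. */
	static u32 example_read_num_pages(struct virtio_device *vdev)
	{
		__le32 v;

		vdev->config->get(vdev,
				  offsetof(struct virtio_balloon_config, num_pages),
				  &v, sizeof(v));
		return le32_to_cpu(v);	/* target balloon size, in 4096-byte pages */
	}

	static void example_write_actual(struct virtio_device *vdev, u32 pages)
	{
		__le32 v = cpu_to_le32(pages);

		vdev->config->set(vdev,
				  offsetof(struct virtio_balloon_config, actual),
				  &v, sizeof(v));
	}
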
2077 | Device Initialization | ||
2078 | |||
2079 | The inflate and deflate virtqueues are identified. | ||
2080 | |||
2081 | If the VIRTIO_BALLOON_F_STATS_VQ feature bit is negotiated: | ||
2082 | |||
2083 | Identify the stats virtqueue. | ||
2084 | |||
2085 | Add one empty buffer to the stats virtqueue and notify the | ||
2086 | host. | ||
2087 | |||
2088 | Device operation begins immediately. | ||
2089 | |||
2090 | Device Operation | ||
2091 | |||
2092 | Memory Ballooning The device is driven by the receipt of a | ||
2093 | configuration change interrupt. | ||
2094 | |||
2095 | The “num_pages” configuration field is examined. If this is | ||
2096 | greater than the “actual” number of pages, memory must be given | ||
2097 | to the balloon. If it is less than the “actual” number of | ||
2098 | pages, memory may be taken back from the balloon for general | ||
2099 | use. | ||
2100 | |||
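As a small worked example of that comparison (the helper below is
illustrative, not part of any driver): if num_pages reads 2048 while the
driver has so far supplied actual = 1536 pages, the balloon must grow by 512
pages; if num_pages were 1024 instead, up to 512 pages could be reclaimed.

	/* Sketch: a positive result means inflate by that many pages,
	 * a negative result means the driver may deflate by that many. */
	static s64 example_balloon_delta(u32 num_pages, u32 actual)
	{
		return (s64)num_pages - (s64)actual;
	}
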
2101 | To supply memory to the balloon (aka. inflate): | ||
2102 | |||
2103 | The driver constructs an array of addresses of unused memory | ||
2104 | pages. These addresses are divided by 4096[footnote: | ||
2105 | This is historical, and independent of the guest page size | ||
2106 | ] and the descriptor describing the resulting 32-bit array is | ||
2107 | added to the inflateq. | ||
2108 | |||
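For instance (a sketch, not the in-tree balloon driver; it assumes the usual
page_to_phys() helper), building that 32-bit array could look like:

	/* Sketch: convert guest pages into the balloon's page-frame numbers.
	 * The divisor is always 4096, independent of the guest page size. */
	static void example_fill_pfns(u32 *pfns, struct page **pages,
				      unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			pfns[i] = page_to_phys(pages[i]) >> 12;	/* divide by 4096 */
		/* A descriptor for the buffer holding pfns[0..n-1] is then
		 * added to the inflateq and the device is notified. */
	}
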
2109 | To remove memory from the balloon (aka. deflate): | ||
2110 | |||
2111 | The driver constructs an array of addresses of memory pages it | ||
2112 | has previously given to the balloon, as described above. This | ||
2113 | descriptor is added to the deflateq. | ||
2114 | |||
2115 | If the VIRTIO_BALLOON_F_MUST_TELL_HOST feature is set, the | ||
2116 | guest may not use these requested pages until that descriptor | ||
2117 | in the deflateq has been used by the device. | ||
2118 | |||
2119 | Otherwise, the guest may begin to re-use pages previously given | ||
2120 | to the balloon before the device has acknowledged their | ||
2121 | withdrawal. [footnote: | ||
2122 | In this case, deflation advice is merely a courtesy | ||
2123 | ] | ||
2124 | |||
2125 | In either case, once the device has completed the inflation or | ||
2126 | deflation, the “actual” field of the configuration should be | ||
2127 | updated to reflect the new number of pages in the balloon.[footnote: | ||
2128 | As updates to configuration space are not atomic, this field | ||
2129 | isn't particularly reliable, but can be used to diagnose buggy | ||
2130 | guests. | ||
2131 | ] | ||
2132 | |||
2133 | Memory Statistics | ||
2134 | |||
2135 | The stats virtqueue is atypical because communication is driven | ||
2136 | by the device (not the driver). The channel becomes active at | ||
2137 | driver initialization time when the driver adds an empty buffer | ||
2138 | and notifies the device. A request for memory statistics proceeds | ||
2139 | as follows: | ||
2140 | |||
2141 | The device pushes the buffer onto the used ring and sends an | ||
2142 | interrupt. | ||
2143 | |||
2144 | The driver pops the used buffer and discards it. | ||
2145 | |||
2146 | The driver collects memory statistics and writes them into a | ||
2147 | new buffer. | ||
2148 | |||
2149 | The driver adds the buffer to the virtqueue and notifies the | ||
2150 | device. | ||
2151 | |||
2152 | The device pops the buffer (retaining it to initiate a | ||
2153 | subsequent request) and consumes the statistics. | ||
2154 | |||
2155 | Memory Statistics Format Each statistic consists of a 16-bit | ||
2156 | tag and a 64-bit value. Both quantities are represented in the | ||
2157 | native endian of the guest. All statistics are optional and the | ||
2158 | driver may choose which ones to supply. To guarantee backwards | ||
2159 | compatibility, unsupported statistics should be omitted. | ||
2160 | |||
2161 | struct virtio_balloon_stat { | ||
2162 | #define VIRTIO_BALLOON_S_SWAP_IN 0 | ||
2163 | #define VIRTIO_BALLOON_S_SWAP_OUT 1 | ||
2164 | #define VIRTIO_BALLOON_S_MAJFLT 2 | ||
2165 | #define VIRTIO_BALLOON_S_MINFLT 3 | ||
2166 | #define VIRTIO_BALLOON_S_MEMFREE 4 | ||
2167 | #define VIRTIO_BALLOON_S_MEMTOT 5 | ||
2168 | 	u16 tag; | ||
2169 | 	u64 val; | ||
2170 | } __attribute__((packed)); | ||
2180 | |||
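A hedged sketch of the driver side (the values and function name are
illustrative): fill an array of these records, in native endian and
containing only the statistics the driver chooses to report, then hand the
buffer back on the stats virtqueue as described above.

	/* Sketch: report two of the optional statistics. */
	static unsigned int example_fill_stats(struct virtio_balloon_stat *stats)
	{
		unsigned int idx = 0;

		stats[idx].tag = VIRTIO_BALLOON_S_MEMTOT;
		stats[idx].val = 512ULL * 1024 * 1024;	/* e.g. 512 MiB total */
		idx++;
		stats[idx].tag = VIRTIO_BALLOON_S_MEMFREE;
		stats[idx].val = 128ULL * 1024 * 1024;	/* e.g. 128 MiB free */
		idx++;
		/* The buffer holding stats[0..idx-1] is then added to the
		 * statsq and the device is notified. */
		return idx;
	}
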
2181 | Tags | ||
2182 | |||
2183 | VIRTIO_BALLOON_S_SWAP_IN The amount of memory that has been | ||
2184 | swapped in (in bytes). | ||
2185 | |||
2186 | VIRTIO_BALLOON_S_SWAP_OUT The amount of memory that has been | ||
2187 | swapped out to disk (in bytes). | ||
2188 | |||
2189 | VIRTIO_BALLOON_S_MAJFLT The number of major page faults that | ||
2190 | have occurred. | ||
2191 | |||
2192 | VIRTIO_BALLOON_S_MINFLT The number of minor page faults that | ||
2193 | have occurred. | ||
2194 | |||
2195 | VIRTIO_BALLOON_S_MEMFREE The amount of memory not being used | ||
2196 | for any purpose (in bytes). | ||
2197 | |||
2198 | VIRTIO_BALLOON_S_MEMTOT The total amount of memory available | ||
2199 | (in bytes). | ||
2200 | |||
diff --git a/MAINTAINERS b/MAINTAINERS index 4f555d8e5346..1a8cc600067d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1883,7 +1883,7 @@ S: Maintained | |||
1883 | F: drivers/connector/ | 1883 | F: drivers/connector/ |
1884 | 1884 | ||
1885 | CONTROL GROUPS (CGROUPS) | 1885 | CONTROL GROUPS (CGROUPS) |
1886 | M: Paul Menage <menage@google.com> | 1886 | M: Paul Menage <paul@paulmenage.org> |
1887 | M: Li Zefan <lizf@cn.fujitsu.com> | 1887 | M: Li Zefan <lizf@cn.fujitsu.com> |
1888 | L: containers@lists.linux-foundation.org | 1888 | L: containers@lists.linux-foundation.org |
1889 | S: Maintained | 1889 | S: Maintained |
@@ -1932,7 +1932,7 @@ S: Maintained | |||
1932 | F: tools/power/cpupower | 1932 | F: tools/power/cpupower |
1933 | 1933 | ||
1934 | CPUSETS | 1934 | CPUSETS |
1935 | M: Paul Menage <menage@google.com> | 1935 | M: Paul Menage <paul@paulmenage.org> |
1936 | W: http://www.bullopensource.org/cpuset/ | 1936 | W: http://www.bullopensource.org/cpuset/ |
1937 | W: http://oss.sgi.com/projects/cpusets/ | 1937 | W: http://oss.sgi.com/projects/cpusets/ |
1938 | S: Supported | 1938 | S: Supported |
@@ -3905,9 +3905,9 @@ F: arch/powerpc/platforms/powermac/ | |||
3905 | F: drivers/macintosh/ | 3905 | F: drivers/macintosh/ |
3906 | 3906 | ||
3907 | LINUX FOR POWERPC EMBEDDED MPC5XXX | 3907 | LINUX FOR POWERPC EMBEDDED MPC5XXX |
3908 | M: Grant Likely <grant.likely@secretlab.ca> | 3908 | M: Anatolij Gustschin <agust@denx.de> |
3909 | L: linuxppc-dev@lists.ozlabs.org | 3909 | L: linuxppc-dev@lists.ozlabs.org |
3910 | T: git git://git.secretlab.ca/git/linux-2.6.git | 3910 | T: git git://git.denx.de/linux-2.6-agust.git |
3911 | S: Maintained | 3911 | S: Maintained |
3912 | F: arch/powerpc/platforms/512x/ | 3912 | F: arch/powerpc/platforms/512x/ |
3913 | F: arch/powerpc/platforms/52xx/ | 3913 | F: arch/powerpc/platforms/52xx/ |
@@ -4971,7 +4971,7 @@ M: Paul Mackerras <paulus@samba.org> | |||
4971 | M: Ingo Molnar <mingo@elte.hu> | 4971 | M: Ingo Molnar <mingo@elte.hu> |
4972 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 4972 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
4973 | S: Supported | 4973 | S: Supported |
4974 | F: kernel/perf_event*.c | 4974 | F: kernel/events/* |
4975 | F: include/linux/perf_event.h | 4975 | F: include/linux/perf_event.h |
4976 | F: arch/*/kernel/perf_event*.c | 4976 | F: arch/*/kernel/perf_event*.c |
4977 | F: arch/*/kernel/*/perf_event*.c | 4977 | F: arch/*/kernel/*/perf_event*.c |
@@ -7088,7 +7088,7 @@ S: Supported | |||
7088 | F: drivers/mmc/host/vub300.c | 7088 | F: drivers/mmc/host/vub300.c |
7089 | 7089 | ||
7090 | W1 DALLAS'S 1-WIRE BUS | 7090 | W1 DALLAS'S 1-WIRE BUS |
7091 | M: Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 7091 | M: Evgeniy Polyakov <zbr@ioremap.net> |
7092 | S: Maintained | 7092 | S: Maintained |
7093 | F: Documentation/w1/ | 7093 | F: Documentation/w1/ |
7094 | F: drivers/w1/ | 7094 | F: drivers/w1/ |
@@ -7358,7 +7358,7 @@ THE REST | |||
7358 | M: Linus Torvalds <torvalds@linux-foundation.org> | 7358 | M: Linus Torvalds <torvalds@linux-foundation.org> |
7359 | L: linux-kernel@vger.kernel.org | 7359 | L: linux-kernel@vger.kernel.org |
7360 | Q: http://patchwork.kernel.org/project/LKML/list/ | 7360 | Q: http://patchwork.kernel.org/project/LKML/list/ |
7361 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git | 7361 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git |
7362 | S: Buried alive in reporters | 7362 | S: Buried alive in reporters |
7363 | F: * | 7363 | F: * |
7364 | F: */ | 7364 | F: */ |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 1 | 2 | PATCHLEVEL = 1 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Sneaky Weasel | 5 | NAME = "Divemaster Edition" |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h index 086aba284df2..e77d77cd07b8 100644 --- a/arch/alpha/include/asm/sysinfo.h +++ b/arch/alpha/include/asm/sysinfo.h | |||
@@ -27,13 +27,4 @@ | |||
27 | #define UAC_NOFIX 2 | 27 | #define UAC_NOFIX 2 |
28 | #define UAC_SIGBUS 4 | 28 | #define UAC_SIGBUS 4 |
29 | 29 | ||
30 | |||
31 | #ifdef __KERNEL__ | ||
32 | |||
33 | /* This is the shift that is applied to the UAC bits as stored in the | ||
34 | per-thread flags. See thread_info.h. */ | ||
35 | #define UAC_SHIFT 6 | ||
36 | |||
37 | #endif | ||
38 | |||
39 | #endif /* __ASM_ALPHA_SYSINFO_H */ | 30 | #endif /* __ASM_ALPHA_SYSINFO_H */ |
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 6f32f9c84a2d..ff73db022342 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h | |||
@@ -74,9 +74,9 @@ register struct thread_info *__current_thread_info __asm__("$8"); | |||
74 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 74 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
75 | #define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ | 75 | #define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ |
76 | #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ | 76 | #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ |
77 | #define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ | 77 | #define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */ |
78 | #define TIF_UAC_NOFIX 11 | 78 | #define TIF_UAC_NOFIX 11 /* ! flags as they match */ |
79 | #define TIF_UAC_SIGBUS 12 | 79 | #define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */ |
80 | #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ | 80 | #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ |
81 | #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ | 81 | #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ |
82 | #define TIF_FREEZE 16 /* is freezing for suspend */ | 82 | #define TIF_FREEZE 16 /* is freezing for suspend */ |
@@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); | |||
97 | #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ | 97 | #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ |
98 | | _TIF_SYSCALL_TRACE) | 98 | | _TIF_SYSCALL_TRACE) |
99 | 99 | ||
100 | #define ALPHA_UAC_SHIFT 10 | 100 | #define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT |
101 | #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ | 101 | #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ |
102 | 1 << TIF_UAC_SIGBUS) | 102 | 1 << TIF_UAC_SIGBUS) |
103 | 103 | ||
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 326f0a2d56e5..01e8715e26d9 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | #include <asm/sysinfo.h> | 44 | #include <asm/sysinfo.h> |
45 | #include <asm/thread_info.h> | ||
45 | #include <asm/hwrpb.h> | 46 | #include <asm/hwrpb.h> |
46 | #include <asm/processor.h> | 47 | #include <asm/processor.h> |
47 | 48 | ||
@@ -633,9 +634,10 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, | |||
633 | case GSI_UACPROC: | 634 | case GSI_UACPROC: |
634 | if (nbytes < sizeof(unsigned int)) | 635 | if (nbytes < sizeof(unsigned int)) |
635 | return -EINVAL; | 636 | return -EINVAL; |
636 | w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; | 637 | w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & |
637 | if (put_user(w, (unsigned int __user *)buffer)) | 638 | UAC_BITMASK; |
638 | return -EFAULT; | 639 | if (put_user(w, (unsigned int __user *)buffer)) |
640 | return -EFAULT; | ||
639 | return 1; | 641 | return 1; |
640 | 642 | ||
641 | case GSI_PROC_TYPE: | 643 | case GSI_PROC_TYPE: |
@@ -756,8 +758,8 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, | |||
756 | case SSIN_UACPROC: | 758 | case SSIN_UACPROC: |
757 | again: | 759 | again: |
758 | old = current_thread_info()->flags; | 760 | old = current_thread_info()->flags; |
759 | new = old & ~(UAC_BITMASK << UAC_SHIFT); | 761 | new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); |
760 | new = new | (w & UAC_BITMASK) << UAC_SHIFT; | 762 | new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; |
761 | if (cmpxchg(¤t_thread_info()->flags, | 763 | if (cmpxchg(¤t_thread_info()->flags, |
762 | old, new) != old) | 764 | old, new) != old) |
763 | goto again; | 765 | goto again; |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2c71a8f3535a..5ebc5d922ea1 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -195,8 +195,7 @@ config VECTORS_BASE | |||
195 | The base address of exception vectors. | 195 | The base address of exception vectors. |
196 | 196 | ||
197 | config ARM_PATCH_PHYS_VIRT | 197 | config ARM_PATCH_PHYS_VIRT |
198 | bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" | 198 | bool "Patch physical to virtual translations at runtime" |
199 | depends on EXPERIMENTAL | ||
200 | depends on !XIP_KERNEL && MMU | 199 | depends on !XIP_KERNEL && MMU |
201 | depends on !ARCH_REALVIEW || !SPARSEMEM | 200 | depends on !ARCH_REALVIEW || !SPARSEMEM |
202 | help | 201 | help |
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 7fa3bb0d2397..a08783823b32 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S | |||
@@ -195,10 +195,10 @@ ENTRY(iwmmxt_task_disable) | |||
195 | 195 | ||
196 | @ enable access to CP0 and CP1 | 196 | @ enable access to CP0 and CP1 |
197 | XSC(mrc p15, 0, r4, c15, c1, 0) | 197 | XSC(mrc p15, 0, r4, c15, c1, 0) |
198 | XSC(orr r4, r4, #0xf) | 198 | XSC(orr r4, r4, #0x3) |
199 | XSC(mcr p15, 0, r4, c15, c1, 0) | 199 | XSC(mcr p15, 0, r4, c15, c1, 0) |
200 | PJ4(mrc p15, 0, r4, c1, c0, 2) | 200 | PJ4(mrc p15, 0, r4, c1, c0, 2) |
201 | PJ4(orr r4, r4, #0x3) | 201 | PJ4(orr r4, r4, #0xf) |
202 | PJ4(mcr p15, 0, r4, c1, c0, 2) | 202 | PJ4(mcr p15, 0, r4, c1, c0, 2) |
203 | 203 | ||
204 | mov r0, #0 @ nothing to load | 204 | mov r0, #0 @ nothing to load |
@@ -313,7 +313,7 @@ ENTRY(iwmmxt_task_switch) | |||
313 | teq r2, r3 @ next task owns it? | 313 | teq r2, r3 @ next task owns it? |
314 | movne pc, lr @ no: leave Concan disabled | 314 | movne pc, lr @ no: leave Concan disabled |
315 | 315 | ||
316 | 1: @ flip Conan access | 316 | 1: @ flip Concan access |
317 | XSC(eor r1, r1, #0x3) | 317 | XSC(eor r1, r1, #0x3) |
318 | XSC(mcr p15, 0, r1, c15, c1, 0) | 318 | XSC(mcr p15, 0, r1, c15, c1, 0) |
319 | PJ4(eor r1, r1, #0xf) | 319 | PJ4(eor r1, r1, #0xf) |
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 05b377616fd5..cc2020c2c709 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c | |||
@@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
323 | #endif | 323 | #endif |
324 | s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); | 324 | s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); |
325 | if (s && !is_smp()) | 325 | if (s && !is_smp()) |
326 | #ifdef CONFIG_SMP_ON_UP | ||
326 | fixup_smp((void *)s->sh_addr, s->sh_size); | 327 | fixup_smp((void *)s->sh_addr, s->sh_size); |
328 | #else | ||
329 | return -EINVAL; | ||
330 | #endif | ||
327 | return 0; | 331 | return 0; |
328 | } | 332 | } |
329 | 333 | ||
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c index 0fc7ba56d616..e63e23504fe5 100644 --- a/arch/arm/mach-imx/clock-imx25.c +++ b/arch/arm/mach-imx/clock-imx25.c | |||
@@ -331,6 +331,9 @@ int __init mx25_clocks_init(void) | |||
331 | __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0), | 331 | __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0), |
332 | CRM_BASE + 0x64); | 332 | CRM_BASE + 0x64); |
333 | 333 | ||
334 | /* Clock source for gpt is ahb_div */ | ||
335 | __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64); | ||
336 | |||
334 | mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); | 337 | mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); |
335 | 338 | ||
336 | return 0; | 339 | return 0; |
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index 6707de0ab716..6778f8193bc6 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/input.h> | 30 | #include <linux/input.h> |
31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <sound/tlv320aic32x4.h> | ||
33 | #include <asm/mach-types.h> | 34 | #include <asm/mach-types.h> |
34 | #include <asm/mach/arch.h> | 35 | #include <asm/mach/arch.h> |
35 | #include <asm/mach/time.h> | 36 | #include <asm/mach/time.h> |
@@ -196,6 +197,17 @@ static struct pca953x_platform_data visstrim_m10_pca9555_pdata = { | |||
196 | .invert = 0, | 197 | .invert = 0, |
197 | }; | 198 | }; |
198 | 199 | ||
200 | static struct aic32x4_pdata visstrim_m10_aic32x4_pdata = { | ||
201 | .power_cfg = AIC32X4_PWR_MICBIAS_2075_LDOIN | | ||
202 | AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE | | ||
203 | AIC32X4_PWR_AIC32X4_LDO_ENABLE | | ||
204 | AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36 | | ||
205 | AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED, | ||
206 | .micpga_routing = AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K | | ||
207 | AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K, | ||
208 | .swapdacs = false, | ||
209 | }; | ||
210 | |||
199 | static struct i2c_board_info visstrim_m10_i2c_devices[] = { | 211 | static struct i2c_board_info visstrim_m10_i2c_devices[] = { |
200 | { | 212 | { |
201 | I2C_BOARD_INFO("pca9555", 0x20), | 213 | I2C_BOARD_INFO("pca9555", 0x20), |
@@ -203,6 +215,7 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = { | |||
203 | }, | 215 | }, |
204 | { | 216 | { |
205 | I2C_BOARD_INFO("tlv320aic32x4", 0x18), | 217 | I2C_BOARD_INFO("tlv320aic32x4", 0x18), |
218 | .platform_data = &visstrim_m10_aic32x4_pdata, | ||
206 | } | 219 | } |
207 | }; | 220 | }; |
208 | 221 | ||
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index 0ce49478a479..29ca8907a780 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c | |||
@@ -468,7 +468,7 @@ static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = { | |||
468 | #endif | 468 | #endif |
469 | }; | 469 | }; |
470 | 470 | ||
471 | static void mxc_init_i2c(void) | 471 | static void __init mxc_init_i2c(void) |
472 | { | 472 | { |
473 | i2c_register_board_info(1, mx31ads_i2c1_devices, | 473 | i2c_register_board_info(1, mx31ads_i2c1_devices, |
474 | ARRAY_SIZE(mx31ads_i2c1_devices)); | 474 | ARRAY_SIZE(mx31ads_i2c1_devices)); |
@@ -486,7 +486,7 @@ static unsigned int ssi_pins[] = { | |||
486 | MX31_PIN_STXD5__STXD5, | 486 | MX31_PIN_STXD5__STXD5, |
487 | }; | 487 | }; |
488 | 488 | ||
489 | static void mxc_init_audio(void) | 489 | static void __init mxc_init_audio(void) |
490 | { | 490 | { |
491 | imx31_add_imx_ssi(0, NULL); | 491 | imx31_add_imx_ssi(0, NULL); |
492 | mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi"); | 492 | mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi"); |
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 750368ddf0f9..126913ad106a 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c | |||
@@ -192,7 +192,7 @@ static struct mxc_usbh_platform_data usbh2_pdata __initdata = { | |||
192 | .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, | 192 | .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, |
193 | }; | 193 | }; |
194 | 194 | ||
195 | static void lilly1131_usb_init(void) | 195 | static void __init lilly1131_usb_init(void) |
196 | { | 196 | { |
197 | imx31_add_mxc_ehci_hs(1, &usbh1_pdata); | 197 | imx31_add_mxc_ehci_hs(1, &usbh1_pdata); |
198 | 198 | ||
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index c070c24255f4..98e25d9aaab6 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c | |||
@@ -16,16 +16,18 @@ | |||
16 | #include <mach/gpio.h> | 16 | #include <mach/gpio.h> |
17 | #include <mach/pxa168.h> | 17 | #include <mach/pxa168.h> |
18 | #include <mach/mfp-pxa168.h> | 18 | #include <mach/mfp-pxa168.h> |
19 | #include <mach/mfp-gplugd.h> | ||
20 | 19 | ||
21 | #include "common.h" | 20 | #include "common.h" |
22 | 21 | ||
23 | static unsigned long gplugd_pin_config[] __initdata = { | 22 | static unsigned long gplugd_pin_config[] __initdata = { |
24 | /* UART3 */ | 23 | /* UART3 */ |
25 | GPIO8_UART3_SOUT, | 24 | GPIO8_UART3_TXD, |
26 | GPIO9_UART3_SIN, | 25 | GPIO9_UART3_RXD, |
27 | GPI1O_UART3_CTS, | 26 | GPIO1O_UART3_CTS, |
28 | GPI11_UART3_RTS, | 27 | GPIO11_UART3_RTS, |
28 | |||
29 | /* USB OTG PEN */ | ||
30 | GPIO18_GPIO, | ||
29 | 31 | ||
30 | /* MMC2 */ | 32 | /* MMC2 */ |
31 | GPIO28_MMC2_CMD, | 33 | GPIO28_MMC2_CMD, |
@@ -109,6 +111,12 @@ static unsigned long gplugd_pin_config[] __initdata = { | |||
109 | GPIO105_CI2C_SDA, | 111 | GPIO105_CI2C_SDA, |
110 | GPIO106_CI2C_SCL, | 112 | GPIO106_CI2C_SCL, |
111 | 113 | ||
114 | /* SPI NOR Flash on SSP2 */ | ||
115 | GPIO107_SSP2_RXD, | ||
116 | GPIO108_SSP2_TXD, | ||
117 | GPIO110_GPIO, /* SPI_CSn */ | ||
118 | GPIO111_SSP2_CLK, | ||
119 | |||
112 | /* Select JTAG */ | 120 | /* Select JTAG */ |
113 | GPIO109_GPIO, | 121 | GPIO109_GPIO, |
114 | 122 | ||
@@ -154,7 +162,7 @@ static void __init select_disp_freq(void) | |||
154 | "frequency\n"); | 162 | "frequency\n"); |
155 | } else { | 163 | } else { |
156 | gpio_direction_output(35, 1); | 164 | gpio_direction_output(35, 1); |
157 | gpio_free(104); | 165 | gpio_free(35); |
158 | } | 166 | } |
159 | 167 | ||
160 | if (unlikely(gpio_request(85, "DISP_FREQ_SEL_2"))) { | 168 | if (unlikely(gpio_request(85, "DISP_FREQ_SEL_2"))) { |
@@ -162,7 +170,7 @@ static void __init select_disp_freq(void) | |||
162 | "frequency\n"); | 170 | "frequency\n"); |
163 | } else { | 171 | } else { |
164 | gpio_direction_output(85, 0); | 172 | gpio_direction_output(85, 0); |
165 | gpio_free(104); | 173 | gpio_free(85); |
166 | } | 174 | } |
167 | } | 175 | } |
168 | 176 | ||
diff --git a/arch/arm/mach-mmp/include/mach/mfp-gplugd.h b/arch/arm/mach-mmp/include/mach/mfp-gplugd.h deleted file mode 100644 index b8cf38d85600..000000000000 --- a/arch/arm/mach-mmp/include/mach/mfp-gplugd.h +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-mmp/include/mach/mfp-gplugd.h | ||
3 | * | ||
4 | * MFP definitions used in gplugD | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef __MACH_MFP_GPLUGD_H | ||
12 | #define __MACH_MFP_GPLUGD_H | ||
13 | |||
14 | #include <plat/mfp.h> | ||
15 | #include <mach/mfp.h> | ||
16 | |||
17 | /* UART3 */ | ||
18 | #define GPIO8_UART3_SOUT MFP_CFG(GPIO8, AF2) | ||
19 | #define GPIO9_UART3_SIN MFP_CFG(GPIO9, AF2) | ||
20 | #define GPI1O_UART3_CTS MFP_CFG(GPIO10, AF2) | ||
21 | #define GPI11_UART3_RTS MFP_CFG(GPIO11, AF2) | ||
22 | |||
23 | /* MMC2 */ | ||
24 | #define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST) | ||
25 | #define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST) | ||
26 | #define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST) | ||
27 | #define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST) | ||
28 | #define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST) | ||
29 | #define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST) | ||
30 | |||
31 | /* I2S */ | ||
32 | #undef GPIO114_I2S_FRM | ||
33 | #undef GPIO115_I2S_BCLK | ||
34 | |||
35 | #define GPIO114_I2S_FRM MFP_CFG_DRV(GPIO114, AF1, FAST) | ||
36 | #define GPIO115_I2S_BCLK MFP_CFG_DRV(GPIO115, AF1, FAST) | ||
37 | #define GPIO116_I2S_TXD MFP_CFG_DRV(GPIO116, AF1, FAST) | ||
38 | |||
39 | /* MMC4 */ | ||
40 | #define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST) | ||
41 | #define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST) | ||
42 | #define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST) | ||
43 | #define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST) | ||
44 | #define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST) | ||
45 | #define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST) | ||
46 | |||
47 | /* OTG GPIO */ | ||
48 | #define GPIO_USB_OTG_PEN 18 | ||
49 | #define GPIO_USB_OIDIR 20 | ||
50 | |||
51 | /* Other GPIOs are 35, 84, 85 */ | ||
52 | #endif /* __MACH_MFP_GPLUGD_H */ | ||
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h index 8c782328b21c..92aaa3c19d61 100644 --- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h +++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h | |||
@@ -203,6 +203,10 @@ | |||
203 | #define GPIO33_CF_nCD2 MFP_CFG(GPIO33, AF3) | 203 | #define GPIO33_CF_nCD2 MFP_CFG(GPIO33, AF3) |
204 | 204 | ||
205 | /* UART */ | 205 | /* UART */ |
206 | #define GPIO8_UART3_TXD MFP_CFG(GPIO8, AF2) | ||
207 | #define GPIO9_UART3_RXD MFP_CFG(GPIO9, AF2) | ||
208 | #define GPIO1O_UART3_CTS MFP_CFG(GPIO10, AF2) | ||
209 | #define GPIO11_UART3_RTS MFP_CFG(GPIO11, AF2) | ||
206 | #define GPIO88_UART2_TXD MFP_CFG(GPIO88, AF2) | 210 | #define GPIO88_UART2_TXD MFP_CFG(GPIO88, AF2) |
207 | #define GPIO89_UART2_RXD MFP_CFG(GPIO89, AF2) | 211 | #define GPIO89_UART2_RXD MFP_CFG(GPIO89, AF2) |
208 | #define GPIO107_UART1_TXD MFP_CFG_DRV(GPIO107, AF1, FAST) | 212 | #define GPIO107_UART1_TXD MFP_CFG_DRV(GPIO107, AF1, FAST) |
@@ -232,6 +236,22 @@ | |||
232 | #define GPIO53_MMC1_CD MFP_CFG(GPIO53, AF1) | 236 | #define GPIO53_MMC1_CD MFP_CFG(GPIO53, AF1) |
233 | #define GPIO46_MMC1_WP MFP_CFG(GPIO46, AF1) | 237 | #define GPIO46_MMC1_WP MFP_CFG(GPIO46, AF1) |
234 | 238 | ||
239 | /* MMC2 */ | ||
240 | #define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST) | ||
241 | #define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST) | ||
242 | #define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST) | ||
243 | #define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST) | ||
244 | #define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST) | ||
245 | #define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST) | ||
246 | |||
247 | /* MMC4 */ | ||
248 | #define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST) | ||
249 | #define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST) | ||
250 | #define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST) | ||
251 | #define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST) | ||
252 | #define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST) | ||
253 | #define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST) | ||
254 | |||
235 | /* LCD */ | 255 | /* LCD */ |
236 | #define GPIO84_LCD_CS MFP_CFG(GPIO84, AF1) | 256 | #define GPIO84_LCD_CS MFP_CFG(GPIO84, AF1) |
237 | #define GPIO60_LCD_DD0 MFP_CFG(GPIO60, AF1) | 257 | #define GPIO60_LCD_DD0 MFP_CFG(GPIO60, AF1) |
@@ -269,11 +289,12 @@ | |||
269 | #define GPIO106_CI2C_SCL MFP_CFG(GPIO106, AF1) | 289 | #define GPIO106_CI2C_SCL MFP_CFG(GPIO106, AF1) |
270 | 290 | ||
271 | /* I2S */ | 291 | /* I2S */ |
272 | #define GPIO113_I2S_MCLK MFP_CFG(GPIO113,AF6) | 292 | #define GPIO113_I2S_MCLK MFP_CFG(GPIO113, AF6) |
273 | #define GPIO114_I2S_FRM MFP_CFG(GPIO114,AF1) | 293 | #define GPIO114_I2S_FRM MFP_CFG(GPIO114, AF1) |
274 | #define GPIO115_I2S_BCLK MFP_CFG(GPIO115,AF1) | 294 | #define GPIO115_I2S_BCLK MFP_CFG(GPIO115, AF1) |
275 | #define GPIO116_I2S_RXD MFP_CFG(GPIO116,AF2) | 295 | #define GPIO116_I2S_RXD MFP_CFG(GPIO116, AF2) |
276 | #define GPIO117_I2S_TXD MFP_CFG(GPIO117,AF2) | 296 | #define GPIO116_I2S_TXD MFP_CFG(GPIO116, AF1) |
297 | #define GPIO117_I2S_TXD MFP_CFG(GPIO117, AF2) | ||
277 | 298 | ||
278 | /* PWM */ | 299 | /* PWM */ |
279 | #define GPIO96_PWM3_OUT MFP_CFG(GPIO96, AF1) | 300 | #define GPIO96_PWM3_OUT MFP_CFG(GPIO96, AF1) |
@@ -324,4 +345,10 @@ | |||
324 | #define GPIO101_MII_MDIO MFP_CFG(GPIO101, AF5) | 345 | #define GPIO101_MII_MDIO MFP_CFG(GPIO101, AF5) |
325 | #define GPIO103_RX_DV MFP_CFG(GPIO103, AF5) | 346 | #define GPIO103_RX_DV MFP_CFG(GPIO103, AF5) |
326 | 347 | ||
348 | /* SSP2 */ | ||
349 | #define GPIO107_SSP2_RXD MFP_CFG(GPIO107, AF4) | ||
350 | #define GPIO108_SSP2_TXD MFP_CFG(GPIO108, AF4) | ||
351 | #define GPIO111_SSP2_CLK MFP_CFG(GPIO111, AF4) | ||
352 | #define GPIO112_SSP2_FRM MFP_CFG(GPIO112, AF4) | ||
353 | |||
327 | #endif /* __ASM_MACH_MFP_PXA168_H */ | 354 | #endif /* __ASM_MACH_MFP_PXA168_H */ |
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 99833b9485cf..4e91ee6e27c8 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c | |||
@@ -51,12 +51,12 @@ static inline uint32_t timer_read(void) | |||
51 | { | 51 | { |
52 | int delay = 100; | 52 | int delay = 100; |
53 | 53 | ||
54 | __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(0)); | 54 | __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1)); |
55 | 55 | ||
56 | while (delay--) | 56 | while (delay--) |
57 | cpu_relax(); | 57 | cpu_relax(); |
58 | 58 | ||
59 | return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(0)); | 59 | return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1)); |
60 | } | 60 | } |
61 | 61 | ||
62 | unsigned long long notrace sched_clock(void) | 62 | unsigned long long notrace sched_clock(void) |
@@ -75,28 +75,51 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
75 | { | 75 | { |
76 | struct clock_event_device *c = dev_id; | 76 | struct clock_event_device *c = dev_id; |
77 | 77 | ||
78 | /* disable and clear pending interrupt status */ | 78 | /* |
79 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); | 79 | * Clear pending interrupt status. |
80 | __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_ICR(0)); | 80 | */ |
81 | __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); | ||
82 | |||
83 | /* | ||
84 | * Disable timer 0. | ||
85 | */ | ||
86 | __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); | ||
87 | |||
81 | c->event_handler(c); | 88 | c->event_handler(c); |
89 | |||
82 | return IRQ_HANDLED; | 90 | return IRQ_HANDLED; |
83 | } | 91 | } |
84 | 92 | ||
85 | static int timer_set_next_event(unsigned long delta, | 93 | static int timer_set_next_event(unsigned long delta, |
86 | struct clock_event_device *dev) | 94 | struct clock_event_device *dev) |
87 | { | 95 | { |
88 | unsigned long flags, next; | 96 | unsigned long flags; |
89 | 97 | ||
90 | local_irq_save(flags); | 98 | local_irq_save(flags); |
91 | 99 | ||
92 | /* clear pending interrupt status and enable */ | 100 | /* |
101 | * Disable timer 0. | ||
102 | */ | ||
103 | __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); | ||
104 | |||
105 | /* | ||
106 | * Clear and enable timer match 0 interrupt. | ||
107 | */ | ||
93 | __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); | 108 | __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); |
94 | __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0)); | 109 | __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0)); |
95 | 110 | ||
96 | next = timer_read() + delta; | 111 | /* |
97 | __raw_writel(next, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); | 112 | * Setup new clockevent timer value. |
113 | */ | ||
114 | __raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); | ||
115 | |||
116 | /* | ||
117 | * Enable timer 0. | ||
118 | */ | ||
119 | __raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER); | ||
98 | 120 | ||
99 | local_irq_restore(flags); | 121 | local_irq_restore(flags); |
122 | |||
100 | return 0; | 123 | return 0; |
101 | } | 124 | } |
102 | 125 | ||
@@ -145,23 +168,26 @@ static struct clocksource cksrc = { | |||
145 | static void __init timer_config(void) | 168 | static void __init timer_config(void) |
146 | { | 169 | { |
147 | uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR); | 170 | uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR); |
148 | uint32_t cer = __raw_readl(TIMERS_VIRT_BASE + TMR_CER); | ||
149 | uint32_t cmr = __raw_readl(TIMERS_VIRT_BASE + TMR_CMR); | ||
150 | 171 | ||
151 | __raw_writel(cer & ~0x1, TIMERS_VIRT_BASE + TMR_CER); /* disable */ | 172 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */ |
152 | 173 | ||
153 | ccr &= (cpu_is_mmp2()) ? TMR_CCR_CS_0(0) : TMR_CCR_CS_0(3); | 174 | ccr &= (cpu_is_mmp2()) ? (TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) : |
175 | (TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3)); | ||
154 | __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); | 176 | __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); |
155 | 177 | ||
156 | /* free-running mode */ | 178 | /* set timer 0 to periodic mode, and timer 1 to free-running mode */ |
157 | __raw_writel(cmr | 0x01, TIMERS_VIRT_BASE + TMR_CMR); | 179 | __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR); |
158 | 180 | ||
159 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* free-running */ | 181 | __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */ |
160 | __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ | 182 | __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ |
161 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); | 183 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); |
162 | 184 | ||
163 | /* enable timer counter */ | 185 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */ |
164 | __raw_writel(cer | 0x01, TIMERS_VIRT_BASE + TMR_CER); | 186 | __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */ |
187 | __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1)); | ||
188 | |||
189 | /* enable timer 1 counter */ | ||
190 | __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER); | ||
165 | } | 191 | } |
166 | 192 | ||
167 | static struct irqaction timer_irq = { | 193 | static struct irqaction timer_irq = { |
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c index 7c893fa70266..68934ea8725a 100644 --- a/arch/arm/mach-mx5/board-cpuimx51.c +++ b/arch/arm/mach-mx5/board-cpuimx51.c | |||
@@ -81,7 +81,7 @@ static struct plat_serial8250_port serial_platform_data[] = { | |||
81 | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, | 81 | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, |
82 | }, { | 82 | }, { |
83 | .mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000), | 83 | .mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000), |
84 | .irq = irq_to_gpio(CPUIMX51_QUARTD_GPIO), | 84 | .irq = gpio_to_irq(CPUIMX51_QUARTD_GPIO), |
85 | .irqflags = IRQF_TRIGGER_HIGH, | 85 | .irqflags = IRQF_TRIGGER_HIGH, |
86 | .uartclk = CPUIMX51_QUART_XTAL, | 86 | .uartclk = CPUIMX51_QUART_XTAL, |
87 | .regshift = CPUIMX51_QUART_REGSHIFT, | 87 | .regshift = CPUIMX51_QUART_REGSHIFT, |
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c index e400b09109ce..11b0ff67f89d 100644 --- a/arch/arm/mach-mx5/board-mx51_babbage.c +++ b/arch/arm/mach-mx5/board-mx51_babbage.c | |||
@@ -369,7 +369,7 @@ static void __init mx51_babbage_init(void) | |||
369 | ARRAY_SIZE(mx51babbage_pads)); | 369 | ARRAY_SIZE(mx51babbage_pads)); |
370 | 370 | ||
371 | imx51_add_imx_uart(0, &uart_pdata); | 371 | imx51_add_imx_uart(0, &uart_pdata); |
372 | imx51_add_imx_uart(1, &uart_pdata); | 372 | imx51_add_imx_uart(1, NULL); |
373 | imx51_add_imx_uart(2, &uart_pdata); | 373 | imx51_add_imx_uart(2, &uart_pdata); |
374 | 374 | ||
375 | babbage_fec_reset(); | 375 | babbage_fec_reset(); |
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c index f70700dc0ec1..551daf85ff8c 100644 --- a/arch/arm/mach-mx5/board-mx51_efikamx.c +++ b/arch/arm/mach-mx5/board-mx51_efikamx.c | |||
@@ -108,9 +108,9 @@ static void __init mx51_efikamx_board_id(void) | |||
108 | gpio_request(EFIKAMX_PCBID2, "pcbid2"); | 108 | gpio_request(EFIKAMX_PCBID2, "pcbid2"); |
109 | gpio_direction_input(EFIKAMX_PCBID2); | 109 | gpio_direction_input(EFIKAMX_PCBID2); |
110 | 110 | ||
111 | id = gpio_get_value(EFIKAMX_PCBID0); | 111 | id = gpio_get_value(EFIKAMX_PCBID0) ? 1 : 0; |
112 | id |= gpio_get_value(EFIKAMX_PCBID1) << 1; | 112 | id |= (gpio_get_value(EFIKAMX_PCBID1) ? 1 : 0) << 1; |
113 | id |= gpio_get_value(EFIKAMX_PCBID2) << 2; | 113 | id |= (gpio_get_value(EFIKAMX_PCBID2) ? 1 : 0) << 2; |
114 | 114 | ||
115 | switch (id) { | 115 | switch (id) { |
116 | case 7: | 116 | case 7: |
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c index 2e4d9d32a87c..8a9bca22beb5 100644 --- a/arch/arm/mach-mx5/board-mx51_efikasb.c +++ b/arch/arm/mach-mx5/board-mx51_efikasb.c | |||
@@ -156,23 +156,24 @@ static struct gpio_keys_button mx51_efikasb_keys[] = { | |||
156 | { | 156 | { |
157 | .code = KEY_POWER, | 157 | .code = KEY_POWER, |
158 | .gpio = EFIKASB_PWRKEY, | 158 | .gpio = EFIKASB_PWRKEY, |
159 | .type = EV_PWR, | 159 | .type = EV_KEY, |
160 | .desc = "Power Button", | 160 | .desc = "Power Button", |
161 | .wakeup = 1, | 161 | .wakeup = 1, |
162 | .debounce_interval = 10, /* ms */ | 162 | .active_low = 1, |
163 | }, | 163 | }, |
164 | { | 164 | { |
165 | .code = SW_LID, | 165 | .code = SW_LID, |
166 | .gpio = EFIKASB_LID, | 166 | .gpio = EFIKASB_LID, |
167 | .type = EV_SW, | 167 | .type = EV_SW, |
168 | .desc = "Lid Switch", | 168 | .desc = "Lid Switch", |
169 | .active_low = 1, | ||
169 | }, | 170 | }, |
170 | { | 171 | { |
171 | /* SW_RFKILLALL vs KEY_RFKILL ? */ | 172 | .code = KEY_RFKILL, |
172 | .code = SW_RFKILL_ALL, | ||
173 | .gpio = EFIKASB_RFKILL, | 173 | .gpio = EFIKASB_RFKILL, |
174 | .type = EV_SW, | 174 | .type = EV_KEY, |
175 | .desc = "rfkill", | 175 | .desc = "rfkill", |
176 | .active_low = 1, | ||
176 | }, | 177 | }, |
177 | }; | 178 | }; |
178 | 179 | ||
@@ -224,8 +225,8 @@ static void __init mx51_efikasb_board_id(void) | |||
224 | gpio_request(EFIKASB_PCBID1, "pcb id1"); | 225 | gpio_request(EFIKASB_PCBID1, "pcb id1"); |
225 | gpio_direction_input(EFIKASB_PCBID1); | 226 | gpio_direction_input(EFIKASB_PCBID1); |
226 | 227 | ||
227 | id = gpio_get_value(EFIKASB_PCBID0); | 228 | id = gpio_get_value(EFIKASB_PCBID0) ? 1 : 0; |
228 | id |= gpio_get_value(EFIKASB_PCBID1) << 1; | 229 | id |= (gpio_get_value(EFIKASB_PCBID1) ? 1 : 0) << 1; |
229 | 230 | ||
230 | switch (id) { | 231 | switch (id) { |
231 | default: | 232 | default: |
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c index 7f20308c4dbd..f7bf996f463b 100644 --- a/arch/arm/mach-mx5/clock-mx51-mx53.c +++ b/arch/arm/mach-mx5/clock-mx51-mx53.c | |||
@@ -271,7 +271,11 @@ static int _clk_pll_enable(struct clk *clk) | |||
271 | int i = 0; | 271 | int i = 0; |
272 | 272 | ||
273 | pllbase = _get_pll_base(clk); | 273 | pllbase = _get_pll_base(clk); |
274 | reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN; | 274 | reg = __raw_readl(pllbase + MXC_PLL_DP_CTL); |
275 | if (reg & MXC_PLL_DP_CTL_UPEN) | ||
276 | return 0; | ||
277 | |||
278 | reg |= MXC_PLL_DP_CTL_UPEN; | ||
275 | __raw_writel(reg, pllbase + MXC_PLL_DP_CTL); | 279 | __raw_writel(reg, pllbase + MXC_PLL_DP_CTL); |
276 | 280 | ||
277 | /* Wait for lock */ | 281 | /* Wait for lock */ |
diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-mx5/mx51_efika.c index 4435e03cea5d..c9209454807a 100644 --- a/arch/arm/mach-mx5/mx51_efika.c +++ b/arch/arm/mach-mx5/mx51_efika.c | |||
@@ -186,7 +186,7 @@ static int initialize_usbh1_port(struct platform_device *pdev) | |||
186 | 186 | ||
187 | mdelay(10); | 187 | mdelay(10); |
188 | 188 | ||
189 | return mx51_initialize_usb_hw(0, MXC_EHCI_ITC_NO_THRESHOLD); | 189 | return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_ITC_NO_THRESHOLD); |
190 | } | 190 | } |
191 | 191 | ||
192 | static struct mxc_usbh_platform_data usbh1_config = { | 192 | static struct mxc_usbh_platform_data usbh1_config = { |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 4ae6257b39a4..57b66d590c52 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -7,7 +7,6 @@ config ARCH_OMAP2PLUS_TYPICAL | |||
7 | default y | 7 | default y |
8 | select AEABI | 8 | select AEABI |
9 | select REGULATOR | 9 | select REGULATOR |
10 | select PM | ||
11 | select PM_RUNTIME | 10 | select PM_RUNTIME |
12 | select VFP | 11 | select VFP |
13 | select NEON if ARCH_OMAP3 || ARCH_OMAP4 | 12 | select NEON if ARCH_OMAP3 || ARCH_OMAP4 |
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c index 5f2b55ff04ff..933e9353cb37 100644 --- a/arch/arm/mach-omap2/board-am3517crane.c +++ b/arch/arm/mach-omap2/board-am3517crane.c | |||
@@ -45,8 +45,6 @@ static struct omap_board_config_kernel am3517_crane_config[] __initdata = { | |||
45 | static struct omap_board_mux board_mux[] __initdata = { | 45 | static struct omap_board_mux board_mux[] __initdata = { |
46 | { .reg_offset = OMAP_MUX_TERMINATOR }, | 46 | { .reg_offset = OMAP_MUX_TERMINATOR }, |
47 | }; | 47 | }; |
48 | #else | ||
49 | #define board_mux NULL | ||
50 | #endif | 48 | #endif |
51 | 49 | ||
52 | static void __init am3517_crane_init_early(void) | 50 | static void __init am3517_crane_init_early(void) |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 32f5f895568a..3ae16b4e3f52 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -491,23 +491,22 @@ static void __init beagle_opp_init(void) | |||
491 | 491 | ||
492 | /* Custom OPP enabled for all xM versions */ | 492 | /* Custom OPP enabled for all xM versions */ |
493 | if (cpu_is_omap3630()) { | 493 | if (cpu_is_omap3630()) { |
494 | struct omap_hwmod *mh = omap_hwmod_lookup("mpu"); | 494 | struct device *mpu_dev, *iva_dev; |
495 | struct omap_hwmod *dh = omap_hwmod_lookup("iva"); | ||
496 | struct device *dev; | ||
497 | 495 | ||
498 | if (!mh || !dh) { | 496 | mpu_dev = omap2_get_mpuss_device(); |
497 | iva_dev = omap2_get_iva_device(); | ||
498 | |||
499 | if (!mpu_dev || !iva_dev) { | ||
499 | pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n", | 500 | pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n", |
500 | __func__, mh, dh); | 501 | __func__, mpu_dev, iva_dev); |
501 | return; | 502 | return; |
502 | } | 503 | } |
503 | /* Enable MPU 1GHz and lower opps */ | 504 | /* Enable MPU 1GHz and lower opps */ |
504 | dev = &mh->od->pdev.dev; | 505 | r = opp_enable(mpu_dev, 800000000); |
505 | r = opp_enable(dev, 800000000); | ||
506 | /* TODO: MPU 1GHz needs SR and ABB */ | 506 | /* TODO: MPU 1GHz needs SR and ABB */ |
507 | 507 | ||
508 | /* Enable IVA 800MHz and lower opps */ | 508 | /* Enable IVA 800MHz and lower opps */ |
509 | dev = &dh->od->pdev.dev; | 509 | r |= opp_enable(iva_dev, 660000000); |
510 | r |= opp_enable(dev, 660000000); | ||
511 | /* TODO: DSP 800MHz needs SR and ABB */ | 510 | /* TODO: DSP 800MHz needs SR and ABB */ |
512 | if (r) { | 511 | if (r) { |
513 | pr_err("%s: failed to enable higher opp %d\n", | 512 | pr_err("%s: failed to enable higher opp %d\n", |
@@ -516,10 +515,8 @@ static void __init beagle_opp_init(void) | |||
516 | * Cleanup - disable the higher freqs - we dont care | 515 | * Cleanup - disable the higher freqs - we dont care |
517 | * about the results | 516 | * about the results |
518 | */ | 517 | */ |
519 | dev = &mh->od->pdev.dev; | 518 | opp_disable(mpu_dev, 800000000); |
520 | opp_disable(dev, 800000000); | 519 | opp_disable(iva_dev, 660000000); |
521 | dev = &dh->od->pdev.dev; | ||
522 | opp_disable(dev, 660000000); | ||
523 | } | 520 | } |
524 | } | 521 | } |
525 | return; | 522 | return; |
diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h index f2ea6453ade0..a018a7327879 100644 --- a/arch/arm/mach-omap2/cminst44xx.h +++ b/arch/arm/mach-omap2/cminst44xx.h | |||
@@ -18,13 +18,36 @@ extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs); | |||
18 | extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs); | 18 | extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs); |
19 | 19 | ||
20 | extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); | 20 | extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); |
21 | extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); | 21 | |
22 | # ifdef CONFIG_ARCH_OMAP4 | ||
23 | extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, | ||
24 | u16 clkctrl_offs); | ||
22 | 25 | ||
23 | extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs, | 26 | extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs, |
24 | u16 clkctrl_offs); | 27 | u16 clkctrl_offs); |
25 | extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, | 28 | extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, |
26 | u16 clkctrl_offs); | 29 | u16 clkctrl_offs); |
27 | 30 | ||
31 | # else | ||
32 | |||
33 | static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, | ||
34 | u16 clkctrl_offs) | ||
35 | { | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, | ||
40 | s16 cdoffs, u16 clkctrl_offs) | ||
41 | { | ||
42 | } | ||
43 | |||
44 | static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, | ||
45 | u16 clkctrl_offs) | ||
46 | { | ||
47 | } | ||
48 | |||
49 | # endif | ||
50 | |||
28 | /* | 51 | /* |
29 | * In an ideal world, we would not export these low-level functions, | 52 | * In an ideal world, we would not export these low-level functions, |
30 | * but this will probably take some time to fix properly | 53 | * but this will probably take some time to fix properly |
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index c7fb22abc219..655e9480eb98 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c | |||
@@ -821,11 +821,10 @@ static void __init omap_mux_set_cmdline_signals(void) | |||
821 | if (!omap_mux_options) | 821 | if (!omap_mux_options) |
822 | return; | 822 | return; |
823 | 823 | ||
824 | options = kmalloc(strlen(omap_mux_options) + 1, GFP_KERNEL); | 824 | options = kstrdup(omap_mux_options, GFP_KERNEL); |
825 | if (!options) | 825 | if (!options) |
826 | return; | 826 | return; |
827 | 827 | ||
828 | strcpy(options, omap_mux_options); | ||
829 | next_opt = options; | 828 | next_opt = options; |
830 | 829 | ||
831 | while ((token = strsep(&next_opt, ",")) != NULL) { | 830 | while ((token = strsep(&next_opt, ",")) != NULL) { |
@@ -855,24 +854,19 @@ static int __init omap_mux_copy_names(struct omap_mux *src, | |||
855 | 854 | ||
856 | for (i = 0; i < OMAP_MUX_NR_MODES; i++) { | 855 | for (i = 0; i < OMAP_MUX_NR_MODES; i++) { |
857 | if (src->muxnames[i]) { | 856 | if (src->muxnames[i]) { |
858 | dst->muxnames[i] = | 857 | dst->muxnames[i] = kstrdup(src->muxnames[i], |
859 | kmalloc(strlen(src->muxnames[i]) + 1, | 858 | GFP_KERNEL); |
860 | GFP_KERNEL); | ||
861 | if (!dst->muxnames[i]) | 859 | if (!dst->muxnames[i]) |
862 | goto free; | 860 | goto free; |
863 | strcpy(dst->muxnames[i], src->muxnames[i]); | ||
864 | } | 861 | } |
865 | } | 862 | } |
866 | 863 | ||
867 | #ifdef CONFIG_DEBUG_FS | 864 | #ifdef CONFIG_DEBUG_FS |
868 | for (i = 0; i < OMAP_MUX_NR_SIDES; i++) { | 865 | for (i = 0; i < OMAP_MUX_NR_SIDES; i++) { |
869 | if (src->balls[i]) { | 866 | if (src->balls[i]) { |
870 | dst->balls[i] = | 867 | dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL); |
871 | kmalloc(strlen(src->balls[i]) + 1, | ||
872 | GFP_KERNEL); | ||
873 | if (!dst->balls[i]) | 868 | if (!dst->balls[i]) |
874 | goto free; | 869 | goto free; |
875 | strcpy(dst->balls[i], src->balls[i]); | ||
876 | } | 870 | } |
877 | } | 871 | } |
878 | #endif | 872 | #endif |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 2ce2fb7664bc..34c01a7de810 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
@@ -621,7 +621,7 @@ void sr_disable(struct voltagedomain *voltdm) | |||
621 | sr_v2_disable(sr); | 621 | sr_v2_disable(sr); |
622 | } | 622 | } |
623 | 623 | ||
624 | pm_runtime_put_sync(&sr->pdev->dev); | 624 | pm_runtime_put_sync_suspend(&sr->pdev->dev); |
625 | } | 625 | } |
626 | 626 | ||
627 | /** | 627 | /** |
@@ -860,6 +860,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
860 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 860 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
861 | 861 | ||
862 | pm_runtime_enable(&pdev->dev); | 862 | pm_runtime_enable(&pdev->dev); |
863 | pm_runtime_irq_safe(&pdev->dev); | ||
863 | 864 | ||
864 | sr_info->pdev = pdev; | 865 | sr_info->pdev = pdev; |
865 | sr_info->srid = pdev->id; | 866 | sr_info->srid = pdev->id; |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index e9640728239b..cf1de7d2630d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -293,7 +293,8 @@ static void __init omap2_gp_clocksource_init(int gptimer_id, | |||
293 | pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n", | 293 | pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n", |
294 | gptimer_id, clksrc.rate); | 294 | gptimer_id, clksrc.rate); |
295 | 295 | ||
296 | __omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1); | 296 | __omap_dm_timer_load_start(clksrc.io_base, |
297 | OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1); | ||
297 | init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate); | 298 | init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate); |
298 | 299 | ||
299 | if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) | 300 | if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) |
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 2543342dbccb..daa056ed8738 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c | |||
@@ -48,14 +48,7 @@ void __init omap_pmic_init(int bus, u32 clkrate, | |||
48 | omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); | 48 | omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); |
49 | } | 49 | } |
50 | 50 | ||
51 | static struct twl4030_usb_data omap4_usb_pdata = { | 51 | #if defined(CONFIG_ARCH_OMAP3) |
52 | .phy_init = omap4430_phy_init, | ||
53 | .phy_exit = omap4430_phy_exit, | ||
54 | .phy_power = omap4430_phy_power, | ||
55 | .phy_set_clock = omap4430_phy_set_clk, | ||
56 | .phy_suspend = omap4430_phy_suspend, | ||
57 | }; | ||
58 | |||
59 | static struct twl4030_usb_data omap3_usb_pdata = { | 52 | static struct twl4030_usb_data omap3_usb_pdata = { |
60 | .usb_mode = T2_USB_MODE_ULPI, | 53 | .usb_mode = T2_USB_MODE_ULPI, |
61 | }; | 54 | }; |
@@ -122,6 +115,45 @@ static struct regulator_init_data omap3_vpll2_idata = { | |||
122 | .consumer_supplies = omap3_vpll2_supplies, | 115 | .consumer_supplies = omap3_vpll2_supplies, |
123 | }; | 116 | }; |
124 | 117 | ||
118 | void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, | ||
119 | u32 pdata_flags, u32 regulators_flags) | ||
120 | { | ||
121 | if (!pmic_data->irq_base) | ||
122 | pmic_data->irq_base = TWL4030_IRQ_BASE; | ||
123 | if (!pmic_data->irq_end) | ||
124 | pmic_data->irq_end = TWL4030_IRQ_END; | ||
125 | |||
126 | /* Common platform data configurations */ | ||
127 | if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) | ||
128 | pmic_data->usb = &omap3_usb_pdata; | ||
129 | |||
130 | if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) | ||
131 | pmic_data->bci = &omap3_bci_pdata; | ||
132 | |||
133 | if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) | ||
134 | pmic_data->madc = &omap3_madc_pdata; | ||
135 | |||
136 | if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) | ||
137 | pmic_data->audio = &omap3_audio_pdata; | ||
138 | |||
139 | /* Common regulator configurations */ | ||
140 | if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) | ||
141 | pmic_data->vdac = &omap3_vdac_idata; | ||
142 | |||
143 | if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) | ||
144 | pmic_data->vpll2 = &omap3_vpll2_idata; | ||
145 | } | ||
146 | #endif /* CONFIG_ARCH_OMAP3 */ | ||
147 | |||
148 | #if defined(CONFIG_ARCH_OMAP4) | ||
149 | static struct twl4030_usb_data omap4_usb_pdata = { | ||
150 | .phy_init = omap4430_phy_init, | ||
151 | .phy_exit = omap4430_phy_exit, | ||
152 | .phy_power = omap4430_phy_power, | ||
153 | .phy_set_clock = omap4430_phy_set_clk, | ||
154 | .phy_suspend = omap4430_phy_suspend, | ||
155 | }; | ||
156 | |||
125 | static struct regulator_init_data omap4_vdac_idata = { | 157 | static struct regulator_init_data omap4_vdac_idata = { |
126 | .constraints = { | 158 | .constraints = { |
127 | .min_uV = 1800000, | 159 | .min_uV = 1800000, |
@@ -273,32 +305,4 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data, | |||
273 | !pmic_data->clk32kg) | 305 | !pmic_data->clk32kg) |
274 | pmic_data->clk32kg = &omap4_clk32kg_idata; | 306 | pmic_data->clk32kg = &omap4_clk32kg_idata; |
275 | } | 307 | } |
276 | 308 | #endif /* CONFIG_ARCH_OMAP4 */ | |
277 | void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, | ||
278 | u32 pdata_flags, u32 regulators_flags) | ||
279 | { | ||
280 | if (!pmic_data->irq_base) | ||
281 | pmic_data->irq_base = TWL4030_IRQ_BASE; | ||
282 | if (!pmic_data->irq_end) | ||
283 | pmic_data->irq_end = TWL4030_IRQ_END; | ||
284 | |||
285 | /* Common platform data configurations */ | ||
286 | if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) | ||
287 | pmic_data->usb = &omap3_usb_pdata; | ||
288 | |||
289 | if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) | ||
290 | pmic_data->bci = &omap3_bci_pdata; | ||
291 | |||
292 | if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) | ||
293 | pmic_data->madc = &omap3_madc_pdata; | ||
294 | |||
295 | if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) | ||
296 | pmic_data->audio = &omap3_audio_pdata; | ||
297 | |||
298 | /* Common regulator configurations */ | ||
299 | if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) | ||
300 | pmic_data->vdac = &omap3_vdac_idata; | ||
301 | |||
302 | if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) | ||
303 | pmic_data->vpll2 = &omap3_vpll2_idata; | ||
304 | } | ||
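With the OMAP3 and OMAP4 helpers now grouped under their own CONFIG_ARCH_* guards, board files keep calling the accessor exactly as before. A sketch of typical usage, assuming the declarations from twl-common.h and a board-local pdata structure (board_twldata and board_pmic_init are illustrative names):

static struct twl4030_platform_data board_twldata;	/* filled per board */

static void __init board_pmic_init(void)
{
	/*
	 * Request the shared USB and audio platform data plus the
	 * VDAC/VPLL2 regulator defaults; fields the board already
	 * set are left untouched.
	 */
	omap3_pmic_get_config(&board_twldata,
			      TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
			      TWL_COMMON_REGULATOR_VDAC |
			      TWL_COMMON_REGULATOR_VPLL2);
	/* ...then register the PMIC on its I2C bus, e.g. via omap_pmic_init(). */
}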
diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c index 964c6c3cd7a6..dd39fee59549 100644 --- a/arch/arm/mach-sa1100/pci-nanoengine.c +++ b/arch/arm/mach-sa1100/pci-nanoengine.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/mach-types.h> | 28 | #include <asm/mach-types.h> |
29 | 29 | ||
30 | #include <mach/nanoengine.h> | 30 | #include <mach/nanoengine.h> |
31 | #include <mach/hardware.h> | ||
31 | 32 | ||
32 | static DEFINE_SPINLOCK(nano_lock); | 33 | static DEFINE_SPINLOCK(nano_lock); |
33 | 34 | ||
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index be7c638b648b..cfbcf8b95599 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | 24 | ||
25 | #include <asm/system.h> | ||
25 | #include <asm/unaligned.h> | 26 | #include <asm/unaligned.h> |
26 | 27 | ||
27 | #include "fault.h" | 28 | #include "fault.h" |
@@ -95,6 +96,33 @@ static const char *usermode_action[] = { | |||
95 | "signal+warn" | 96 | "signal+warn" |
96 | }; | 97 | }; |
97 | 98 | ||
99 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ | ||
100 | static bool cpu_is_v6_unaligned(void) | ||
101 | { | ||
102 | return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U); | ||
103 | } | ||
104 | |||
105 | static int safe_usermode(int new_usermode, bool warn) | ||
106 | { | ||
107 | /* | ||
108 | * ARMv6 and later CPUs can perform unaligned accesses for | ||
109 | * most single load and store instructions up to word size. | ||
110 | * LDM, STM, LDRD and STRD still need to be handled. | ||
111 | * | ||
112 | * Ignoring the alignment fault is not an option on these | ||
113 | * CPUs since we spin re-faulting the instruction without | ||
114 | * making any progress. | ||
115 | */ | ||
116 | if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) { | ||
117 | new_usermode |= UM_FIXUP; | ||
118 | |||
119 | if (warn) | ||
120 | printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); | ||
121 | } | ||
122 | |||
123 | return new_usermode; | ||
124 | } | ||
125 | |||
98 | static int alignment_proc_show(struct seq_file *m, void *v) | 126 | static int alignment_proc_show(struct seq_file *m, void *v) |
99 | { | 127 | { |
100 | seq_printf(m, "User:\t\t%lu\n", ai_user); | 128 | seq_printf(m, "User:\t\t%lu\n", ai_user); |
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer | |||
125 | if (get_user(mode, buffer)) | 153 | if (get_user(mode, buffer)) |
126 | return -EFAULT; | 154 | return -EFAULT; |
127 | if (mode >= '0' && mode <= '5') | 155 | if (mode >= '0' && mode <= '5') |
128 | ai_usermode = mode - '0'; | 156 | ai_usermode = safe_usermode(mode - '0', true); |
129 | } | 157 | } |
130 | return count; | 158 | return count; |
131 | } | 159 | } |
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
886 | if (ai_usermode & UM_FIXUP) | 914 | if (ai_usermode & UM_FIXUP) |
887 | goto fixup; | 915 | goto fixup; |
888 | 916 | ||
889 | if (ai_usermode & UM_SIGNAL) | 917 | if (ai_usermode & UM_SIGNAL) { |
890 | force_sig(SIGBUS, current); | 918 | siginfo_t si; |
891 | else { | 919 | |
920 | si.si_signo = SIGBUS; | ||
921 | si.si_errno = 0; | ||
922 | si.si_code = BUS_ADRALN; | ||
923 | si.si_addr = (void __user *)addr; | ||
924 | |||
925 | force_sig_info(si.si_signo, &si, current); | ||
926 | } else { | ||
892 | /* | 927 | /* |
893 | * We're about to disable the alignment trap and return to | 928 | * We're about to disable the alignment trap and return to |
894 | * user space. But if an interrupt occurs before actually | 929 | * user space. But if an interrupt occurs before actually |
@@ -926,20 +961,11 @@ static int __init alignment_init(void) | |||
926 | return -ENOMEM; | 961 | return -ENOMEM; |
927 | #endif | 962 | #endif |
928 | 963 | ||
929 | /* | 964 | if (cpu_is_v6_unaligned()) { |
930 | * ARMv6 and later CPUs can perform unaligned accesses for | ||
931 | * most single load and store instructions up to word size. | ||
932 | * LDM, STM, LDRD and STRD still need to be handled. | ||
933 | * | ||
934 | * Ignoring the alignment fault is not an option on these | ||
935 | * CPUs since we spin re-faulting the instruction without | ||
936 | * making any progress. | ||
937 | */ | ||
938 | if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) { | ||
939 | cr_alignment &= ~CR_A; | 965 | cr_alignment &= ~CR_A; |
940 | cr_no_alignment &= ~CR_A; | 966 | cr_no_alignment &= ~CR_A; |
941 | set_cr(cr_alignment); | 967 | set_cr(cr_alignment); |
942 | ai_usermode = UM_FIXUP; | 968 | ai_usermode = safe_usermode(ai_usermode, false); |
943 | } | 969 | } |
944 | 970 | ||
945 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, | 971 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, |
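Rather than a bare force_sig(), the alignment handler now fills a siginfo so user space receives BUS_ADRALN together with the faulting address in si_addr. The essential shape of that pattern, as a sketch:

#include <linux/sched.h>
#include <linux/signal.h>

static void send_alignment_sigbus(unsigned long addr)
{
	siginfo_t si;

	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code  = BUS_ADRALN;		/* invalid address alignment */
	si.si_addr  = (void __user *)addr;	/* faulting user address */

	force_sig_info(si.si_signo, &si, current);
}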
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2fee782077c1..91bca355cd31 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) | |||
441 | static inline void poison_init_mem(void *s, size_t count) | 441 | static inline void poison_init_mem(void *s, size_t count) |
442 | { | 442 | { |
443 | u32 *p = (u32 *)s; | 443 | u32 *p = (u32 *)s; |
444 | while ((count = count - 4)) | 444 | for (; count != 0; count -= 4) |
445 | *p++ = 0xe7fddef0; | 445 | *p++ = 0xe7fddef0; |
446 | } | 446 | } |
447 | 447 | ||
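The rewritten poison_init_mem() loop stores exactly count/4 words; the old form decremented before the first store (dropping one word) and wrapped around to a huge size_t when count was 0. A small userspace model of the difference, illustrative only and not kernel code:

#include <stdio.h>
#include <stddef.h>

/* Count how many 4-byte words each loop shape would poison. */
static size_t words_old(size_t count)
{
	size_t n = 0;
	while ((count = count - 4) && n < 16)	/* cap only to show the wrap */
		n++;
	return n;
}

static size_t words_new(size_t count)
{
	size_t n = 0;
	for (; count != 0; count -= 4)
		n++;
	return n;
}

int main(void)
{
	printf("count=8: old=%zu new=%zu\n", words_old(8), words_new(8));
	printf("count=0: old=%zu (runs away) new=%zu\n",
	       words_old(0), words_new(0));
	return 0;
}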
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index f8f7ea34bfc5..683af3a182b7 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -410,6 +410,7 @@ __arm946_proc_info: | |||
410 | .long 0x41009460 | 410 | .long 0x41009460 |
411 | .long 0xff00fff0 | 411 | .long 0xff00fff0 |
412 | .long 0 | 412 | .long 0 |
413 | .long 0 | ||
413 | b __arm946_setup | 414 | b __arm946_setup |
414 | .long cpu_arch_name | 415 | .long cpu_arch_name |
415 | .long cpu_elf_name | 416 | .long cpu_elf_name |
@@ -418,6 +419,6 @@ __arm946_proc_info: | |||
418 | .long arm946_processor_functions | 419 | .long arm946_processor_functions |
419 | .long 0 | 420 | .long 0 |
420 | .long 0 | 421 | .long 0 |
421 | .long arm940_cache_fns | 422 | .long arm946_cache_fns |
422 | .size __arm946_proc_info, . - __arm946_proc_info | 423 | .size __arm946_proc_info, . - __arm946_proc_info |
423 | 424 | ||
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S index 91fc7cdb5dc9..e4dde91f0231 100644 --- a/arch/arm/plat-mxc/include/mach/debug-macro.S +++ b/arch/arm/plat-mxc/include/mach/debug-macro.S | |||
@@ -44,6 +44,14 @@ | |||
44 | #define UART_PADDR MX51_UART1_BASE_ADDR | 44 | #define UART_PADDR MX51_UART1_BASE_ADDR |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | /* iMX50/53 have same addresses, but not iMX51 */ | ||
48 | #if defined(CONFIG_SOC_IMX50) || defined(CONFIG_SOC_IMX53) | ||
49 | #ifdef UART_PADDR | ||
50 | #error "CONFIG_DEBUG_LL is incompatible with multiple archs" | ||
51 | #endif | ||
52 | #define UART_PADDR MX53_UART1_BASE_ADDR | ||
53 | #endif | ||
54 | |||
47 | #define UART_VADDR IMX_IO_ADDRESS(UART_PADDR) | 55 | #define UART_VADDR IMX_IO_ADDRESS(UART_PADDR) |
48 | 56 | ||
49 | .macro addruart, rp, rv | 57 | .macro addruart, rp, rv |
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h index 9440b9e00e89..5408fd1fc736 100644 --- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h +++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h | |||
@@ -30,6 +30,9 @@ | |||
30 | #define MX53_SDHC_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_PUE | \ | 30 | #define MX53_SDHC_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_PUE | \ |
31 | PAD_CTL_PUS_47K_UP | PAD_CTL_DSE_HIGH | \ | 31 | PAD_CTL_PUS_47K_UP | PAD_CTL_DSE_HIGH | \ |
32 | PAD_CTL_SRE_FAST) | 32 | PAD_CTL_SRE_FAST) |
33 | #define PAD_CTRL_I2C (PAD_CTL_SRE_FAST | PAD_CTL_ODE | PAD_CTL_PKE | \ | ||
34 | PAD_CTL_PUE | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP \ | ||
35 | | PAD_CTL_HYS) | ||
33 | 36 | ||
34 | #define _MX53_PAD_GPIO_19__KPP_COL_5 IOMUX_PAD(0x348, 0x20, 0, 0x840, 0, 0) | 37 | #define _MX53_PAD_GPIO_19__KPP_COL_5 IOMUX_PAD(0x348, 0x20, 0, 0x840, 0, 0) |
35 | #define _MX53_PAD_GPIO_19__GPIO4_5 IOMUX_PAD(0x348, 0x20, 1, 0x0, 0, 0) | 38 | #define _MX53_PAD_GPIO_19__GPIO4_5 IOMUX_PAD(0x348, 0x20, 1, 0x0, 0, 0) |
@@ -1256,7 +1259,7 @@ | |||
1256 | #define MX53_PAD_KEY_COL3__GPIO4_12 (_MX53_PAD_KEY_COL3__GPIO4_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1259 | #define MX53_PAD_KEY_COL3__GPIO4_12 (_MX53_PAD_KEY_COL3__GPIO4_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1257 | #define MX53_PAD_KEY_COL3__USBOH3_H2_DP (_MX53_PAD_KEY_COL3__USBOH3_H2_DP | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1260 | #define MX53_PAD_KEY_COL3__USBOH3_H2_DP (_MX53_PAD_KEY_COL3__USBOH3_H2_DP | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1258 | #define MX53_PAD_KEY_COL3__SPDIF_IN1 (_MX53_PAD_KEY_COL3__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1261 | #define MX53_PAD_KEY_COL3__SPDIF_IN1 (_MX53_PAD_KEY_COL3__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1259 | #define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1262 | #define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1260 | #define MX53_PAD_KEY_COL3__ECSPI1_SS3 (_MX53_PAD_KEY_COL3__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1263 | #define MX53_PAD_KEY_COL3__ECSPI1_SS3 (_MX53_PAD_KEY_COL3__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1261 | #define MX53_PAD_KEY_COL3__FEC_CRS (_MX53_PAD_KEY_COL3__FEC_CRS | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1264 | #define MX53_PAD_KEY_COL3__FEC_CRS (_MX53_PAD_KEY_COL3__FEC_CRS | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1262 | #define MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK (_MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1265 | #define MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK (_MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1264,7 +1267,7 @@ | |||
1264 | #define MX53_PAD_KEY_ROW3__GPIO4_13 (_MX53_PAD_KEY_ROW3__GPIO4_13 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1267 | #define MX53_PAD_KEY_ROW3__GPIO4_13 (_MX53_PAD_KEY_ROW3__GPIO4_13 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1265 | #define MX53_PAD_KEY_ROW3__USBOH3_H2_DM (_MX53_PAD_KEY_ROW3__USBOH3_H2_DM | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1268 | #define MX53_PAD_KEY_ROW3__USBOH3_H2_DM (_MX53_PAD_KEY_ROW3__USBOH3_H2_DM | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1266 | #define MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK (_MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1269 | #define MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK (_MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1267 | #define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1270 | #define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1268 | #define MX53_PAD_KEY_ROW3__OSC32K_32K_OUT (_MX53_PAD_KEY_ROW3__OSC32K_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1271 | #define MX53_PAD_KEY_ROW3__OSC32K_32K_OUT (_MX53_PAD_KEY_ROW3__OSC32K_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1269 | #define MX53_PAD_KEY_ROW3__CCM_PLL4_BYP (_MX53_PAD_KEY_ROW3__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1272 | #define MX53_PAD_KEY_ROW3__CCM_PLL4_BYP (_MX53_PAD_KEY_ROW3__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1270 | #define MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 (_MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1273 | #define MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 (_MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1536,7 +1539,7 @@ | |||
1536 | #define MX53_PAD_CSI0_DAT8__KPP_COL_7 (_MX53_PAD_CSI0_DAT8__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1539 | #define MX53_PAD_CSI0_DAT8__KPP_COL_7 (_MX53_PAD_CSI0_DAT8__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1537 | #define MX53_PAD_CSI0_DAT8__ECSPI2_SCLK (_MX53_PAD_CSI0_DAT8__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1540 | #define MX53_PAD_CSI0_DAT8__ECSPI2_SCLK (_MX53_PAD_CSI0_DAT8__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1538 | #define MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC (_MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1541 | #define MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC (_MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1539 | #define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1542 | #define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1540 | #define MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 (_MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1543 | #define MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 (_MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1541 | #define MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 (_MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1544 | #define MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 (_MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1542 | #define MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 (_MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1545 | #define MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 (_MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1544,7 +1547,7 @@ | |||
1544 | #define MX53_PAD_CSI0_DAT9__KPP_ROW_7 (_MX53_PAD_CSI0_DAT9__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1547 | #define MX53_PAD_CSI0_DAT9__KPP_ROW_7 (_MX53_PAD_CSI0_DAT9__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1545 | #define MX53_PAD_CSI0_DAT9__ECSPI2_MOSI (_MX53_PAD_CSI0_DAT9__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1548 | #define MX53_PAD_CSI0_DAT9__ECSPI2_MOSI (_MX53_PAD_CSI0_DAT9__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1546 | #define MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR (_MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1549 | #define MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR (_MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1547 | #define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1550 | #define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1548 | #define MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 (_MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1551 | #define MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 (_MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1549 | #define MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 (_MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1552 | #define MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 (_MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1550 | #define MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 (_MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1553 | #define MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 (_MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1631,25 +1634,25 @@ | |||
1631 | #define MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1634 | #define MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1632 | #define MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS (_MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1635 | #define MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS (_MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1633 | #define MX53_PAD_EIM_EB2__ECSPI1_SS0 (_MX53_PAD_EIM_EB2__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1636 | #define MX53_PAD_EIM_EB2__ECSPI1_SS0 (_MX53_PAD_EIM_EB2__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1634 | #define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1637 | #define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1635 | #define MX53_PAD_EIM_D16__EMI_WEIM_D_16 (_MX53_PAD_EIM_D16__EMI_WEIM_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1638 | #define MX53_PAD_EIM_D16__EMI_WEIM_D_16 (_MX53_PAD_EIM_D16__EMI_WEIM_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1636 | #define MX53_PAD_EIM_D16__GPIO3_16 (_MX53_PAD_EIM_D16__GPIO3_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1639 | #define MX53_PAD_EIM_D16__GPIO3_16 (_MX53_PAD_EIM_D16__GPIO3_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1637 | #define MX53_PAD_EIM_D16__IPU_DI0_PIN5 (_MX53_PAD_EIM_D16__IPU_DI0_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1640 | #define MX53_PAD_EIM_D16__IPU_DI0_PIN5 (_MX53_PAD_EIM_D16__IPU_DI0_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1638 | #define MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK (_MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1641 | #define MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK (_MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1639 | #define MX53_PAD_EIM_D16__ECSPI1_SCLK (_MX53_PAD_EIM_D16__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1642 | #define MX53_PAD_EIM_D16__ECSPI1_SCLK (_MX53_PAD_EIM_D16__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1640 | #define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1643 | #define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1641 | #define MX53_PAD_EIM_D17__EMI_WEIM_D_17 (_MX53_PAD_EIM_D17__EMI_WEIM_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1644 | #define MX53_PAD_EIM_D17__EMI_WEIM_D_17 (_MX53_PAD_EIM_D17__EMI_WEIM_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1642 | #define MX53_PAD_EIM_D17__GPIO3_17 (_MX53_PAD_EIM_D17__GPIO3_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1645 | #define MX53_PAD_EIM_D17__GPIO3_17 (_MX53_PAD_EIM_D17__GPIO3_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1643 | #define MX53_PAD_EIM_D17__IPU_DI0_PIN6 (_MX53_PAD_EIM_D17__IPU_DI0_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1646 | #define MX53_PAD_EIM_D17__IPU_DI0_PIN6 (_MX53_PAD_EIM_D17__IPU_DI0_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1644 | #define MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN (_MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1647 | #define MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN (_MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1645 | #define MX53_PAD_EIM_D17__ECSPI1_MISO (_MX53_PAD_EIM_D17__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1648 | #define MX53_PAD_EIM_D17__ECSPI1_MISO (_MX53_PAD_EIM_D17__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1646 | #define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1649 | #define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1647 | #define MX53_PAD_EIM_D18__EMI_WEIM_D_18 (_MX53_PAD_EIM_D18__EMI_WEIM_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1650 | #define MX53_PAD_EIM_D18__EMI_WEIM_D_18 (_MX53_PAD_EIM_D18__EMI_WEIM_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1648 | #define MX53_PAD_EIM_D18__GPIO3_18 (_MX53_PAD_EIM_D18__GPIO3_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1651 | #define MX53_PAD_EIM_D18__GPIO3_18 (_MX53_PAD_EIM_D18__GPIO3_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1649 | #define MX53_PAD_EIM_D18__IPU_DI0_PIN7 (_MX53_PAD_EIM_D18__IPU_DI0_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1652 | #define MX53_PAD_EIM_D18__IPU_DI0_PIN7 (_MX53_PAD_EIM_D18__IPU_DI0_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1650 | #define MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO (_MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1653 | #define MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO (_MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1651 | #define MX53_PAD_EIM_D18__ECSPI1_MOSI (_MX53_PAD_EIM_D18__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1654 | #define MX53_PAD_EIM_D18__ECSPI1_MOSI (_MX53_PAD_EIM_D18__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1652 | #define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1655 | #define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1653 | #define MX53_PAD_EIM_D18__IPU_DI1_D0_CS (_MX53_PAD_EIM_D18__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1656 | #define MX53_PAD_EIM_D18__IPU_DI1_D0_CS (_MX53_PAD_EIM_D18__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1654 | #define MX53_PAD_EIM_D19__EMI_WEIM_D_19 (_MX53_PAD_EIM_D19__EMI_WEIM_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1657 | #define MX53_PAD_EIM_D19__EMI_WEIM_D_19 (_MX53_PAD_EIM_D19__EMI_WEIM_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1655 | #define MX53_PAD_EIM_D19__GPIO3_19 (_MX53_PAD_EIM_D19__GPIO3_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1658 | #define MX53_PAD_EIM_D19__GPIO3_19 (_MX53_PAD_EIM_D19__GPIO3_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1672,7 +1675,7 @@ | |||
1672 | #define MX53_PAD_EIM_D21__IPU_DI0_PIN17 (_MX53_PAD_EIM_D21__IPU_DI0_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1675 | #define MX53_PAD_EIM_D21__IPU_DI0_PIN17 (_MX53_PAD_EIM_D21__IPU_DI0_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1673 | #define MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK (_MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1676 | #define MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK (_MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1674 | #define MX53_PAD_EIM_D21__CSPI_SCLK (_MX53_PAD_EIM_D21__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1677 | #define MX53_PAD_EIM_D21__CSPI_SCLK (_MX53_PAD_EIM_D21__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1675 | #define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1678 | #define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1676 | #define MX53_PAD_EIM_D21__USBOH3_USBOTG_OC (_MX53_PAD_EIM_D21__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1679 | #define MX53_PAD_EIM_D21__USBOH3_USBOTG_OC (_MX53_PAD_EIM_D21__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1677 | #define MX53_PAD_EIM_D22__EMI_WEIM_D_22 (_MX53_PAD_EIM_D22__EMI_WEIM_D_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1680 | #define MX53_PAD_EIM_D22__EMI_WEIM_D_22 (_MX53_PAD_EIM_D22__EMI_WEIM_D_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1678 | #define MX53_PAD_EIM_D22__GPIO3_22 (_MX53_PAD_EIM_D22__GPIO3_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1681 | #define MX53_PAD_EIM_D22__GPIO3_22 (_MX53_PAD_EIM_D22__GPIO3_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -1732,7 +1735,7 @@ | |||
1732 | #define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL)) | 1735 | #define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL)) |
1733 | #define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1736 | #define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1734 | #define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1737 | #define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1735 | #define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1738 | #define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
1736 | #define MX53_PAD_EIM_D28__IPU_EXT_TRIG (_MX53_PAD_EIM_D28__IPU_EXT_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1739 | #define MX53_PAD_EIM_D28__IPU_EXT_TRIG (_MX53_PAD_EIM_D28__IPU_EXT_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1737 | #define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1740 | #define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
1738 | #define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 1741 | #define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -2297,7 +2300,7 @@ | |||
2297 | #define MX53_PAD_GPIO_9__SCC_FAIL_STATE (_MX53_PAD_GPIO_9__SCC_FAIL_STATE | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2300 | #define MX53_PAD_GPIO_9__SCC_FAIL_STATE (_MX53_PAD_GPIO_9__SCC_FAIL_STATE | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2298 | #define MX53_PAD_GPIO_3__ESAI1_HCKR (_MX53_PAD_GPIO_3__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2301 | #define MX53_PAD_GPIO_3__ESAI1_HCKR (_MX53_PAD_GPIO_3__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2299 | #define MX53_PAD_GPIO_3__GPIO1_3 (_MX53_PAD_GPIO_3__GPIO1_3 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2302 | #define MX53_PAD_GPIO_3__GPIO1_3 (_MX53_PAD_GPIO_3__GPIO1_3 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2300 | #define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2303 | #define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
2301 | #define MX53_PAD_GPIO_3__DPLLIP1_TOG_EN (_MX53_PAD_GPIO_3__DPLLIP1_TOG_EN | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2304 | #define MX53_PAD_GPIO_3__DPLLIP1_TOG_EN (_MX53_PAD_GPIO_3__DPLLIP1_TOG_EN | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2302 | #define MX53_PAD_GPIO_3__CCM_CLKO2 (_MX53_PAD_GPIO_3__CCM_CLKO2 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2305 | #define MX53_PAD_GPIO_3__CCM_CLKO2 (_MX53_PAD_GPIO_3__CCM_CLKO2 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2303 | #define MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 (_MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2306 | #define MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 (_MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -2305,7 +2308,7 @@ | |||
2305 | #define MX53_PAD_GPIO_3__MLB_MLBCLK (_MX53_PAD_GPIO_3__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2308 | #define MX53_PAD_GPIO_3__MLB_MLBCLK (_MX53_PAD_GPIO_3__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2306 | #define MX53_PAD_GPIO_6__ESAI1_SCKT (_MX53_PAD_GPIO_6__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2309 | #define MX53_PAD_GPIO_6__ESAI1_SCKT (_MX53_PAD_GPIO_6__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2307 | #define MX53_PAD_GPIO_6__GPIO1_6 (_MX53_PAD_GPIO_6__GPIO1_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2310 | #define MX53_PAD_GPIO_6__GPIO1_6 (_MX53_PAD_GPIO_6__GPIO1_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2308 | #define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2311 | #define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
2309 | #define MX53_PAD_GPIO_6__CCM_CCM_OUT_0 (_MX53_PAD_GPIO_6__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2312 | #define MX53_PAD_GPIO_6__CCM_CCM_OUT_0 (_MX53_PAD_GPIO_6__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2310 | #define MX53_PAD_GPIO_6__CSU_CSU_INT_DEB (_MX53_PAD_GPIO_6__CSU_CSU_INT_DEB | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2313 | #define MX53_PAD_GPIO_6__CSU_CSU_INT_DEB (_MX53_PAD_GPIO_6__CSU_CSU_INT_DEB | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2311 | #define MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 (_MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2314 | #define MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 (_MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -2333,7 +2336,7 @@ | |||
2333 | #define MX53_PAD_GPIO_5__CCM_CLKO (_MX53_PAD_GPIO_5__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2336 | #define MX53_PAD_GPIO_5__CCM_CLKO (_MX53_PAD_GPIO_5__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2334 | #define MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 (_MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2337 | #define MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 (_MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2335 | #define MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 (_MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2338 | #define MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 (_MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2336 | #define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2339 | #define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
2337 | #define MX53_PAD_GPIO_5__CCM_PLL1_BYP (_MX53_PAD_GPIO_5__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2340 | #define MX53_PAD_GPIO_5__CCM_PLL1_BYP (_MX53_PAD_GPIO_5__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2338 | #define MX53_PAD_GPIO_7__ESAI1_TX4_RX1 (_MX53_PAD_GPIO_7__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2341 | #define MX53_PAD_GPIO_7__ESAI1_TX4_RX1 (_MX53_PAD_GPIO_7__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2339 | #define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2342 | #define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
@@ -2356,7 +2359,7 @@ | |||
2356 | #define MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT (_MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2359 | #define MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT (_MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2357 | #define MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 (_MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2360 | #define MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 (_MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2358 | #define MX53_PAD_GPIO_16__SPDIF_IN1 (_MX53_PAD_GPIO_16__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2361 | #define MX53_PAD_GPIO_16__SPDIF_IN1 (_MX53_PAD_GPIO_16__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2359 | #define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2362 | #define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) |
2360 | #define MX53_PAD_GPIO_16__SJC_DE_B (_MX53_PAD_GPIO_16__SJC_DE_B | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2363 | #define MX53_PAD_GPIO_16__SJC_DE_B (_MX53_PAD_GPIO_16__SJC_DE_B | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2361 | #define MX53_PAD_GPIO_17__ESAI1_TX0 (_MX53_PAD_GPIO_17__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2364 | #define MX53_PAD_GPIO_17__ESAI1_TX0 (_MX53_PAD_GPIO_17__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
2362 | #define MX53_PAD_GPIO_17__GPIO7_12 (_MX53_PAD_GPIO_17__GPIO7_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) | 2365 | #define MX53_PAD_GPIO_17__GPIO7_12 (_MX53_PAD_GPIO_17__GPIO7_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) |
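The new PAD_CTRL_I2C setting (open drain, 100k pull-up, hysteresis) now rides along with every MX53 I2C pad definition instead of NO_PAD_CTRL, so board code picks it up without changes. A hedged sketch of how such pad lists are normally consumed, assuming the usual iomux-v3 helpers (the board_* names are illustrative):

#include <linux/kernel.h>
#include <mach/iomux-mx53.h>

static iomux_v3_cfg_t board_i2c_pads[] __initdata = {
	MX53_PAD_KEY_COL3__I2C2_SCL,	/* now carries PAD_CTRL_I2C */
	MX53_PAD_KEY_ROW3__I2C2_SDA,
};

static void __init board_i2c_iomux(void)
{
	mxc_iomux_v3_setup_multiple_pads(board_i2c_pads,
					 ARRAY_SIZE(board_i2c_pads));
}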
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 6e6735f04ee3..bb8f4a6b3e37 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
@@ -13,6 +13,7 @@ config ARCH_OMAP1 | |||
13 | bool "TI OMAP1" | 13 | bool "TI OMAP1" |
14 | select CLKDEV_LOOKUP | 14 | select CLKDEV_LOOKUP |
15 | select CLKSRC_MMIO | 15 | select CLKSRC_MMIO |
16 | select GENERIC_IRQ_CHIP | ||
16 | help | 17 | help |
17 | "Systems based on omap7xx, omap15xx or omap16xx" | 18 | "Systems based on omap7xx, omap15xx or omap16xx" |
18 | 19 | ||
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h index d1c916fcf770..dc562a5c0a8a 100644 --- a/arch/arm/plat-omap/include/plat/dma.h +++ b/arch/arm/plat-omap/include/plat/dma.h | |||
@@ -195,6 +195,11 @@ | |||
195 | 195 | ||
196 | #define OMAP36XX_DMA_UART4_TX 81 /* S_DMA_80 */ | 196 | #define OMAP36XX_DMA_UART4_TX 81 /* S_DMA_80 */ |
197 | #define OMAP36XX_DMA_UART4_RX 82 /* S_DMA_81 */ | 197 | #define OMAP36XX_DMA_UART4_RX 82 /* S_DMA_81 */ |
198 | |||
199 | /* Only for AM35xx */ | ||
200 | #define AM35XX_DMA_UART4_TX 54 | ||
201 | #define AM35XX_DMA_UART4_RX 55 | ||
202 | |||
198 | /*----------------------------------------------------------------------------*/ | 203 | /*----------------------------------------------------------------------------*/ |
199 | 204 | ||
200 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) | 205 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) |
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index 926d25c780f3..30e10719b774 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h | |||
@@ -357,6 +357,7 @@ | |||
357 | #define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69 | 357 | #define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69 |
358 | #define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70 | 358 | #define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70 |
359 | #define INT_35XX_USBOTG_IRQ 71 | 359 | #define INT_35XX_USBOTG_IRQ 71 |
360 | #define INT_35XX_UART4 84 | ||
360 | #define INT_35XX_CCDC_VD0_IRQ 88 | 361 | #define INT_35XX_CCDC_VD0_IRQ 88 |
361 | #define INT_35XX_CCDC_VD1_IRQ 92 | 362 | #define INT_35XX_CCDC_VD1_IRQ 92 |
362 | #define INT_35XX_CCDC_VD2_IRQ 93 | 363 | #define INT_35XX_CCDC_VD2_IRQ 93 |
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h index 2723f9166ea2..de3b10c18127 100644 --- a/arch/arm/plat-omap/include/plat/serial.h +++ b/arch/arm/plat-omap/include/plat/serial.h | |||
@@ -56,6 +56,9 @@ | |||
56 | #define TI816X_UART2_BASE 0x48022000 | 56 | #define TI816X_UART2_BASE 0x48022000 |
57 | #define TI816X_UART3_BASE 0x48024000 | 57 | #define TI816X_UART3_BASE 0x48024000 |
58 | 58 | ||
59 | /* AM3505/3517 UART4 */ | ||
60 | #define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */ | ||
61 | |||
59 | /* External port on Zoom2/3 */ | 62 | /* External port on Zoom2/3 */ |
60 | #define ZOOM_UART_BASE 0x10000000 | 63 | #define ZOOM_UART_BASE 0x10000000 |
61 | #define ZOOM_UART_VIRT 0xfa400000 | 64 | #define ZOOM_UART_VIRT 0xfa400000 |
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index c60737c49a32..79e7fedb8602 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c | |||
@@ -423,9 +423,6 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, | |||
423 | { | 423 | { |
424 | unsigned int i; | 424 | unsigned int i; |
425 | struct scatterlist *sg; | 425 | struct scatterlist *sg; |
426 | void *va; | ||
427 | |||
428 | va = phys_to_virt(pa); | ||
429 | 426 | ||
430 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | 427 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { |
431 | unsigned bytes; | 428 | unsigned bytes; |
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 3b3776d0a1a7..fff68d0d521b 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -910,7 +910,7 @@ omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 | |||
910 | uemd MACH_UEMD UEMD 3281 | 910 | uemd MACH_UEMD UEMD 3281 |
911 | ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 | 911 | ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 |
912 | rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 | 912 | rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 |
913 | nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 | 913 | encore MACH_ENCORE ENCORE 3284 |
914 | hkdkc100 MACH_HKDKC100 HKDKC100 3285 | 914 | hkdkc100 MACH_HKDKC100 HKDKC100 3285 |
915 | ts42xx MACH_TS42XX TS42XX 3286 | 915 | ts42xx MACH_TS42XX TS42XX 3286 |
916 | aebl MACH_AEBL AEBL 3287 | 916 | aebl MACH_AEBL AEBL 3287 |
diff --git a/arch/cris/include/asm/serial.h b/arch/cris/include/asm/serial.h new file mode 100644 index 000000000000..af7535a955fb --- /dev/null +++ b/arch/cris/include/asm/serial.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ASM_SERIAL_H | ||
2 | #define _ASM_SERIAL_H | ||
3 | |||
4 | /* | ||
5 | * This assumes you have a 1.8432 MHz clock for your UART. | ||
6 | */ | ||
7 | #define BASE_BAUD (1843200 / 16) | ||
8 | |||
9 | #endif /* _ASM_SERIAL_H */ | ||
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 124854714958..3ff7785b3beb 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -162,7 +162,6 @@ config IA64_GENERIC | |||
162 | select ACPI_NUMA | 162 | select ACPI_NUMA |
163 | select SWIOTLB | 163 | select SWIOTLB |
164 | select PCI_MSI | 164 | select PCI_MSI |
165 | select DMAR | ||
166 | help | 165 | help |
167 | This selects the system type of your hardware. A "generic" kernel | 166 | This selects the system type of your hardware. A "generic" kernel |
168 | will run on any supported IA-64 system. However, if you configure | 167 | will run on any supported IA-64 system. However, if you configure |
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 1d7bca0a396d..0e5cd1405e0e 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
@@ -234,3 +234,4 @@ CONFIG_CRYPTO_MD5=y | |||
234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
235 | CONFIG_CRC_T10DIF=y | 235 | CONFIG_CRC_T10DIF=y |
236 | CONFIG_MISC_DEVICES=y | 236 | CONFIG_MISC_DEVICES=y |
237 | CONFIG_DMAR=y | ||
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 31d5570d6567..89f201434b5a 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h | |||
@@ -162,7 +162,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void) | |||
162 | pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ | 162 | pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ |
163 | }) | 163 | }) |
164 | #define page_to_pfn(_page) ({ \ | 164 | #define page_to_pfn(_page) ({ \ |
165 | struct page *__p = (_page); \ | 165 | const struct page *__p = (_page); \ |
166 | struct pglist_data *pgdat; \ | 166 | struct pglist_data *pgdat; \ |
167 | pgdat = &pg_data_map[page_to_nid(__p)]; \ | 167 | pgdat = &pg_data_map[page_to_nid(__p)]; \ |
168 | ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ | 168 | ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ |
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 1f780b95c0f0..938986e412f1 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h | |||
@@ -22,7 +22,6 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key) | |||
22 | asm goto("1:\n\t" | 22 | asm goto("1:\n\t" |
23 | "nop\n\t" | 23 | "nop\n\t" |
24 | ".pushsection __jump_table, \"aw\"\n\t" | 24 | ".pushsection __jump_table, \"aw\"\n\t" |
25 | ".align 4\n\t" | ||
26 | JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" | 25 | JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" |
27 | ".popsection \n\t" | 26 | ".popsection \n\t" |
28 | : : "i" (key) : : l_yes); | 27 | : : "i" (key) : : l_yes); |
@@ -41,7 +40,6 @@ struct jump_entry { | |||
41 | jump_label_t code; | 40 | jump_label_t code; |
42 | jump_label_t target; | 41 | jump_label_t target; |
43 | jump_label_t key; | 42 | jump_label_t key; |
44 | jump_label_t pad; | ||
45 | }; | 43 | }; |
46 | 44 | ||
47 | #endif /* _ASM_POWERPC_JUMP_LABEL_H */ | 45 | #endif /* _ASM_POWERPC_JUMP_LABEL_H */ |
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h index 6857af58b02e..bffd062adf79 100644 --- a/arch/powerpc/include/asm/kdump.h +++ b/arch/powerpc/include/asm/kdump.h | |||
@@ -3,17 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | 5 | ||
6 | /* | ||
7 | * If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere. | ||
8 | * To keep enough space in the RMO for the first stage kernel on 64bit, we | ||
9 | * place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place | ||
10 | * the second stage at 32MB. | ||
11 | */ | ||
12 | #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64) | ||
13 | #define KDUMP_KERNELBASE 0x4000000 | ||
14 | #else | ||
15 | #define KDUMP_KERNELBASE 0x2000000 | 6 | #define KDUMP_KERNELBASE 0x2000000 |
16 | #endif | ||
17 | 7 | ||
18 | /* How many bytes to reserve at zero for kdump. The reserve limit should | 8 | /* How many bytes to reserve at zero for kdump. The reserve limit should |
19 | * be greater or equal to the trampoline's end address. | 9 | * be greater or equal to the trampoline's end address. |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e8aaf6fce38b..559da199edb5 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -1003,7 +1003,6 @@ | |||
1003 | #define PV_970 0x0039 | 1003 | #define PV_970 0x0039 |
1004 | #define PV_POWER5 0x003A | 1004 | #define PV_POWER5 0x003A |
1005 | #define PV_POWER5p 0x003B | 1005 | #define PV_POWER5p 0x003B |
1006 | #define PV_POWER7 0x003F | ||
1007 | #define PV_970FX 0x003C | 1006 | #define PV_970FX 0x003C |
1008 | #define PV_POWER6 0x003E | 1007 | #define PV_POWER6 0x003E |
1009 | #define PV_POWER7 0x003F | 1008 | #define PV_POWER7 0x003F |
@@ -1024,13 +1023,16 @@ | |||
1024 | #define mtmsrd(v) __mtmsrd((v), 0) | 1023 | #define mtmsrd(v) __mtmsrd((v), 0) |
1025 | #define mtmsr(v) mtmsrd(v) | 1024 | #define mtmsr(v) mtmsrd(v) |
1026 | #else | 1025 | #else |
1027 | #define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") | 1026 | #define mtmsr(v) asm volatile("mtmsr %0" : \ |
1027 | : "r" ((unsigned long)(v)) \ | ||
1028 | : "memory") | ||
1028 | #endif | 1029 | #endif |
1029 | 1030 | ||
1030 | #define mfspr(rn) ({unsigned long rval; \ | 1031 | #define mfspr(rn) ({unsigned long rval; \ |
1031 | asm volatile("mfspr %0," __stringify(rn) \ | 1032 | asm volatile("mfspr %0," __stringify(rn) \ |
1032 | : "=r" (rval)); rval;}) | 1033 | : "=r" (rval)); rval;}) |
1033 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ | 1034 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \ |
1035 | : "r" ((unsigned long)(v)) \ | ||
1034 | : "memory") | 1036 | : "memory") |
1035 | 1037 | ||
1036 | #ifdef __powerpc64__ | 1038 | #ifdef __powerpc64__ |
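On 64-bit kernels an int argument only guarantees the low 32 bits of the GPR, while mtspr/mtmsr consume the whole register; the added (unsigned long) cast makes the compiler extend the value to register width before the instruction reads it. A minimal, powerpc-only sketch of the fixed macro shape (renamed here so it does not clash with <asm/reg.h>; 0x113 is SPRN_SPRG3, used purely as an example):

#include <linux/stringify.h>

#define my_mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" \
				     : : "r" ((unsigned long)(v))   \
				     : "memory")

static inline void example_set_spr(unsigned int val)
{
	/*
	 * 'val' is only 32 bits wide; the cast extends it to the full
	 * GPR width before the instruction reads the whole register.
	 */
	my_mtspr(0x113, val);
}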
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9fb933248ab6..fa44ff538861 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
2051 | 2051 | ||
2052 | static struct cpu_spec the_cpu_spec; | 2052 | static struct cpu_spec the_cpu_spec; |
2053 | 2053 | ||
2054 | static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) | 2054 | static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, |
2055 | struct cpu_spec *s) | ||
2055 | { | 2056 | { |
2056 | struct cpu_spec *t = &the_cpu_spec; | 2057 | struct cpu_spec *t = &the_cpu_spec; |
2057 | struct cpu_spec old; | 2058 | struct cpu_spec old; |
@@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) | |||
2114 | t->cpu_setup(offset, t); | 2115 | t->cpu_setup(offset, t); |
2115 | } | 2116 | } |
2116 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ | 2117 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ |
2118 | |||
2119 | return t; | ||
2117 | } | 2120 | } |
2118 | 2121 | ||
2119 | struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) | 2122 | struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) |
@@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) | |||
2124 | s = PTRRELOC(s); | 2127 | s = PTRRELOC(s); |
2125 | 2128 | ||
2126 | for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { | 2129 | for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { |
2127 | if ((pvr & s->pvr_mask) == s->pvr_value) { | 2130 | if ((pvr & s->pvr_mask) == s->pvr_value) |
2128 | setup_cpu_spec(offset, s); | 2131 | return setup_cpu_spec(offset, s); |
2129 | return s; | ||
2130 | } | ||
2131 | } | 2132 | } |
2132 | 2133 | ||
2133 | BUG(); | 2134 | BUG(); |
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 1577434f4088..b25f6325fc70 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c | |||
@@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr) | |||
117 | EXPORT_SYMBOL(ioport_map); | 117 | EXPORT_SYMBOL(ioport_map); |
118 | EXPORT_SYMBOL(ioport_unmap); | 118 | EXPORT_SYMBOL(ioport_unmap); |
119 | 119 | ||
120 | #ifdef CONFIG_PCI | ||
120 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | 121 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) |
121 | { | 122 | { |
122 | resource_size_t start = pci_resource_start(dev, bar); | 123 | resource_size_t start = pci_resource_start(dev, bar); |
@@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr) | |||
146 | 147 | ||
147 | EXPORT_SYMBOL(pci_iomap); | 148 | EXPORT_SYMBOL(pci_iomap); |
148 | EXPORT_SYMBOL(pci_iounmap); | 149 | EXPORT_SYMBOL(pci_iounmap); |
150 | #endif /* CONFIG_PCI */ | ||
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 6658a1589955..9ce1672afb59 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -136,12 +136,16 @@ void __init reserve_crashkernel(void) | |||
136 | crashk_res.start = KDUMP_KERNELBASE; | 136 | crashk_res.start = KDUMP_KERNELBASE; |
137 | #else | 137 | #else |
138 | if (!crashk_res.start) { | 138 | if (!crashk_res.start) { |
139 | #ifdef CONFIG_PPC64 | ||
139 | /* | 140 | /* |
140 | * unspecified address, choose a region of specified size | 141 | * On 64bit we split the RMO in half but cap it at half of |
141 | * can overlap with initrd (ignoring corruption when retained) | 142 | * a small SLB (128MB) since the crash kernel needs to place |
142 | * ppc64 requires kernel and some stacks to be in first segemnt | 143 | * itself and some stacks to be in the first segment. |
143 | */ | 144 | */ |
145 | crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); | ||
146 | #else | ||
144 | crashk_res.start = KDUMP_KERNELBASE; | 147 | crashk_res.start = KDUMP_KERNELBASE; |
148 | #endif | ||
145 | } | 149 | } |
146 | 150 | ||
147 | crash_base = PAGE_ALIGN(crashk_res.start); | 151 | crash_base = PAGE_ALIGN(crashk_res.start); |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index d05ae4204bbf..564c1d8bdb5c 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) | |||
154 | ((unsigned long)ptr & 7)) | 154 | ((unsigned long)ptr & 7)) |
155 | return -EFAULT; | 155 | return -EFAULT; |
156 | 156 | ||
157 | if (!__get_user_inatomic(*ret, ptr)) | 157 | pagefault_disable(); |
158 | if (!__get_user_inatomic(*ret, ptr)) { | ||
159 | pagefault_enable(); | ||
158 | return 0; | 160 | return 0; |
161 | } | ||
162 | pagefault_enable(); | ||
159 | 163 | ||
160 | return read_user_stack_slow(ptr, ret, 8); | 164 | return read_user_stack_slow(ptr, ret, 8); |
161 | } | 165 | } |
@@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | |||
166 | ((unsigned long)ptr & 3)) | 170 | ((unsigned long)ptr & 3)) |
167 | return -EFAULT; | 171 | return -EFAULT; |
168 | 172 | ||
169 | if (!__get_user_inatomic(*ret, ptr)) | 173 | pagefault_disable(); |
174 | if (!__get_user_inatomic(*ret, ptr)) { | ||
175 | pagefault_enable(); | ||
170 | return 0; | 176 | return 0; |
177 | } | ||
178 | pagefault_enable(); | ||
171 | 179 | ||
172 | return read_user_stack_slow(ptr, ret, 4); | 180 | return read_user_stack_slow(ptr, ret, 4); |
173 | } | 181 | } |
@@ -294,11 +302,17 @@ static inline int current_is_64bit(void) | |||
294 | */ | 302 | */ |
295 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | 303 | static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) |
296 | { | 304 | { |
305 | int rc; | ||
306 | |||
297 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || | 307 | if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || |
298 | ((unsigned long)ptr & 3)) | 308 | ((unsigned long)ptr & 3)) |
299 | return -EFAULT; | 309 | return -EFAULT; |
300 | 310 | ||
301 | return __get_user_inatomic(*ret, ptr); | 311 | pagefault_disable(); |
312 | rc = __get_user_inatomic(*ret, ptr); | ||
313 | pagefault_enable(); | ||
314 | |||
315 | return rc; | ||
302 | } | 316 | } |
303 | 317 | ||
304 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, | 318 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, |
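The powerpc __get_user_inatomic() helper skips the might-sleep debug check but does not by itself stop the access from faulting pages in; the explicit pagefault_disable()/pagefault_enable() pair makes the fault handler return -EFAULT immediately, which is what the callchain code needs in NMI/interrupt context. The shape of the pattern, as a sketch:

#include <linux/uaccess.h>

/* Read one word from a user stack without ever sleeping. */
static int read_user_word(unsigned long __user *ptr, unsigned long *ret)
{
	int rc;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);	/* -EFAULT instead of faulting in */
	pagefault_enable();

	return rc;
}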
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index c016033ba78d..a909f4e9343b 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) | |||
1020 | } | 1020 | } |
1021 | if (addr == 0) | 1021 | if (addr == 0) |
1022 | return 0; | 1022 | return 0; |
1023 | RELOC(alloc_bottom) = addr; | 1023 | RELOC(alloc_bottom) = addr + size; |
1024 | 1024 | ||
1025 | prom_debug(" -> %x\n", addr); | 1025 | prom_debug(" -> %x\n", addr); |
1026 | prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom)); | 1026 | prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom)); |
@@ -1830,11 +1830,13 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, | |||
1830 | if (room > DEVTREE_CHUNK_SIZE) | 1830 | if (room > DEVTREE_CHUNK_SIZE) |
1831 | room = DEVTREE_CHUNK_SIZE; | 1831 | room = DEVTREE_CHUNK_SIZE; |
1832 | if (room < PAGE_SIZE) | 1832 | if (room < PAGE_SIZE) |
1833 | prom_panic("No memory for flatten_device_tree (no room)"); | 1833 | prom_panic("No memory for flatten_device_tree " |
1834 | "(no room)\n"); | ||
1834 | chunk = alloc_up(room, 0); | 1835 | chunk = alloc_up(room, 0); |
1835 | if (chunk == 0) | 1836 | if (chunk == 0) |
1836 | prom_panic("No memory for flatten_device_tree (claim failed)"); | 1837 | prom_panic("No memory for flatten_device_tree " |
1837 | *mem_end = RELOC(alloc_top); | 1838 | "(claim failed)\n"); |
1839 | *mem_end = chunk + room; | ||
1838 | } | 1840 | } |
1839 | 1841 | ||
1840 | ret = (void *)*mem_start; | 1842 | ret = (void *)*mem_start; |
@@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void) | |||
2042 | 2044 | ||
2043 | /* | 2045 | /* |
2044 | * Check how much room we have between alloc top & bottom (+/- a | 2046 | * Check how much room we have between alloc top & bottom (+/- a |
2045 | * few pages), crop to 4Mb, as this is our "chuck" size | 2047 | * few pages), crop to 1MB, as this is our "chunk" size |
2046 | */ | 2048 | */ |
2047 | room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000; | 2049 | room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000; |
2048 | if (room > DEVTREE_CHUNK_SIZE) | 2050 | if (room > DEVTREE_CHUNK_SIZE) |
@@ -2053,7 +2055,7 @@ static void __init flatten_device_tree(void) | |||
2053 | mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); | 2055 | mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); |
2054 | if (mem_start == 0) | 2056 | if (mem_start == 0) |
2055 | prom_panic("Can't allocate initial device-tree chunk\n"); | 2057 | prom_panic("Can't allocate initial device-tree chunk\n"); |
2056 | mem_end = RELOC(alloc_top); | 2058 | mem_end = mem_start + room; |
2057 | 2059 | ||
2058 | /* Get root of tree */ | 2060 | /* Get root of tree */ |
2059 | root = call_prom("peer", 1, 1, (phandle)0); | 2061 | root = call_prom("peer", 1, 1, (phandle)0); |
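alloc_up() is a bump allocator: recording addr + size (rather than addr) as the new bottom stops the next caller from being handed the same region, and sizing mem_end to chunk + room confines the device-tree writer to the chunk it actually claimed. A tiny userspace model of the corrected bookkeeping, illustrative only:

#include <stdio.h>

static unsigned long alloc_bottom = 0x1000;	/* model watermark */
static unsigned long alloc_top    = 0x100000;

/* Corrected bump allocator: advance the watermark past the allocation. */
static unsigned long alloc_up(unsigned long size, unsigned long align)
{
	unsigned long addr = (alloc_bottom + align - 1) & ~(align - 1);

	if (addr + size > alloc_top)
		return 0;
	alloc_bottom = addr + size;	/* was "= addr", which reused the block */
	return addr;
}

int main(void)
{
	printf("first : %#lx\n", alloc_up(0x100, 0x10));
	printf("second: %#lx\n", alloc_up(0x100, 0x10));	/* no longer overlaps */
	return 0;
}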
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 6dd33581a228..de2950135e6e 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1251,7 +1251,7 @@ BEGIN_FTR_SECTION | |||
1251 | reg = 0 | 1251 | reg = 0 |
1252 | .rept 32 | 1252 | .rept 32 |
1253 | li r6,reg*16+VCPU_VSRS | 1253 | li r6,reg*16+VCPU_VSRS |
1254 | stxvd2x reg,r6,r3 | 1254 | STXVD2X(reg,r6,r3) |
1255 | reg = reg + 1 | 1255 | reg = reg + 1 |
1256 | .endr | 1256 | .endr |
1257 | FTR_SECTION_ELSE | 1257 | FTR_SECTION_ELSE |
@@ -1313,7 +1313,7 @@ BEGIN_FTR_SECTION | |||
1313 | reg = 0 | 1313 | reg = 0 |
1314 | .rept 32 | 1314 | .rept 32 |
1315 | li r7,reg*16+VCPU_VSRS | 1315 | li r7,reg*16+VCPU_VSRS |
1316 | lxvd2x reg,r7,r4 | 1316 | LXVD2X(reg,r7,r4) |
1317 | reg = reg + 1 | 1317 | reg = reg + 1 |
1318 | .endr | 1318 | .endr |
1319 | FTR_SECTION_ELSE | 1319 | FTR_SECTION_ELSE |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index d0af7fb2f344..b9ba86191aed 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
@@ -24,7 +24,7 @@ source "arch/powerpc/platforms/wsp/Kconfig" | |||
24 | 24 | ||
25 | config KVM_GUEST | 25 | config KVM_GUEST |
26 | bool "KVM Guest support" | 26 | bool "KVM Guest support" |
27 | default y | 27 | default n |
28 | ---help--- | 28 | ---help--- |
29 | This option enables various optimizations for running under the KVM | 29 | This option enables various optimizations for running under the KVM |
30 | hypervisor. Overhead for the kernel when not running inside KVM should | 30 | hypervisor. Overhead for the kernel when not running inside KVM should |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index e9190073bb97..0e8656370063 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -181,7 +181,7 @@ static void dtl_stop(struct dtl *dtl) | |||
181 | 181 | ||
182 | lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; | 182 | lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; |
183 | 183 | ||
184 | unregister_dtl(hwcpu, __pa(dtl->buf)); | 184 | unregister_dtl(hwcpu); |
185 | } | 185 | } |
186 | 186 | ||
187 | static u64 dtl_current_index(struct dtl *dtl) | 187 | static u64 dtl_current_index(struct dtl *dtl) |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index bc0288501f17..83a3ca2fd282 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -135,7 +135,7 @@ static void pseries_mach_cpu_die(void) | |||
135 | get_lppaca()->idle = 0; | 135 | get_lppaca()->idle = 0; |
136 | 136 | ||
137 | if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { | 137 | if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { |
138 | unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); | 138 | unregister_slb_shadow(hwcpu); |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Call to start_secondary_resume() will not return. | 141 | * Call to start_secondary_resume() will not return. |
@@ -150,7 +150,7 @@ static void pseries_mach_cpu_die(void) | |||
150 | WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); | 150 | WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); |
151 | 151 | ||
152 | set_cpu_current_state(cpu, CPU_STATE_OFFLINE); | 152 | set_cpu_current_state(cpu, CPU_STATE_OFFLINE); |
153 | unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); | 153 | unregister_slb_shadow(hwcpu); |
154 | rtas_stop_self(); | 154 | rtas_stop_self(); |
155 | 155 | ||
156 | /* Should never get here... */ | 156 | /* Should never get here... */ |
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c index c829e6067d54..2c4dd1fb8333 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c | |||
@@ -212,17 +212,15 @@ static int __init ioei_init(void) | |||
212 | struct device_node *np; | 212 | struct device_node *np; |
213 | 213 | ||
214 | ioei_check_exception_token = rtas_token("check-exception"); | 214 | ioei_check_exception_token = rtas_token("check-exception"); |
215 | if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) { | 215 | if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) |
216 | pr_warning("IO Event IRQ not supported on this system !\n"); | ||
217 | return -ENODEV; | 216 | return -ENODEV; |
218 | } | 217 | |
219 | np = of_find_node_by_path("/event-sources/ibm,io-events"); | 218 | np = of_find_node_by_path("/event-sources/ibm,io-events"); |
220 | if (np) { | 219 | if (np) { |
221 | request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); | 220 | request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); |
221 | pr_info("IBM I/O event interrupts enabled\n"); | ||
222 | of_node_put(np); | 222 | of_node_put(np); |
223 | } else { | 223 | } else { |
224 | pr_err("io_event_irq: No ibm,io-events on system! " | ||
225 | "IO Event interrupt disabled.\n"); | ||
226 | return -ENODEV; | 224 | return -ENODEV; |
227 | } | 225 | } |
228 | return 0; | 226 | return 0; |
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 54cf3a4aa16b..7d94bdc63d50 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c | |||
@@ -25,20 +25,30 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | |||
25 | { | 25 | { |
26 | /* Don't risk a hypervisor call if we're crashing */ | 26 | /* Don't risk a hypervisor call if we're crashing */ |
27 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | 27 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { |
28 | unsigned long addr; | 28 | int ret; |
29 | int cpu = smp_processor_id(); | ||
30 | int hwcpu = hard_smp_processor_id(); | ||
29 | 31 | ||
30 | addr = __pa(get_slb_shadow()); | 32 | if (get_lppaca()->dtl_enable_mask) { |
31 | if (unregister_slb_shadow(hard_smp_processor_id(), addr)) | 33 | ret = unregister_dtl(hwcpu); |
32 | printk("SLB shadow buffer deregistration of " | 34 | if (ret) { |
33 | "cpu %u (hw_cpu_id %d) failed\n", | 35 | pr_err("WARNING: DTL deregistration for cpu " |
34 | smp_processor_id(), | 36 | "%d (hw %d) failed with %d\n", |
35 | hard_smp_processor_id()); | 37 | cpu, hwcpu, ret); |
38 | } | ||
39 | } | ||
40 | |||
41 | ret = unregister_slb_shadow(hwcpu); | ||
42 | if (ret) { | ||
43 | pr_err("WARNING: SLB shadow buffer deregistration " | ||
44 | "for cpu %d (hw %d) failed with %d\n", | ||
45 | cpu, hwcpu, ret); | ||
46 | } | ||
36 | 47 | ||
37 | addr = __pa(get_lppaca()); | 48 | ret = unregister_vpa(hwcpu); |
38 | if (unregister_vpa(hard_smp_processor_id(), addr)) { | 49 | if (ret) { |
39 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | 50 | pr_err("WARNING: VPA deregistration for cpu %d " |
40 | "failed\n", smp_processor_id(), | 51 | "(hw %d) failed with %d\n", cpu, hwcpu, ret); |
41 | hard_smp_processor_id()); | ||
42 | } | 52 | } |
43 | } | 53 | } |
44 | } | 54 | } |
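
In the kexec path above, the per-CPU firmware areas are now torn down one by one before the new kernel starts: the dispatch trace log first (and only if it was ever enabled), then the SLB shadow buffer, then the VPA, with every failure logged but none treated as fatal. The order follows the usual PAPR expectation that areas hanging off a VPA are deregistered before the VPA itself. A hedged user-space model of that "log and keep going" teardown, with the unregister_*() helpers stubbed out:

    #include <stdio.h>

    /* Stubs standing in for the plpar_wrappers.h helpers changed later in this diff. */
    static long unregister_dtl(int hwcpu)        { (void)hwcpu; return 0; }
    static long unregister_slb_shadow(int hwcpu) { (void)hwcpu; return 0; }
    static long unregister_vpa(int hwcpu)        { (void)hwcpu; return 0; }

    static void teardown_cpu_areas(int cpu, int hwcpu, int dtl_enabled)
    {
        long ret;

        /* DTL and SLB shadow are registered against the VPA, so drop them first. */
        if (dtl_enabled) {
            ret = unregister_dtl(hwcpu);
            if (ret)
                fprintf(stderr, "DTL deregistration for cpu %d (hw %d) failed with %ld\n",
                        cpu, hwcpu, ret);
        }

        ret = unregister_slb_shadow(hwcpu);
        if (ret)
            fprintf(stderr, "SLB shadow deregistration for cpu %d (hw %d) failed with %ld\n",
                    cpu, hwcpu, ret);

        /* Only then the VPA itself; we are about to kexec, so just warn on failure. */
        ret = unregister_vpa(hwcpu);
        if (ret)
            fprintf(stderr, "VPA deregistration for cpu %d (hw %d) failed with %ld\n",
                    cpu, hwcpu, ret);
    }

    int main(void)
    {
        teardown_cpu_areas(0, 0, 1);
        return 0;
    }
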
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index f7205d344efd..c9a29dae8c05 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -67,9 +67,8 @@ void vpa_init(int cpu) | |||
67 | ret = register_vpa(hwcpu, addr); | 67 | ret = register_vpa(hwcpu, addr); |
68 | 68 | ||
69 | if (ret) { | 69 | if (ret) { |
70 | printk(KERN_ERR "WARNING: vpa_init: VPA registration for " | 70 | pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " |
71 | "cpu %d (hw %d) of area %lx returns %ld\n", | 71 | "%lx failed with %ld\n", cpu, hwcpu, addr, ret); |
72 | cpu, hwcpu, addr, ret); | ||
73 | return; | 72 | return; |
74 | } | 73 | } |
75 | /* | 74 | /* |
@@ -80,10 +79,9 @@ void vpa_init(int cpu) | |||
80 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 79 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
81 | ret = register_slb_shadow(hwcpu, addr); | 80 | ret = register_slb_shadow(hwcpu, addr); |
82 | if (ret) | 81 | if (ret) |
83 | printk(KERN_ERR | 82 | pr_err("WARNING: SLB shadow buffer registration for " |
84 | "WARNING: vpa_init: SLB shadow buffer " | 83 | "cpu %d (hw %d) of area %lx failed with %ld\n", |
85 | "registration for cpu %d (hw %d) of area %lx " | 84 | cpu, hwcpu, addr, ret); |
86 | "returns %ld\n", cpu, hwcpu, addr, ret); | ||
87 | } | 85 | } |
88 | 86 | ||
89 | /* | 87 | /* |
@@ -100,8 +98,9 @@ void vpa_init(int cpu) | |||
100 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | 98 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; |
101 | ret = register_dtl(hwcpu, __pa(dtl)); | 99 | ret = register_dtl(hwcpu, __pa(dtl)); |
102 | if (ret) | 100 | if (ret) |
103 | pr_warn("DTL registration failed for cpu %d (%ld)\n", | 101 | pr_err("WARNING: DTL registration of cpu %d (hw %d) " |
104 | cpu, ret); | 102 | "failed with %ld\n", smp_processor_id(), |
103 | hwcpu, ret); | ||
105 | lppaca_of(cpu).dtl_enable_mask = 2; | 104 | lppaca_of(cpu).dtl_enable_mask = 2; |
106 | } | 105 | } |
107 | } | 106 | } |
@@ -204,7 +203,7 @@ static void pSeries_lpar_hptab_clear(void) | |||
204 | unsigned long ptel; | 203 | unsigned long ptel; |
205 | } ptes[4]; | 204 | } ptes[4]; |
206 | long lpar_rc; | 205 | long lpar_rc; |
207 | int i, j; | 206 | unsigned long i, j; |
208 | 207 | ||
209 | /* Read in batches of 4, | 208 | /* Read in batches of 4, |
210 | * invalidate only valid entries not in the VRMA | 209 | * invalidate only valid entries not in the VRMA |
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 4bf21207d7d3..41c24c146d6a 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h | |||
@@ -53,9 +53,9 @@ static inline long vpa_call(unsigned long flags, unsigned long cpu, | |||
53 | return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa); | 53 | return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa); |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline long unregister_vpa(unsigned long cpu, unsigned long vpa) | 56 | static inline long unregister_vpa(unsigned long cpu) |
57 | { | 57 | { |
58 | return vpa_call(0x5, cpu, vpa); | 58 | return vpa_call(0x5, cpu, 0); |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline long register_vpa(unsigned long cpu, unsigned long vpa) | 61 | static inline long register_vpa(unsigned long cpu, unsigned long vpa) |
@@ -63,9 +63,9 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa) | |||
63 | return vpa_call(0x1, cpu, vpa); | 63 | return vpa_call(0x1, cpu, vpa); |
64 | } | 64 | } |
65 | 65 | ||
66 | static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa) | 66 | static inline long unregister_slb_shadow(unsigned long cpu) |
67 | { | 67 | { |
68 | return vpa_call(0x7, cpu, vpa); | 68 | return vpa_call(0x7, cpu, 0); |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) | 71 | static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) |
@@ -73,9 +73,9 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) | |||
73 | return vpa_call(0x3, cpu, vpa); | 73 | return vpa_call(0x3, cpu, vpa); |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline long unregister_dtl(unsigned long cpu, unsigned long vpa) | 76 | static inline long unregister_dtl(unsigned long cpu) |
77 | { | 77 | { |
78 | return vpa_call(0x6, cpu, vpa); | 78 | return vpa_call(0x6, cpu, 0); |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline long register_dtl(unsigned long cpu, unsigned long vpa) | 81 | static inline long register_dtl(unsigned long cpu, unsigned long vpa) |
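
The plpar_wrappers.h hunk above is the interface change the earlier callers (dtl.c, hotplug-cpu.c, kexec.c) rely on: each helper is a thin wrapper around the H_REGISTER_VPA hcall, with the low flag bits selecting the sub-function (0x1 register VPA, 0x3 register SLB shadow, 0x5 unregister VPA, 0x6 unregister DTL, 0x7 unregister SLB shadow, as visible in the hunk). Deregistration does not need a buffer address, so those wrappers now take only the cpu and pass 0. A sketch of the same shape, with plpar_hcall_norets() replaced by a tracing stub so it builds outside the kernel; the flag-shifting detail of the real vpa_call() is deliberately left out:

    #include <stdio.h>

    #define H_REGISTER_VPA 0xDC   /* hcall number, as defined in asm/hvcall.h */

    /* Tracing stub in place of the real hypervisor call. */
    static long plpar_hcall_norets(unsigned long hcall, unsigned long flags,
                                   unsigned long cpu, unsigned long vpa)
    {
        printf("hcall %#lx, sub-function %#lx, cpu %lu, addr %#lx\n",
               hcall, flags, cpu, vpa);
        return 0;
    }

    static long vpa_call(unsigned long flags, unsigned long cpu, unsigned long vpa)
    {
        /* The kernel wrapper shifts 'flags' up into the proper bit field of
         * the hcall argument; kept raw here for readability. */
        return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
    }

    /* Registration needs the buffer address; deregistration does not, which is
     * why the unregister_* helpers lose their second argument in the hunk above. */
    static long register_vpa(unsigned long cpu, unsigned long vpa)        { return vpa_call(0x1, cpu, vpa); }
    static long register_slb_shadow(unsigned long cpu, unsigned long vpa) { return vpa_call(0x3, cpu, vpa); }
    static long unregister_slb_shadow(unsigned long cpu)                  { return vpa_call(0x7, cpu, 0); }
    static long unregister_dtl(unsigned long cpu)                         { return vpa_call(0x6, cpu, 0); }
    static long unregister_vpa(unsigned long cpu)                         { return vpa_call(0x5, cpu, 0); }

    int main(void)
    {
        register_vpa(0, 0x1000);
        register_slb_shadow(0, 0x2000);
        unregister_slb_shadow(0);
        unregister_dtl(0);
        unregister_vpa(0);
        return 0;
    }
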
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index d00e52926b71..0969fd98c4fa 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -324,8 +324,9 @@ static int alloc_dispatch_logs(void) | |||
324 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | 324 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; |
325 | ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); | 325 | ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); |
326 | if (ret) | 326 | if (ret) |
327 | pr_warn("DTL registration failed for boot cpu %d (%d)\n", | 327 | pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " |
328 | smp_processor_id(), ret); | 328 | "with %d\n", smp_processor_id(), |
329 | hard_smp_processor_id(), ret); | ||
329 | get_paca()->lppaca_ptr->dtl_enable_mask = 2; | 330 | get_paca()->lppaca_ptr->dtl_enable_mask = 2; |
330 | 331 | ||
331 | return 0; | 332 | return 0; |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 2de8551df40f..c65f75aa7ff7 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #define ODSR_CLEAR 0x1c00 | 54 | #define ODSR_CLEAR 0x1c00 |
55 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC | 55 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC |
56 | #define ESCSR_CLEAR 0x07120204 | 56 | #define ESCSR_CLEAR 0x07120204 |
57 | #define IECSR_CLEAR 0x80000000 | ||
57 | 58 | ||
58 | #define RIO_PORT1_EDCSR 0x0640 | 59 | #define RIO_PORT1_EDCSR 0x0640 |
59 | #define RIO_PORT2_EDCSR 0x0680 | 60 | #define RIO_PORT2_EDCSR 0x0680 |
@@ -1089,11 +1090,11 @@ static void port_error_handler(struct rio_mport *port, int offset) | |||
1089 | 1090 | ||
1090 | if (offset == 0) { | 1091 | if (offset == 0) { |
1091 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); | 1092 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); |
1092 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); | 1093 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); |
1093 | out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); | 1094 | out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); |
1094 | } else { | 1095 | } else { |
1095 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); | 1096 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); |
1096 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); | 1097 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); |
1097 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); | 1098 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); |
1098 | } | 1099 | } |
1099 | } | 1100 | } |
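
The IECSR change above reads like a classic write-one-to-clear fix: the error status bit in the port Implementation Error Command and Status Register is presumably cleared by writing a 1 to it, so the old write of 0 left the error latched, and IECSR_CLEAR (0x80000000) sets exactly the bit that needs clearing. A tiny stand-alone model of W1C status handling, with the hardware register emulated by a plain variable:

    #include <stdio.h>
    #include <stdint.h>

    #define IECSR_CLEAR 0x80000000u   /* bit to clear, as added in the hunk above */

    static uint32_t iecsr = 0x80000000u;  /* error already latched */

    /* W1C semantics: 1 bits written clear the corresponding status bit,
     * 0 bits are ignored.  This is why the driver must write IECSR_CLEAR
     * rather than 0. */
    static void w1c_write(uint32_t *reg, uint32_t val)
    {
        *reg &= ~val;
    }

    int main(void)
    {
        w1c_write(&iecsr, 0);             /* old code: no effect */
        printf("after writing 0:           %#x\n", (unsigned)iecsr);

        w1c_write(&iecsr, IECSR_CLEAR);   /* new code: error bit cleared */
        printf("after writing IECSR_CLEAR: %#x\n", (unsigned)iecsr);
        return 0;
    }
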
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index a59ba96d2c21..dbfe96bc878a 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c | |||
@@ -655,8 +655,6 @@ struct ppc4xx_pciex_hwops | |||
655 | 655 | ||
656 | static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; | 656 | static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; |
657 | 657 | ||
658 | #ifdef CONFIG_44x | ||
659 | |||
660 | static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port, | 658 | static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port, |
661 | unsigned int sdr_offset, | 659 | unsigned int sdr_offset, |
662 | unsigned int mask, | 660 | unsigned int mask, |
@@ -688,6 +686,7 @@ static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port) | |||
688 | return 0; | 686 | return 0; |
689 | } | 687 | } |
690 | 688 | ||
689 | |||
691 | static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) | 690 | static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) |
692 | { | 691 | { |
693 | printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); | 692 | printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); |
@@ -718,6 +717,8 @@ static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) | |||
718 | printk(KERN_INFO "PCIE%d: No device detected.\n", port->index); | 717 | printk(KERN_INFO "PCIE%d: No device detected.\n", port->index); |
719 | } | 718 | } |
720 | 719 | ||
720 | #ifdef CONFIG_44x | ||
721 | |||
721 | /* Check various reset bits of the 440SPe PCIe core */ | 722 | /* Check various reset bits of the 440SPe PCIe core */ |
722 | static int __init ppc440spe_pciex_check_reset(struct device_node *np) | 723 | static int __init ppc440spe_pciex_check_reset(struct device_node *np) |
723 | { | 724 | { |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 42c67beadcae..1a6f20d4e7e6 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -55,6 +55,7 @@ config SPARC64 | |||
55 | select PERF_USE_VMALLOC | 55 | select PERF_USE_VMALLOC |
56 | select IRQ_PREFLOW_FASTEOI | 56 | select IRQ_PREFLOW_FASTEOI |
57 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 57 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
58 | select HAVE_C_RECORDMCOUNT | ||
58 | 59 | ||
59 | config ARCH_DEFCONFIG | 60 | config ARCH_DEFCONFIG |
60 | string | 61 | string |
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h index a1607d180354..69914d748130 100644 --- a/arch/sparc/include/asm/sigcontext.h +++ b/arch/sparc/include/asm/sigcontext.h | |||
@@ -45,6 +45,19 @@ typedef struct { | |||
45 | int si_mask; | 45 | int si_mask; |
46 | } __siginfo32_t; | 46 | } __siginfo32_t; |
47 | 47 | ||
48 | #define __SIGC_MAXWIN 7 | ||
49 | |||
50 | typedef struct { | ||
51 | unsigned long locals[8]; | ||
52 | unsigned long ins[8]; | ||
53 | } __siginfo_reg_window; | ||
54 | |||
55 | typedef struct { | ||
56 | int wsaved; | ||
57 | __siginfo_reg_window reg_window[__SIGC_MAXWIN]; | ||
58 | unsigned long rwbuf_stkptrs[__SIGC_MAXWIN]; | ||
59 | } __siginfo_rwin_t; | ||
60 | |||
48 | #ifdef CONFIG_SPARC64 | 61 | #ifdef CONFIG_SPARC64 |
49 | typedef struct { | 62 | typedef struct { |
50 | unsigned int si_float_regs [64]; | 63 | unsigned int si_float_regs [64]; |
@@ -73,6 +86,7 @@ struct sigcontext { | |||
73 | unsigned long ss_size; | 86 | unsigned long ss_size; |
74 | } sigc_stack; | 87 | } sigc_stack; |
75 | unsigned long sigc_mask; | 88 | unsigned long sigc_mask; |
89 | __siginfo_rwin_t * sigc_rwin_save; | ||
76 | }; | 90 | }; |
77 | 91 | ||
78 | #else | 92 | #else |
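
The new __siginfo_rwin_t above is the user-visible container for register windows that were still pending when a signal was delivered: a count in wsaved, up to __SIGC_MAXWIN (7) windows of 8 locals and 8 ins each, plus the matching saved stack pointers; the 64-bit sigcontext now carries only a pointer to such a block. A small stand-alone mirror of the layout, handy for eyeballing its size on a 64-bit build (the types are copied from the hunk; nothing here is an ABI statement):

    #include <stdio.h>

    #define __SIGC_MAXWIN 7

    typedef struct {
        unsigned long locals[8];
        unsigned long ins[8];
    } __siginfo_reg_window;

    typedef struct {
        int wsaved;                                   /* how many windows follow */
        __siginfo_reg_window reg_window[__SIGC_MAXWIN];
        unsigned long rwbuf_stkptrs[__SIGC_MAXWIN];   /* stack pointer per window */
    } __siginfo_rwin_t;

    int main(void)
    {
        printf("one window: %zu bytes, whole block: %zu bytes\n",
               sizeof(__siginfo_reg_window), sizeof(__siginfo_rwin_t));
        return 0;
    }
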
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 5f5b8bf3f50d..bcc98fc35281 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
131 | *(volatile __u32 *)&lp->lock = ~0U; | 131 | *(volatile __u32 *)&lp->lock = ~0U; |
132 | } | 132 | } |
133 | 133 | ||
134 | static void inline arch_write_unlock(arch_rwlock_t *lock) | ||
135 | { | ||
136 | __asm__ __volatile__( | ||
137 | " st %%g0, [%0]" | ||
138 | : /* no outputs */ | ||
139 | : "r" (lock) | ||
140 | : "memory"); | ||
141 | } | ||
142 | |||
134 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 143 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
135 | { | 144 | { |
136 | unsigned int val; | 145 | unsigned int val; |
@@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw) | |||
175 | res; \ | 184 | res; \ |
176 | }) | 185 | }) |
177 | 186 | ||
178 | #define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) | ||
179 | |||
180 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | 187 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
181 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) | 188 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
182 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) | 189 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) |
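
The sparc32 arch_write_unlock() above turns the old plain-C macro ((rw)->lock = 0) into an inline-asm store of %g0 with a "memory" clobber, so the unlock also acts as a compiler barrier: the compiler can no longer sink stores from inside the critical section past the release of the lock word. An architecture-neutral sketch of the same idea for GCC/Clang (the lock type is a stand-in, and this shows only the compiler-ordering aspect, not CPU memory ordering):

    /* Plain store: nothing stops the compiler from reordering earlier
     * non-volatile stores below it, leaking them out of the critical section. */
    static inline void unlock_plain(unsigned int *lock)
    {
        *lock = 0;
    }

    /* Store preceded by a "memory" clobber: the asm acts as a compiler
     * barrier, so everything written before the unlock is emitted before the
     * lock word is cleared, which is what the sparc asm version provides. */
    static inline void unlock_barrier(unsigned int *lock)
    {
        __asm__ __volatile__("" : : : "memory");
        *(volatile unsigned int *)lock = 0;
    }

    int main(void)
    {
        static unsigned int lock = 1;

        unlock_plain(&lock);
        lock = 1;
        unlock_barrier(&lock);
        return (int)lock;   /* 0 */
    }
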
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 073936a8b275..968917694978 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -210,14 +210,8 @@ static int inline arch_write_trylock(arch_rwlock_t *lock) | |||
210 | return result; | 210 | return result; |
211 | } | 211 | } |
212 | 212 | ||
213 | #define arch_read_lock(p) arch_read_lock(p) | ||
214 | #define arch_read_lock_flags(p, f) arch_read_lock(p) | 213 | #define arch_read_lock_flags(p, f) arch_read_lock(p) |
215 | #define arch_read_trylock(p) arch_read_trylock(p) | ||
216 | #define arch_read_unlock(p) arch_read_unlock(p) | ||
217 | #define arch_write_lock(p) arch_write_lock(p) | ||
218 | #define arch_write_lock_flags(p, f) arch_write_lock(p) | 214 | #define arch_write_lock_flags(p, f) arch_write_lock(p) |
219 | #define arch_write_unlock(p) arch_write_unlock(p) | ||
220 | #define arch_write_trylock(p) arch_write_trylock(p) | ||
221 | 215 | ||
222 | #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 216 | #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define arch_write_can_lock(rw) (!(rw)->lock) | 217 | #define arch_write_can_lock(rw) (!(rw)->lock) |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index b90b4a1d070a..cb85458f89d2 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o | |||
32 | 32 | ||
33 | obj-y += process_$(BITS).o | 33 | obj-y += process_$(BITS).o |
34 | obj-y += signal_$(BITS).o | 34 | obj-y += signal_$(BITS).o |
35 | obj-y += sigutil_$(BITS).o | ||
35 | obj-$(CONFIG_SPARC32) += ioport.o | 36 | obj-$(CONFIG_SPARC32) += ioport.o |
36 | obj-y += setup_$(BITS).o | 37 | obj-y += setup_$(BITS).o |
37 | obj-y += idprom.o | 38 | obj-y += idprom.o |
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 490e5418740d..7429b47c3aca 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
@@ -1256,13 +1256,14 @@ static int __init ds_init(void) | |||
1256 | { | 1256 | { |
1257 | unsigned long hv_ret, major, minor; | 1257 | unsigned long hv_ret, major, minor; |
1258 | 1258 | ||
1259 | hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); | 1259 | if (tlb_type == hypervisor) { |
1260 | if (hv_ret == HV_EOK) { | 1260 | hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); |
1261 | pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", | 1261 | if (hv_ret == HV_EOK) { |
1262 | major, minor); | 1262 | pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", |
1263 | reboot_data_supported = 1; | 1263 | major, minor); |
1264 | reboot_data_supported = 1; | ||
1265 | } | ||
1264 | } | 1266 | } |
1265 | |||
1266 | kthread_run(ds_thread, NULL, "kldomd"); | 1267 | kthread_run(ds_thread, NULL, "kldomd"); |
1267 | 1268 | ||
1268 | return vio_register_driver(&ds_driver); | 1269 | return vio_register_driver(&ds_driver); |
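
The ds.c change above makes ds_init() query the reboot-data hypervisor API group only when the kernel is actually running on a sun4v hypervisor (tlb_type == hypervisor); on sun4u hardware there is no hypervisor to ask, so the probe is skipped and reboot_data_supported stays 0. A small sketch of that guard-then-probe pattern, with the version query stubbed and the group id below being a placeholder, not the real HV_GRP_REBOOT_DATA value:

    #include <stdio.h>

    enum tlb_type { spitfire, cheetah, cheetah_plus, hypervisor };

    /* Stub for sun4v_get_version(); pretend the queried API group exists. */
    static unsigned long sun4v_get_version(unsigned long group,
                                           unsigned long *maj, unsigned long *min)
    {
        (void)group;
        *maj = 1;
        *min = 0;
        return 0;   /* 0 == HV_EOK */
    }

    int main(void)
    {
        enum tlb_type tlb_type = hypervisor;   /* spitfire/cheetah on sun4u */
        unsigned long major, minor;
        int reboot_data_supported = 0;

        /* Only issue hypervisor calls when a hypervisor is actually present,
         * which is the point of the added tlb_type check. */
        if (tlb_type == hypervisor &&
            sun4v_get_version(0x1, &major, &minor) == 0) {
            printf("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", major, minor);
            reboot_data_supported = 1;
        }
        return !reboot_data_supported;
    }
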
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index a19f04195478..1aaf8c180be5 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c | |||
@@ -352,8 +352,8 @@ int __init pcic_probe(void) | |||
352 | strcpy(pbm->prom_name, namebuf); | 352 | strcpy(pbm->prom_name, namebuf); |
353 | 353 | ||
354 | { | 354 | { |
355 | extern volatile int t_nmi[1]; | 355 | extern volatile int t_nmi[4]; |
356 | extern int pcic_nmi_trap_patch[1]; | 356 | extern int pcic_nmi_trap_patch[4]; |
357 | 357 | ||
358 | t_nmi[0] = pcic_nmi_trap_patch[0]; | 358 | t_nmi[0] = pcic_nmi_trap_patch[0]; |
359 | t_nmi[1] = pcic_nmi_trap_patch[1]; | 359 | t_nmi[1] = pcic_nmi_trap_patch[1]; |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 75fad425e249..1ba95aff5d59 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <asm/visasm.h> | 29 | #include <asm/visasm.h> |
30 | #include <asm/compat_signal.h> | 30 | #include <asm/compat_signal.h> |
31 | 31 | ||
32 | #include "sigutil.h" | ||
33 | |||
32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 34 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
33 | 35 | ||
34 | /* This magic should be in g_upper[0] for all upper parts | 36 | /* This magic should be in g_upper[0] for all upper parts |
@@ -44,14 +46,14 @@ typedef struct { | |||
44 | struct signal_frame32 { | 46 | struct signal_frame32 { |
45 | struct sparc_stackf32 ss; | 47 | struct sparc_stackf32 ss; |
46 | __siginfo32_t info; | 48 | __siginfo32_t info; |
47 | /* __siginfo_fpu32_t * */ u32 fpu_save; | 49 | /* __siginfo_fpu_t * */ u32 fpu_save; |
48 | unsigned int insns[2]; | 50 | unsigned int insns[2]; |
49 | unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; | 51 | unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; |
50 | unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ | 52 | unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ |
51 | /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ | 53 | /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ |
52 | siginfo_extra_v8plus_t v8plus; | 54 | siginfo_extra_v8plus_t v8plus; |
53 | __siginfo_fpu_t fpu_state; | 55 | /* __siginfo_rwin_t * */u32 rwin_save; |
54 | }; | 56 | } __attribute__((aligned(8))); |
55 | 57 | ||
56 | typedef struct compat_siginfo{ | 58 | typedef struct compat_siginfo{ |
57 | int si_signo; | 59 | int si_signo; |
@@ -110,18 +112,14 @@ struct rt_signal_frame32 { | |||
110 | compat_siginfo_t info; | 112 | compat_siginfo_t info; |
111 | struct pt_regs32 regs; | 113 | struct pt_regs32 regs; |
112 | compat_sigset_t mask; | 114 | compat_sigset_t mask; |
113 | /* __siginfo_fpu32_t * */ u32 fpu_save; | 115 | /* __siginfo_fpu_t * */ u32 fpu_save; |
114 | unsigned int insns[2]; | 116 | unsigned int insns[2]; |
115 | stack_t32 stack; | 117 | stack_t32 stack; |
116 | unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ | 118 | unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ |
117 | /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ | 119 | /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ |
118 | siginfo_extra_v8plus_t v8plus; | 120 | siginfo_extra_v8plus_t v8plus; |
119 | __siginfo_fpu_t fpu_state; | 121 | /* __siginfo_rwin_t * */u32 rwin_save; |
120 | }; | 122 | } __attribute__((aligned(8))); |
121 | |||
122 | /* Align macros */ | ||
123 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) | ||
124 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) | ||
125 | 123 | ||
126 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | 124 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) |
127 | { | 125 | { |
@@ -192,30 +190,13 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
192 | return 0; | 190 | return 0; |
193 | } | 191 | } |
194 | 192 | ||
195 | static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
196 | { | ||
197 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
198 | unsigned long fprs; | ||
199 | int err; | ||
200 | |||
201 | err = __get_user(fprs, &fpu->si_fprs); | ||
202 | fprs_write(0); | ||
203 | regs->tstate &= ~TSTATE_PEF; | ||
204 | if (fprs & FPRS_DL) | ||
205 | err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32)); | ||
206 | if (fprs & FPRS_DU) | ||
207 | err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32)); | ||
208 | err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
209 | err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
210 | current_thread_info()->fpsaved[0] |= fprs; | ||
211 | return err; | ||
212 | } | ||
213 | |||
214 | void do_sigreturn32(struct pt_regs *regs) | 193 | void do_sigreturn32(struct pt_regs *regs) |
215 | { | 194 | { |
216 | struct signal_frame32 __user *sf; | 195 | struct signal_frame32 __user *sf; |
196 | compat_uptr_t fpu_save; | ||
197 | compat_uptr_t rwin_save; | ||
217 | unsigned int psr; | 198 | unsigned int psr; |
218 | unsigned pc, npc, fpu_save; | 199 | unsigned pc, npc; |
219 | sigset_t set; | 200 | sigset_t set; |
220 | unsigned seta[_COMPAT_NSIG_WORDS]; | 201 | unsigned seta[_COMPAT_NSIG_WORDS]; |
221 | int err, i; | 202 | int err, i; |
@@ -273,8 +254,13 @@ void do_sigreturn32(struct pt_regs *regs) | |||
273 | pt_regs_clear_syscall(regs); | 254 | pt_regs_clear_syscall(regs); |
274 | 255 | ||
275 | err |= __get_user(fpu_save, &sf->fpu_save); | 256 | err |= __get_user(fpu_save, &sf->fpu_save); |
276 | if (fpu_save) | 257 | if (!err && fpu_save) |
277 | err |= restore_fpu_state32(regs, &sf->fpu_state); | 258 | err |= restore_fpu_state(regs, compat_ptr(fpu_save)); |
259 | err |= __get_user(rwin_save, &sf->rwin_save); | ||
260 | if (!err && rwin_save) { | ||
261 | if (restore_rwin_state(compat_ptr(rwin_save))) | ||
262 | goto segv; | ||
263 | } | ||
278 | err |= __get_user(seta[0], &sf->info.si_mask); | 264 | err |= __get_user(seta[0], &sf->info.si_mask); |
279 | err |= copy_from_user(seta+1, &sf->extramask, | 265 | err |= copy_from_user(seta+1, &sf->extramask, |
280 | (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); | 266 | (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); |
@@ -300,7 +286,9 @@ segv: | |||
300 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | 286 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) |
301 | { | 287 | { |
302 | struct rt_signal_frame32 __user *sf; | 288 | struct rt_signal_frame32 __user *sf; |
303 | unsigned int psr, pc, npc, fpu_save, u_ss_sp; | 289 | unsigned int psr, pc, npc, u_ss_sp; |
290 | compat_uptr_t fpu_save; | ||
291 | compat_uptr_t rwin_save; | ||
304 | mm_segment_t old_fs; | 292 | mm_segment_t old_fs; |
305 | sigset_t set; | 293 | sigset_t set; |
306 | compat_sigset_t seta; | 294 | compat_sigset_t seta; |
@@ -359,8 +347,8 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | |||
359 | pt_regs_clear_syscall(regs); | 347 | pt_regs_clear_syscall(regs); |
360 | 348 | ||
361 | err |= __get_user(fpu_save, &sf->fpu_save); | 349 | err |= __get_user(fpu_save, &sf->fpu_save); |
362 | if (fpu_save) | 350 | if (!err && fpu_save) |
363 | err |= restore_fpu_state32(regs, &sf->fpu_state); | 351 | err |= restore_fpu_state(regs, compat_ptr(fpu_save)); |
364 | err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); | 352 | err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); |
365 | err |= __get_user(u_ss_sp, &sf->stack.ss_sp); | 353 | err |= __get_user(u_ss_sp, &sf->stack.ss_sp); |
366 | st.ss_sp = compat_ptr(u_ss_sp); | 354 | st.ss_sp = compat_ptr(u_ss_sp); |
@@ -376,6 +364,12 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | |||
376 | do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); | 364 | do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); |
377 | set_fs(old_fs); | 365 | set_fs(old_fs); |
378 | 366 | ||
367 | err |= __get_user(rwin_save, &sf->rwin_save); | ||
368 | if (!err && rwin_save) { | ||
369 | if (restore_rwin_state(compat_ptr(rwin_save))) | ||
370 | goto segv; | ||
371 | } | ||
372 | |||
379 | switch (_NSIG_WORDS) { | 373 | switch (_NSIG_WORDS) { |
380 | case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); | 374 | case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); |
381 | case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); | 375 | case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); |
@@ -433,26 +427,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns | |||
433 | return (void __user *) sp; | 427 | return (void __user *) sp; |
434 | } | 428 | } |
435 | 429 | ||
436 | static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
437 | { | ||
438 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
439 | unsigned long fprs; | ||
440 | int err = 0; | ||
441 | |||
442 | fprs = current_thread_info()->fpsaved[0]; | ||
443 | if (fprs & FPRS_DL) | ||
444 | err |= copy_to_user(&fpu->si_float_regs[0], fpregs, | ||
445 | (sizeof(unsigned int) * 32)); | ||
446 | if (fprs & FPRS_DU) | ||
447 | err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, | ||
448 | (sizeof(unsigned int) * 32)); | ||
449 | err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
450 | err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
451 | err |= __put_user(fprs, &fpu->si_fprs); | ||
452 | |||
453 | return err; | ||
454 | } | ||
455 | |||
456 | /* The I-cache flush instruction only works in the primary ASI, which | 430 | /* The I-cache flush instruction only works in the primary ASI, which |
457 | * right now is the nucleus, aka. kernel space. | 431 | * right now is the nucleus, aka. kernel space. |
458 | * | 432 | * |
@@ -515,18 +489,23 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
515 | int signo, sigset_t *oldset) | 489 | int signo, sigset_t *oldset) |
516 | { | 490 | { |
517 | struct signal_frame32 __user *sf; | 491 | struct signal_frame32 __user *sf; |
492 | int i, err, wsaved; | ||
493 | void __user *tail; | ||
518 | int sigframe_size; | 494 | int sigframe_size; |
519 | u32 psr; | 495 | u32 psr; |
520 | int i, err; | ||
521 | unsigned int seta[_COMPAT_NSIG_WORDS]; | 496 | unsigned int seta[_COMPAT_NSIG_WORDS]; |
522 | 497 | ||
523 | /* 1. Make sure everything is clean */ | 498 | /* 1. Make sure everything is clean */ |
524 | synchronize_user_stack(); | 499 | synchronize_user_stack(); |
525 | save_and_clear_fpu(); | 500 | save_and_clear_fpu(); |
526 | 501 | ||
527 | sigframe_size = SF_ALIGNEDSZ; | 502 | wsaved = get_thread_wsaved(); |
528 | if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) | 503 | |
529 | sigframe_size -= sizeof(__siginfo_fpu_t); | 504 | sigframe_size = sizeof(*sf); |
505 | if (current_thread_info()->fpsaved[0] & FPRS_FEF) | ||
506 | sigframe_size += sizeof(__siginfo_fpu_t); | ||
507 | if (wsaved) | ||
508 | sigframe_size += sizeof(__siginfo_rwin_t); | ||
530 | 509 | ||
531 | sf = (struct signal_frame32 __user *) | 510 | sf = (struct signal_frame32 __user *) |
532 | get_sigframe(&ka->sa, regs, sigframe_size); | 511 | get_sigframe(&ka->sa, regs, sigframe_size); |
@@ -534,8 +513,7 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
534 | if (invalid_frame_pointer(sf, sigframe_size)) | 513 | if (invalid_frame_pointer(sf, sigframe_size)) |
535 | goto sigill; | 514 | goto sigill; |
536 | 515 | ||
537 | if (get_thread_wsaved() != 0) | 516 | tail = (sf + 1); |
538 | goto sigill; | ||
539 | 517 | ||
540 | /* 2. Save the current process state */ | 518 | /* 2. Save the current process state */ |
541 | if (test_thread_flag(TIF_32BIT)) { | 519 | if (test_thread_flag(TIF_32BIT)) { |
@@ -560,11 +538,22 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
560 | &sf->v8plus.asi); | 538 | &sf->v8plus.asi); |
561 | 539 | ||
562 | if (psr & PSR_EF) { | 540 | if (psr & PSR_EF) { |
563 | err |= save_fpu_state32(regs, &sf->fpu_state); | 541 | __siginfo_fpu_t __user *fp = tail; |
564 | err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); | 542 | tail += sizeof(*fp); |
543 | err |= save_fpu_state(regs, fp); | ||
544 | err |= __put_user((u64)fp, &sf->fpu_save); | ||
565 | } else { | 545 | } else { |
566 | err |= __put_user(0, &sf->fpu_save); | 546 | err |= __put_user(0, &sf->fpu_save); |
567 | } | 547 | } |
548 | if (wsaved) { | ||
549 | __siginfo_rwin_t __user *rwp = tail; | ||
550 | tail += sizeof(*rwp); | ||
551 | err |= save_rwin_state(wsaved, rwp); | ||
552 | err |= __put_user((u64)rwp, &sf->rwin_save); | ||
553 | set_thread_wsaved(0); | ||
554 | } else { | ||
555 | err |= __put_user(0, &sf->rwin_save); | ||
556 | } | ||
568 | 557 | ||
569 | switch (_NSIG_WORDS) { | 558 | switch (_NSIG_WORDS) { |
570 | case 4: seta[7] = (oldset->sig[3] >> 32); | 559 | case 4: seta[7] = (oldset->sig[3] >> 32); |
@@ -580,10 +569,21 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
580 | err |= __copy_to_user(sf->extramask, seta + 1, | 569 | err |= __copy_to_user(sf->extramask, seta + 1, |
581 | (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); | 570 | (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); |
582 | 571 | ||
583 | err |= copy_in_user((u32 __user *)sf, | 572 | if (!wsaved) { |
584 | (u32 __user *)(regs->u_regs[UREG_FP]), | 573 | err |= copy_in_user((u32 __user *)sf, |
585 | sizeof(struct reg_window32)); | 574 | (u32 __user *)(regs->u_regs[UREG_FP]), |
586 | 575 | sizeof(struct reg_window32)); | |
576 | } else { | ||
577 | struct reg_window *rp; | ||
578 | |||
579 | rp = &current_thread_info()->reg_window[wsaved - 1]; | ||
580 | for (i = 0; i < 8; i++) | ||
581 | err |= __put_user(rp->locals[i], &sf->ss.locals[i]); | ||
582 | for (i = 0; i < 6; i++) | ||
583 | err |= __put_user(rp->ins[i], &sf->ss.ins[i]); | ||
584 | err |= __put_user(rp->ins[6], &sf->ss.fp); | ||
585 | err |= __put_user(rp->ins[7], &sf->ss.callers_pc); | ||
586 | } | ||
587 | if (err) | 587 | if (err) |
588 | goto sigsegv; | 588 | goto sigsegv; |
589 | 589 | ||
@@ -613,7 +613,6 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
613 | err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/ | 613 | err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/ |
614 | if (err) | 614 | if (err) |
615 | goto sigsegv; | 615 | goto sigsegv; |
616 | |||
617 | flush_signal_insns(address); | 616 | flush_signal_insns(address); |
618 | } | 617 | } |
619 | return 0; | 618 | return 0; |
@@ -632,18 +631,23 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
632 | siginfo_t *info) | 631 | siginfo_t *info) |
633 | { | 632 | { |
634 | struct rt_signal_frame32 __user *sf; | 633 | struct rt_signal_frame32 __user *sf; |
634 | int i, err, wsaved; | ||
635 | void __user *tail; | ||
635 | int sigframe_size; | 636 | int sigframe_size; |
636 | u32 psr; | 637 | u32 psr; |
637 | int i, err; | ||
638 | compat_sigset_t seta; | 638 | compat_sigset_t seta; |
639 | 639 | ||
640 | /* 1. Make sure everything is clean */ | 640 | /* 1. Make sure everything is clean */ |
641 | synchronize_user_stack(); | 641 | synchronize_user_stack(); |
642 | save_and_clear_fpu(); | 642 | save_and_clear_fpu(); |
643 | 643 | ||
644 | sigframe_size = RT_ALIGNEDSZ; | 644 | wsaved = get_thread_wsaved(); |
645 | if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) | 645 | |
646 | sigframe_size -= sizeof(__siginfo_fpu_t); | 646 | sigframe_size = sizeof(*sf); |
647 | if (current_thread_info()->fpsaved[0] & FPRS_FEF) | ||
648 | sigframe_size += sizeof(__siginfo_fpu_t); | ||
649 | if (wsaved) | ||
650 | sigframe_size += sizeof(__siginfo_rwin_t); | ||
647 | 651 | ||
648 | sf = (struct rt_signal_frame32 __user *) | 652 | sf = (struct rt_signal_frame32 __user *) |
649 | get_sigframe(&ka->sa, regs, sigframe_size); | 653 | get_sigframe(&ka->sa, regs, sigframe_size); |
@@ -651,8 +655,7 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
651 | if (invalid_frame_pointer(sf, sigframe_size)) | 655 | if (invalid_frame_pointer(sf, sigframe_size)) |
652 | goto sigill; | 656 | goto sigill; |
653 | 657 | ||
654 | if (get_thread_wsaved() != 0) | 658 | tail = (sf + 1); |
655 | goto sigill; | ||
656 | 659 | ||
657 | /* 2. Save the current process state */ | 660 | /* 2. Save the current process state */ |
658 | if (test_thread_flag(TIF_32BIT)) { | 661 | if (test_thread_flag(TIF_32BIT)) { |
@@ -677,11 +680,22 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
677 | &sf->v8plus.asi); | 680 | &sf->v8plus.asi); |
678 | 681 | ||
679 | if (psr & PSR_EF) { | 682 | if (psr & PSR_EF) { |
680 | err |= save_fpu_state32(regs, &sf->fpu_state); | 683 | __siginfo_fpu_t __user *fp = tail; |
681 | err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); | 684 | tail += sizeof(*fp); |
685 | err |= save_fpu_state(regs, fp); | ||
686 | err |= __put_user((u64)fp, &sf->fpu_save); | ||
682 | } else { | 687 | } else { |
683 | err |= __put_user(0, &sf->fpu_save); | 688 | err |= __put_user(0, &sf->fpu_save); |
684 | } | 689 | } |
690 | if (wsaved) { | ||
691 | __siginfo_rwin_t __user *rwp = tail; | ||
692 | tail += sizeof(*rwp); | ||
693 | err |= save_rwin_state(wsaved, rwp); | ||
694 | err |= __put_user((u64)rwp, &sf->rwin_save); | ||
695 | set_thread_wsaved(0); | ||
696 | } else { | ||
697 | err |= __put_user(0, &sf->rwin_save); | ||
698 | } | ||
685 | 699 | ||
686 | /* Update the siginfo structure. */ | 700 | /* Update the siginfo structure. */ |
687 | err |= copy_siginfo_to_user32(&sf->info, info); | 701 | err |= copy_siginfo_to_user32(&sf->info, info); |
@@ -703,9 +717,21 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
703 | } | 717 | } |
704 | err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); | 718 | err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); |
705 | 719 | ||
706 | err |= copy_in_user((u32 __user *)sf, | 720 | if (!wsaved) { |
707 | (u32 __user *)(regs->u_regs[UREG_FP]), | 721 | err |= copy_in_user((u32 __user *)sf, |
708 | sizeof(struct reg_window32)); | 722 | (u32 __user *)(regs->u_regs[UREG_FP]), |
723 | sizeof(struct reg_window32)); | ||
724 | } else { | ||
725 | struct reg_window *rp; | ||
726 | |||
727 | rp = &current_thread_info()->reg_window[wsaved - 1]; | ||
728 | for (i = 0; i < 8; i++) | ||
729 | err |= __put_user(rp->locals[i], &sf->ss.locals[i]); | ||
730 | for (i = 0; i < 6; i++) | ||
731 | err |= __put_user(rp->ins[i], &sf->ss.ins[i]); | ||
732 | err |= __put_user(rp->ins[6], &sf->ss.fp); | ||
733 | err |= __put_user(rp->ins[7], &sf->ss.callers_pc); | ||
734 | } | ||
709 | if (err) | 735 | if (err) |
710 | goto sigsegv; | 736 | goto sigsegv; |
711 | 737 | ||
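
The compat signal-frame rework above replaces the old behaviour (kill the task with SIGILL if any register windows were still pending) with a variable-sized frame: the base struct is followed at 'tail' by an optional __siginfo_fpu_t and an optional __siginfo_rwin_t, the frame records user pointers to whichever blocks were actually written (or 0), and sigreturn only dereferences them when they are non-NULL and the reads so far succeeded. A user-space sketch of that tail-append layout, with toy block types in place of the real ones:

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for the real frame and its optional trailing blocks. */
    struct frame      { void *fpu_save; void *rwin_save; };
    struct fpu_block  { char regs[64]; };
    struct rwin_block { char wins[96]; };

    int main(void)
    {
        static unsigned long stackbuf[64];   /* pretend user stack, suitably aligned */
        char *sp_top = (char *)(stackbuf + 64);
        int have_fpu = 1, wsaved = 2;        /* pretend thread state to save */
        size_t size;
        struct frame *sf;
        char *tail;

        /* 1. Size the frame: base struct plus whatever optional blocks apply. */
        size = sizeof(*sf);
        if (have_fpu)
            size += sizeof(struct fpu_block);
        if (wsaved)
            size += sizeof(struct rwin_block);

        /* 2. Carve the frame out of the stack and append the blocks at 'tail'. */
        sf = (struct frame *)(sp_top - size);
        tail = (char *)(sf + 1);

        if (have_fpu) {
            sf->fpu_save = tail;             /* frame remembers where the block went */
            tail += sizeof(struct fpu_block);
        } else {
            sf->fpu_save = NULL;
        }
        if (wsaved) {
            sf->rwin_save = tail;
            tail += sizeof(struct rwin_block);
        } else {
            sf->rwin_save = NULL;
        }

        printf("frame %p, fpu block %p, rwin block %p, %zu bytes total\n",
               (void *)sf, sf->fpu_save, sf->rwin_save, size);
        return 0;
    }
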
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 5e5c5fd03783..04ede8f04add 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/cacheflush.h> /* flush_sig_insns */ | 27 | #include <asm/cacheflush.h> /* flush_sig_insns */ |
28 | 28 | ||
29 | #include "sigutil.h" | ||
30 | |||
29 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
30 | 32 | ||
31 | extern void fpsave(unsigned long *fpregs, unsigned long *fsr, | 33 | extern void fpsave(unsigned long *fpregs, unsigned long *fsr, |
@@ -39,8 +41,8 @@ struct signal_frame { | |||
39 | unsigned long insns[2] __attribute__ ((aligned (8))); | 41 | unsigned long insns[2] __attribute__ ((aligned (8))); |
40 | unsigned int extramask[_NSIG_WORDS - 1]; | 42 | unsigned int extramask[_NSIG_WORDS - 1]; |
41 | unsigned int extra_size; /* Should be 0 */ | 43 | unsigned int extra_size; /* Should be 0 */ |
42 | __siginfo_fpu_t fpu_state; | 44 | __siginfo_rwin_t __user *rwin_save; |
43 | }; | 45 | } __attribute__((aligned(8))); |
44 | 46 | ||
45 | struct rt_signal_frame { | 47 | struct rt_signal_frame { |
46 | struct sparc_stackf ss; | 48 | struct sparc_stackf ss; |
@@ -51,8 +53,8 @@ struct rt_signal_frame { | |||
51 | unsigned int insns[2]; | 53 | unsigned int insns[2]; |
52 | stack_t stack; | 54 | stack_t stack; |
53 | unsigned int extra_size; /* Should be 0 */ | 55 | unsigned int extra_size; /* Should be 0 */ |
54 | __siginfo_fpu_t fpu_state; | 56 | __siginfo_rwin_t __user *rwin_save; |
55 | }; | 57 | } __attribute__((aligned(8))); |
56 | 58 | ||
57 | /* Align macros */ | 59 | /* Align macros */ |
58 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) | 60 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) |
@@ -79,43 +81,13 @@ asmlinkage int sys_sigsuspend(old_sigset_t set) | |||
79 | return _sigpause_common(set); | 81 | return _sigpause_common(set); |
80 | } | 82 | } |
81 | 83 | ||
82 | static inline int | ||
83 | restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
84 | { | ||
85 | int err; | ||
86 | #ifdef CONFIG_SMP | ||
87 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) | ||
88 | regs->psr &= ~PSR_EF; | ||
89 | #else | ||
90 | if (current == last_task_used_math) { | ||
91 | last_task_used_math = NULL; | ||
92 | regs->psr &= ~PSR_EF; | ||
93 | } | ||
94 | #endif | ||
95 | set_used_math(); | ||
96 | clear_tsk_thread_flag(current, TIF_USEDFPU); | ||
97 | |||
98 | if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) | ||
99 | return -EFAULT; | ||
100 | |||
101 | err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], | ||
102 | (sizeof(unsigned long) * 32)); | ||
103 | err |= __get_user(current->thread.fsr, &fpu->si_fsr); | ||
104 | err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); | ||
105 | if (current->thread.fpqdepth != 0) | ||
106 | err |= __copy_from_user(&current->thread.fpqueue[0], | ||
107 | &fpu->si_fpqueue[0], | ||
108 | ((sizeof(unsigned long) + | ||
109 | (sizeof(unsigned long *)))*16)); | ||
110 | return err; | ||
111 | } | ||
112 | |||
113 | asmlinkage void do_sigreturn(struct pt_regs *regs) | 84 | asmlinkage void do_sigreturn(struct pt_regs *regs) |
114 | { | 85 | { |
115 | struct signal_frame __user *sf; | 86 | struct signal_frame __user *sf; |
116 | unsigned long up_psr, pc, npc; | 87 | unsigned long up_psr, pc, npc; |
117 | sigset_t set; | 88 | sigset_t set; |
118 | __siginfo_fpu_t __user *fpu_save; | 89 | __siginfo_fpu_t __user *fpu_save; |
90 | __siginfo_rwin_t __user *rwin_save; | ||
119 | int err; | 91 | int err; |
120 | 92 | ||
121 | /* Always make any pending restarted system calls return -EINTR */ | 93 | /* Always make any pending restarted system calls return -EINTR */ |
@@ -150,9 +122,11 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) | |||
150 | pt_regs_clear_syscall(regs); | 122 | pt_regs_clear_syscall(regs); |
151 | 123 | ||
152 | err |= __get_user(fpu_save, &sf->fpu_save); | 124 | err |= __get_user(fpu_save, &sf->fpu_save); |
153 | |||
154 | if (fpu_save) | 125 | if (fpu_save) |
155 | err |= restore_fpu_state(regs, fpu_save); | 126 | err |= restore_fpu_state(regs, fpu_save); |
127 | err |= __get_user(rwin_save, &sf->rwin_save); | ||
128 | if (rwin_save) | ||
129 | err |= restore_rwin_state(rwin_save); | ||
156 | 130 | ||
157 | /* This is pretty much atomic, no amount locking would prevent | 131 | /* This is pretty much atomic, no amount locking would prevent |
158 | * the races which exist anyways. | 132 | * the races which exist anyways. |
@@ -180,6 +154,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
180 | struct rt_signal_frame __user *sf; | 154 | struct rt_signal_frame __user *sf; |
181 | unsigned int psr, pc, npc; | 155 | unsigned int psr, pc, npc; |
182 | __siginfo_fpu_t __user *fpu_save; | 156 | __siginfo_fpu_t __user *fpu_save; |
157 | __siginfo_rwin_t __user *rwin_save; | ||
183 | mm_segment_t old_fs; | 158 | mm_segment_t old_fs; |
184 | sigset_t set; | 159 | sigset_t set; |
185 | stack_t st; | 160 | stack_t st; |
@@ -207,8 +182,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
207 | pt_regs_clear_syscall(regs); | 182 | pt_regs_clear_syscall(regs); |
208 | 183 | ||
209 | err |= __get_user(fpu_save, &sf->fpu_save); | 184 | err |= __get_user(fpu_save, &sf->fpu_save); |
210 | 185 | if (!err && fpu_save) | |
211 | if (fpu_save) | ||
212 | err |= restore_fpu_state(regs, fpu_save); | 186 | err |= restore_fpu_state(regs, fpu_save); |
213 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); | 187 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); |
214 | 188 | ||
@@ -228,6 +202,12 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
228 | do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf); | 202 | do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf); |
229 | set_fs(old_fs); | 203 | set_fs(old_fs); |
230 | 204 | ||
205 | err |= __get_user(rwin_save, &sf->rwin_save); | ||
206 | if (!err && rwin_save) { | ||
207 | if (restore_rwin_state(rwin_save)) | ||
208 | goto segv; | ||
209 | } | ||
210 | |||
231 | sigdelsetmask(&set, ~_BLOCKABLE); | 211 | sigdelsetmask(&set, ~_BLOCKABLE); |
232 | spin_lock_irq(¤t->sighand->siglock); | 212 | spin_lock_irq(¤t->sighand->siglock); |
233 | current->blocked = set; | 213 | current->blocked = set; |
@@ -280,53 +260,23 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re | |||
280 | return (void __user *) sp; | 260 | return (void __user *) sp; |
281 | } | 261 | } |
282 | 262 | ||
283 | static inline int | ||
284 | save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
285 | { | ||
286 | int err = 0; | ||
287 | #ifdef CONFIG_SMP | ||
288 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) { | ||
289 | put_psr(get_psr() | PSR_EF); | ||
290 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | ||
291 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | ||
292 | regs->psr &= ~(PSR_EF); | ||
293 | clear_tsk_thread_flag(current, TIF_USEDFPU); | ||
294 | } | ||
295 | #else | ||
296 | if (current == last_task_used_math) { | ||
297 | put_psr(get_psr() | PSR_EF); | ||
298 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | ||
299 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | ||
300 | last_task_used_math = NULL; | ||
301 | regs->psr &= ~(PSR_EF); | ||
302 | } | ||
303 | #endif | ||
304 | err |= __copy_to_user(&fpu->si_float_regs[0], | ||
305 | &current->thread.float_regs[0], | ||
306 | (sizeof(unsigned long) * 32)); | ||
307 | err |= __put_user(current->thread.fsr, &fpu->si_fsr); | ||
308 | err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); | ||
309 | if (current->thread.fpqdepth != 0) | ||
310 | err |= __copy_to_user(&fpu->si_fpqueue[0], | ||
311 | &current->thread.fpqueue[0], | ||
312 | ((sizeof(unsigned long) + | ||
313 | (sizeof(unsigned long *)))*16)); | ||
314 | clear_used_math(); | ||
315 | return err; | ||
316 | } | ||
317 | |||
318 | static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, | 263 | static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, |
319 | int signo, sigset_t *oldset) | 264 | int signo, sigset_t *oldset) |
320 | { | 265 | { |
321 | struct signal_frame __user *sf; | 266 | struct signal_frame __user *sf; |
322 | int sigframe_size, err; | 267 | int sigframe_size, err, wsaved; |
268 | void __user *tail; | ||
323 | 269 | ||
324 | /* 1. Make sure everything is clean */ | 270 | /* 1. Make sure everything is clean */ |
325 | synchronize_user_stack(); | 271 | synchronize_user_stack(); |
326 | 272 | ||
327 | sigframe_size = SF_ALIGNEDSZ; | 273 | wsaved = current_thread_info()->w_saved; |
328 | if (!used_math()) | 274 | |
329 | sigframe_size -= sizeof(__siginfo_fpu_t); | 275 | sigframe_size = sizeof(*sf); |
276 | if (used_math()) | ||
277 | sigframe_size += sizeof(__siginfo_fpu_t); | ||
278 | if (wsaved) | ||
279 | sigframe_size += sizeof(__siginfo_rwin_t); | ||
330 | 280 | ||
331 | sf = (struct signal_frame __user *) | 281 | sf = (struct signal_frame __user *) |
332 | get_sigframe(&ka->sa, regs, sigframe_size); | 282 | get_sigframe(&ka->sa, regs, sigframe_size); |
@@ -334,8 +284,7 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
334 | if (invalid_frame_pointer(sf, sigframe_size)) | 284 | if (invalid_frame_pointer(sf, sigframe_size)) |
335 | goto sigill_and_return; | 285 | goto sigill_and_return; |
336 | 286 | ||
337 | if (current_thread_info()->w_saved != 0) | 287 | tail = sf + 1; |
338 | goto sigill_and_return; | ||
339 | 288 | ||
340 | /* 2. Save the current process state */ | 289 | /* 2. Save the current process state */ |
341 | err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); | 290 | err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); |
@@ -343,17 +292,34 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
343 | err |= __put_user(0, &sf->extra_size); | 292 | err |= __put_user(0, &sf->extra_size); |
344 | 293 | ||
345 | if (used_math()) { | 294 | if (used_math()) { |
346 | err |= save_fpu_state(regs, &sf->fpu_state); | 295 | __siginfo_fpu_t __user *fp = tail; |
347 | err |= __put_user(&sf->fpu_state, &sf->fpu_save); | 296 | tail += sizeof(*fp); |
297 | err |= save_fpu_state(regs, fp); | ||
298 | err |= __put_user(fp, &sf->fpu_save); | ||
348 | } else { | 299 | } else { |
349 | err |= __put_user(0, &sf->fpu_save); | 300 | err |= __put_user(0, &sf->fpu_save); |
350 | } | 301 | } |
302 | if (wsaved) { | ||
303 | __siginfo_rwin_t __user *rwp = tail; | ||
304 | tail += sizeof(*rwp); | ||
305 | err |= save_rwin_state(wsaved, rwp); | ||
306 | err |= __put_user(rwp, &sf->rwin_save); | ||
307 | } else { | ||
308 | err |= __put_user(0, &sf->rwin_save); | ||
309 | } | ||
351 | 310 | ||
352 | err |= __put_user(oldset->sig[0], &sf->info.si_mask); | 311 | err |= __put_user(oldset->sig[0], &sf->info.si_mask); |
353 | err |= __copy_to_user(sf->extramask, &oldset->sig[1], | 312 | err |= __copy_to_user(sf->extramask, &oldset->sig[1], |
354 | (_NSIG_WORDS - 1) * sizeof(unsigned int)); | 313 | (_NSIG_WORDS - 1) * sizeof(unsigned int)); |
355 | err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], | 314 | if (!wsaved) { |
356 | sizeof(struct reg_window32)); | 315 | err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], |
316 | sizeof(struct reg_window32)); | ||
317 | } else { | ||
318 | struct reg_window32 *rp; | ||
319 | |||
320 | rp = &current_thread_info()->reg_window[wsaved - 1]; | ||
321 | err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); | ||
322 | } | ||
357 | if (err) | 323 | if (err) |
358 | goto sigsegv; | 324 | goto sigsegv; |
359 | 325 | ||
@@ -399,21 +365,24 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
399 | int signo, sigset_t *oldset, siginfo_t *info) | 365 | int signo, sigset_t *oldset, siginfo_t *info) |
400 | { | 366 | { |
401 | struct rt_signal_frame __user *sf; | 367 | struct rt_signal_frame __user *sf; |
402 | int sigframe_size; | 368 | int sigframe_size, wsaved; |
369 | void __user *tail; | ||
403 | unsigned int psr; | 370 | unsigned int psr; |
404 | int err; | 371 | int err; |
405 | 372 | ||
406 | synchronize_user_stack(); | 373 | synchronize_user_stack(); |
407 | sigframe_size = RT_ALIGNEDSZ; | 374 | wsaved = current_thread_info()->w_saved; |
408 | if (!used_math()) | 375 | sigframe_size = sizeof(*sf); |
409 | sigframe_size -= sizeof(__siginfo_fpu_t); | 376 | if (used_math()) |
377 | sigframe_size += sizeof(__siginfo_fpu_t); | ||
378 | if (wsaved) | ||
379 | sigframe_size += sizeof(__siginfo_rwin_t); | ||
410 | sf = (struct rt_signal_frame __user *) | 380 | sf = (struct rt_signal_frame __user *) |
411 | get_sigframe(&ka->sa, regs, sigframe_size); | 381 | get_sigframe(&ka->sa, regs, sigframe_size); |
412 | if (invalid_frame_pointer(sf, sigframe_size)) | 382 | if (invalid_frame_pointer(sf, sigframe_size)) |
413 | goto sigill; | 383 | goto sigill; |
414 | if (current_thread_info()->w_saved != 0) | ||
415 | goto sigill; | ||
416 | 384 | ||
385 | tail = sf + 1; | ||
417 | err = __put_user(regs->pc, &sf->regs.pc); | 386 | err = __put_user(regs->pc, &sf->regs.pc); |
418 | err |= __put_user(regs->npc, &sf->regs.npc); | 387 | err |= __put_user(regs->npc, &sf->regs.npc); |
419 | err |= __put_user(regs->y, &sf->regs.y); | 388 | err |= __put_user(regs->y, &sf->regs.y); |
@@ -425,11 +394,21 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
425 | err |= __put_user(0, &sf->extra_size); | 394 | err |= __put_user(0, &sf->extra_size); |
426 | 395 | ||
427 | if (psr & PSR_EF) { | 396 | if (psr & PSR_EF) { |
428 | err |= save_fpu_state(regs, &sf->fpu_state); | 397 | __siginfo_fpu_t *fp = tail; |
429 | err |= __put_user(&sf->fpu_state, &sf->fpu_save); | 398 | tail += sizeof(*fp); |
399 | err |= save_fpu_state(regs, fp); | ||
400 | err |= __put_user(fp, &sf->fpu_save); | ||
430 | } else { | 401 | } else { |
431 | err |= __put_user(0, &sf->fpu_save); | 402 | err |= __put_user(0, &sf->fpu_save); |
432 | } | 403 | } |
404 | if (wsaved) { | ||
405 | __siginfo_rwin_t *rwp = tail; | ||
406 | tail += sizeof(*rwp); | ||
407 | err |= save_rwin_state(wsaved, rwp); | ||
408 | err |= __put_user(rwp, &sf->rwin_save); | ||
409 | } else { | ||
410 | err |= __put_user(0, &sf->rwin_save); | ||
411 | } | ||
433 | err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); | 412 | err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); |
434 | 413 | ||
435 | /* Setup sigaltstack */ | 414 | /* Setup sigaltstack */ |
@@ -437,8 +416,15 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
437 | err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); | 416 | err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); |
438 | err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); | 417 | err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); |
439 | 418 | ||
440 | err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], | 419 | if (!wsaved) { |
441 | sizeof(struct reg_window32)); | 420 | err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], |
421 | sizeof(struct reg_window32)); | ||
422 | } else { | ||
423 | struct reg_window32 *rp; | ||
424 | |||
425 | rp = &current_thread_info()->reg_window[wsaved - 1]; | ||
426 | err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); | ||
427 | } | ||
442 | 428 | ||
443 | err |= copy_siginfo_to_user(&sf->info, info); | 429 | err |= copy_siginfo_to_user(&sf->info, info); |
444 | 430 | ||
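
signal_32.c now leans on the same helper pair, save_rwin_state() and restore_rwin_state(), declared in the new "sigutil.h" and built from the sigutil_$(BITS).o objects added to arch/sparc/kernel/Makefile earlier in this diff. Their bodies are not part of these hunks; the sketch below is only a plausible reconstruction from the call sites and from __siginfo_rwin_t, with plain memcpy() standing in for the user-space copy routines and the bounds check being an assumption rather than the real implementation:

    #include <string.h>
    #include <errno.h>

    #define __SIGC_MAXWIN 7

    typedef struct { unsigned long locals[8]; unsigned long ins[8]; } __siginfo_reg_window;

    typedef struct {
        int wsaved;
        __siginfo_reg_window reg_window[__SIGC_MAXWIN];
        unsigned long rwbuf_stkptrs[__SIGC_MAXWIN];
    } __siginfo_rwin_t;

    /* Pretend per-thread state (the kernel keeps this in thread_info). */
    static __siginfo_reg_window thread_reg_window[__SIGC_MAXWIN];
    static unsigned long thread_rwbuf_stkptrs[__SIGC_MAXWIN];

    /* Copy the 'wsaved' pending windows out to the signal-frame block. */
    static int save_rwin_state(int wsaved, __siginfo_rwin_t *rwin)
    {
        if (wsaved < 0 || wsaved > __SIGC_MAXWIN)
            return -EINVAL;
        rwin->wsaved = wsaved;
        memcpy(rwin->reg_window, thread_reg_window,
               wsaved * sizeof(__siginfo_reg_window));
        memcpy(rwin->rwbuf_stkptrs, thread_rwbuf_stkptrs,
               wsaved * sizeof(unsigned long));
        return 0;
    }

    /* Pull them back on sigreturn, refusing an implausible count. */
    static int restore_rwin_state(const __siginfo_rwin_t *rwin)
    {
        int wsaved = rwin->wsaved;

        if (wsaved < 0 || wsaved > __SIGC_MAXWIN)
            return -EFAULT;
        memcpy(thread_reg_window, rwin->reg_window,
               wsaved * sizeof(__siginfo_reg_window));
        memcpy(thread_rwbuf_stkptrs, rwin->rwbuf_stkptrs,
               wsaved * sizeof(unsigned long));
        return 0;
    }

    int main(void)
    {
        __siginfo_rwin_t buf;
        return save_rwin_state(2, &buf) || restore_rwin_state(&buf);
    }
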
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 006fe4515886..47509df3b893 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include "entry.h" | 35 | #include "entry.h" |
36 | #include "systbls.h" | 36 | #include "systbls.h" |
37 | #include "sigutil.h" | ||
37 | 38 | ||
38 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 39 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
39 | 40 | ||
@@ -236,7 +237,7 @@ struct rt_signal_frame { | |||
236 | __siginfo_fpu_t __user *fpu_save; | 237 | __siginfo_fpu_t __user *fpu_save; |
237 | stack_t stack; | 238 | stack_t stack; |
238 | sigset_t mask; | 239 | sigset_t mask; |
239 | __siginfo_fpu_t fpu_state; | 240 | __siginfo_rwin_t *rwin_save; |
240 | }; | 241 | }; |
241 | 242 | ||
242 | static long _sigpause_common(old_sigset_t set) | 243 | static long _sigpause_common(old_sigset_t set) |
@@ -266,33 +267,12 @@ asmlinkage long sys_sigsuspend(old_sigset_t set) | |||
266 | return _sigpause_common(set); | 267 | return _sigpause_common(set); |
267 | } | 268 | } |
268 | 269 | ||
269 | static inline int | ||
270 | restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
271 | { | ||
272 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
273 | unsigned long fprs; | ||
274 | int err; | ||
275 | |||
276 | err = __get_user(fprs, &fpu->si_fprs); | ||
277 | fprs_write(0); | ||
278 | regs->tstate &= ~TSTATE_PEF; | ||
279 | if (fprs & FPRS_DL) | ||
280 | err |= copy_from_user(fpregs, &fpu->si_float_regs[0], | ||
281 | (sizeof(unsigned int) * 32)); | ||
282 | if (fprs & FPRS_DU) | ||
283 | err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], | ||
284 | (sizeof(unsigned int) * 32)); | ||
285 | err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
286 | err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
287 | current_thread_info()->fpsaved[0] |= fprs; | ||
288 | return err; | ||
289 | } | ||
290 | |||
291 | void do_rt_sigreturn(struct pt_regs *regs) | 270 | void do_rt_sigreturn(struct pt_regs *regs) |
292 | { | 271 | { |
293 | struct rt_signal_frame __user *sf; | 272 | struct rt_signal_frame __user *sf; |
294 | unsigned long tpc, tnpc, tstate; | 273 | unsigned long tpc, tnpc, tstate; |
295 | __siginfo_fpu_t __user *fpu_save; | 274 | __siginfo_fpu_t __user *fpu_save; |
275 | __siginfo_rwin_t __user *rwin_save; | ||
296 | sigset_t set; | 276 | sigset_t set; |
297 | int err; | 277 | int err; |
298 | 278 | ||
@@ -325,8 +305,8 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
325 | regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); | 305 | regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); |
326 | 306 | ||
327 | err |= __get_user(fpu_save, &sf->fpu_save); | 307 | err |= __get_user(fpu_save, &sf->fpu_save); |
328 | if (fpu_save) | 308 | if (!err && fpu_save) |
329 | err |= restore_fpu_state(regs, &sf->fpu_state); | 309 | err |= restore_fpu_state(regs, fpu_save); |
330 | 310 | ||
331 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); | 311 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); |
332 | err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); | 312 | err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); |
@@ -334,6 +314,12 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
334 | if (err) | 314 | if (err) |
335 | goto segv; | 315 | goto segv; |
336 | 316 | ||
317 | err |= __get_user(rwin_save, &sf->rwin_save); | ||
318 | if (!err && rwin_save) { | ||
319 | if (restore_rwin_state(rwin_save)) | ||
320 | goto segv; | ||
321 | } | ||
322 | |||
337 | regs->tpc = tpc; | 323 | regs->tpc = tpc; |
338 | regs->tnpc = tnpc; | 324 | regs->tnpc = tnpc; |
339 | 325 | ||
@@ -351,34 +337,13 @@ segv: | |||
351 | } | 337 | } |
352 | 338 | ||
353 | /* Checks if the fp is valid */ | 339 | /* Checks if the fp is valid */ |
354 | static int invalid_frame_pointer(void __user *fp, int fplen) | 340 | static int invalid_frame_pointer(void __user *fp) |
355 | { | 341 | { |
356 | if (((unsigned long) fp) & 15) | 342 | if (((unsigned long) fp) & 15) |
357 | return 1; | 343 | return 1; |
358 | return 0; | 344 | return 0; |
359 | } | 345 | } |
360 | 346 | ||
361 | static inline int | ||
362 | save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
363 | { | ||
364 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
365 | unsigned long fprs; | ||
366 | int err = 0; | ||
367 | |||
368 | fprs = current_thread_info()->fpsaved[0]; | ||
369 | if (fprs & FPRS_DL) | ||
370 | err |= copy_to_user(&fpu->si_float_regs[0], fpregs, | ||
371 | (sizeof(unsigned int) * 32)); | ||
372 | if (fprs & FPRS_DU) | ||
373 | err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, | ||
374 | (sizeof(unsigned int) * 32)); | ||
375 | err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
376 | err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
377 | err |= __put_user(fprs, &fpu->si_fprs); | ||
378 | |||
379 | return err; | ||
380 | } | ||
381 | |||
382 | static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) | 347 | static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) |
383 | { | 348 | { |
384 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; | 349 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; |
@@ -414,34 +379,48 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
414 | int signo, sigset_t *oldset, siginfo_t *info) | 379 | int signo, sigset_t *oldset, siginfo_t *info) |
415 | { | 380 | { |
416 | struct rt_signal_frame __user *sf; | 381 | struct rt_signal_frame __user *sf; |
417 | int sigframe_size, err; | 382 | int wsaved, err, sf_size; |
383 | void __user *tail; | ||
418 | 384 | ||
419 | /* 1. Make sure everything is clean */ | 385 | /* 1. Make sure everything is clean */ |
420 | synchronize_user_stack(); | 386 | synchronize_user_stack(); |
421 | save_and_clear_fpu(); | 387 | save_and_clear_fpu(); |
422 | 388 | ||
423 | sigframe_size = sizeof(struct rt_signal_frame); | 389 | wsaved = get_thread_wsaved(); |
424 | if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) | ||
425 | sigframe_size -= sizeof(__siginfo_fpu_t); | ||
426 | 390 | ||
391 | sf_size = sizeof(struct rt_signal_frame); | ||
392 | if (current_thread_info()->fpsaved[0] & FPRS_FEF) | ||
393 | sf_size += sizeof(__siginfo_fpu_t); | ||
394 | if (wsaved) | ||
395 | sf_size += sizeof(__siginfo_rwin_t); | ||
427 | sf = (struct rt_signal_frame __user *) | 396 | sf = (struct rt_signal_frame __user *) |
428 | get_sigframe(ka, regs, sigframe_size); | 397 | get_sigframe(ka, regs, sf_size); |
429 | |||
430 | if (invalid_frame_pointer (sf, sigframe_size)) | ||
431 | goto sigill; | ||
432 | 398 | ||
433 | if (get_thread_wsaved() != 0) | 399 | if (invalid_frame_pointer (sf)) |
434 | goto sigill; | 400 | goto sigill; |
435 | 401 | ||
402 | tail = (sf + 1); | ||
403 | |||
436 | /* 2. Save the current process state */ | 404 | /* 2. Save the current process state */ |
437 | err = copy_to_user(&sf->regs, regs, sizeof (*regs)); | 405 | err = copy_to_user(&sf->regs, regs, sizeof (*regs)); |
438 | 406 | ||
439 | if (current_thread_info()->fpsaved[0] & FPRS_FEF) { | 407 | if (current_thread_info()->fpsaved[0] & FPRS_FEF) { |
440 | err |= save_fpu_state(regs, &sf->fpu_state); | 408 | __siginfo_fpu_t __user *fpu_save = tail; |
441 | err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); | 409 | tail += sizeof(__siginfo_fpu_t); |
410 | err |= save_fpu_state(regs, fpu_save); | ||
411 | err |= __put_user((u64)fpu_save, &sf->fpu_save); | ||
442 | } else { | 412 | } else { |
443 | err |= __put_user(0, &sf->fpu_save); | 413 | err |= __put_user(0, &sf->fpu_save); |
444 | } | 414 | } |
415 | if (wsaved) { | ||
416 | __siginfo_rwin_t __user *rwin_save = tail; | ||
417 | tail += sizeof(__siginfo_rwin_t); | ||
418 | err |= save_rwin_state(wsaved, rwin_save); | ||
419 | err |= __put_user((u64)rwin_save, &sf->rwin_save); | ||
420 | set_thread_wsaved(0); | ||
421 | } else { | ||
422 | err |= __put_user(0, &sf->rwin_save); | ||
423 | } | ||
445 | 424 | ||
446 | /* Setup sigaltstack */ | 425 | /* Setup sigaltstack */ |
447 | err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); | 426 | err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); |
@@ -450,10 +429,17 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
450 | 429 | ||
451 | err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); | 430 | err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); |
452 | 431 | ||
453 | err |= copy_in_user((u64 __user *)sf, | 432 | if (!wsaved) { |
454 | (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS), | 433 | err |= copy_in_user((u64 __user *)sf, |
455 | sizeof(struct reg_window)); | 434 | (u64 __user *)(regs->u_regs[UREG_FP] + |
435 | STACK_BIAS), | ||
436 | sizeof(struct reg_window)); | ||
437 | } else { | ||
438 | struct reg_window *rp; | ||
456 | 439 | ||
440 | rp = &current_thread_info()->reg_window[wsaved - 1]; | ||
441 | err |= copy_to_user(sf, rp, sizeof(struct reg_window)); | ||
442 | } | ||
457 | if (info) | 443 | if (info) |
458 | err |= copy_siginfo_to_user(&sf->info, info); | 444 | err |= copy_siginfo_to_user(&sf->info, info); |
459 | else { | 445 | else { |
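The reworked setup_rt_frame() sizes the frame up front and then appends the optional FPU and register-window blobs at a moving tail pointer, storing either a user pointer or 0 in fpu_save/rwin_save. A minimal userspace-style sketch of that layout pattern follows; the struct and function names are illustrative, not kernel API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fpu_blob  { unsigned long regs[64]; };
struct rwin_blob { unsigned long window[8][16]; };

struct frame {					/* stand-in for rt_signal_frame */
	unsigned long fixed_state[16];
	struct fpu_blob  *fpu_save;		/* NULL if no FPU state to save */
	struct rwin_blob *rwin_save;		/* NULL if no spilled windows */
};

static size_t frame_size(int have_fpu, int wsaved)
{
	size_t sz = sizeof(struct frame);

	if (have_fpu)
		sz += sizeof(struct fpu_blob);
	if (wsaved)
		sz += sizeof(struct rwin_blob);
	return sz;
}

static void lay_out(struct frame *sf, int have_fpu, int wsaved)
{
	void *tail = sf + 1;			/* first byte past the fixed part */

	if (have_fpu) {
		sf->fpu_save = tail;
		tail = (char *)tail + sizeof(struct fpu_blob);
	} else {
		sf->fpu_save = NULL;
	}
	sf->rwin_save = wsaved ? tail : NULL;
}

int main(void)
{
	struct frame *sf = malloc(frame_size(1, 1));

	lay_out(sf, 1, 1);
	printf("frame %zu bytes, fpu at +%td, rwin at +%td\n",
	       frame_size(1, 1),
	       (char *)sf->fpu_save - (char *)sf,
	       (char *)sf->rwin_save - (char *)sf);
	free(sf);
	return 0;
}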
diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h new file mode 100644 index 000000000000..d223aa432bb6 --- /dev/null +++ b/arch/sparc/kernel/sigutil.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _SIGUTIL_H | ||
2 | #define _SIGUTIL_H | ||
3 | |||
4 | int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); | ||
5 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); | ||
6 | int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin); | ||
7 | int restore_rwin_state(__siginfo_rwin_t __user *rp); | ||
8 | |||
9 | #endif /* _SIGUTIL_H */ | ||
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c new file mode 100644 index 000000000000..35c7897b009a --- /dev/null +++ b/arch/sparc/kernel/sigutil_32.c | |||
@@ -0,0 +1,120 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/thread_info.h> | ||
4 | #include <linux/uaccess.h> | ||
5 | #include <linux/sched.h> | ||
6 | |||
7 | #include <asm/sigcontext.h> | ||
8 | #include <asm/fpumacro.h> | ||
9 | #include <asm/ptrace.h> | ||
10 | |||
11 | #include "sigutil.h" | ||
12 | |||
13 | int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
14 | { | ||
15 | int err = 0; | ||
16 | #ifdef CONFIG_SMP | ||
17 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) { | ||
18 | put_psr(get_psr() | PSR_EF); | ||
19 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | ||
20 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | ||
21 | regs->psr &= ~(PSR_EF); | ||
22 | clear_tsk_thread_flag(current, TIF_USEDFPU); | ||
23 | } | ||
24 | #else | ||
25 | if (current == last_task_used_math) { | ||
26 | put_psr(get_psr() | PSR_EF); | ||
27 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | ||
28 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | ||
29 | last_task_used_math = NULL; | ||
30 | regs->psr &= ~(PSR_EF); | ||
31 | } | ||
32 | #endif | ||
33 | err |= __copy_to_user(&fpu->si_float_regs[0], | ||
34 | &current->thread.float_regs[0], | ||
35 | (sizeof(unsigned long) * 32)); | ||
36 | err |= __put_user(current->thread.fsr, &fpu->si_fsr); | ||
37 | err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); | ||
38 | if (current->thread.fpqdepth != 0) | ||
39 | err |= __copy_to_user(&fpu->si_fpqueue[0], | ||
40 | &current->thread.fpqueue[0], | ||
41 | ((sizeof(unsigned long) + | ||
42 | (sizeof(unsigned long *)))*16)); | ||
43 | clear_used_math(); | ||
44 | return err; | ||
45 | } | ||
46 | |||
47 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
48 | { | ||
49 | int err; | ||
50 | #ifdef CONFIG_SMP | ||
51 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) | ||
52 | regs->psr &= ~PSR_EF; | ||
53 | #else | ||
54 | if (current == last_task_used_math) { | ||
55 | last_task_used_math = NULL; | ||
56 | regs->psr &= ~PSR_EF; | ||
57 | } | ||
58 | #endif | ||
59 | set_used_math(); | ||
60 | clear_tsk_thread_flag(current, TIF_USEDFPU); | ||
61 | |||
62 | if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) | ||
63 | return -EFAULT; | ||
64 | |||
65 | err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], | ||
66 | (sizeof(unsigned long) * 32)); | ||
67 | err |= __get_user(current->thread.fsr, &fpu->si_fsr); | ||
68 | err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); | ||
69 | if (current->thread.fpqdepth != 0) | ||
70 | err |= __copy_from_user(&current->thread.fpqueue[0], | ||
71 | &fpu->si_fpqueue[0], | ||
72 | ((sizeof(unsigned long) + | ||
73 | (sizeof(unsigned long *)))*16)); | ||
74 | return err; | ||
75 | } | ||
76 | |||
77 | int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) | ||
78 | { | ||
79 | int i, err = __put_user(wsaved, &rwin->wsaved); | ||
80 | |||
81 | for (i = 0; i < wsaved; i++) { | ||
82 | struct reg_window32 *rp; | ||
83 | unsigned long fp; | ||
84 | |||
85 | rp = &current_thread_info()->reg_window[i]; | ||
86 | fp = current_thread_info()->rwbuf_stkptrs[i]; | ||
87 | err |= copy_to_user(&rwin->reg_window[i], rp, | ||
88 | sizeof(struct reg_window32)); | ||
89 | err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); | ||
90 | } | ||
91 | return err; | ||
92 | } | ||
93 | |||
94 | int restore_rwin_state(__siginfo_rwin_t __user *rp) | ||
95 | { | ||
96 | struct thread_info *t = current_thread_info(); | ||
97 | int i, wsaved, err; | ||
98 | |||
99 | __get_user(wsaved, &rp->wsaved); | ||
100 | if (wsaved > NSWINS) | ||
101 | return -EFAULT; | ||
102 | |||
103 | err = 0; | ||
104 | for (i = 0; i < wsaved; i++) { | ||
105 | err |= copy_from_user(&t->reg_window[i], | ||
106 | &rp->reg_window[i], | ||
107 | sizeof(struct reg_window32)); | ||
108 | err |= __get_user(t->rwbuf_stkptrs[i], | ||
109 | &rp->rwbuf_stkptrs[i]); | ||
110 | } | ||
111 | if (err) | ||
112 | return err; | ||
113 | |||
114 | t->w_saved = wsaved; | ||
115 | synchronize_user_stack(); | ||
116 | if (t->w_saved) | ||
117 | return -EFAULT; | ||
118 | return 0; | ||
119 | |||
120 | } | ||
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c new file mode 100644 index 000000000000..e7dc508c38eb --- /dev/null +++ b/arch/sparc/kernel/sigutil_64.c | |||
@@ -0,0 +1,93 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/thread_info.h> | ||
4 | #include <linux/uaccess.h> | ||
5 | |||
6 | #include <asm/sigcontext.h> | ||
7 | #include <asm/fpumacro.h> | ||
8 | #include <asm/ptrace.h> | ||
9 | |||
10 | #include "sigutil.h" | ||
11 | |||
12 | int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
13 | { | ||
14 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
15 | unsigned long fprs; | ||
16 | int err = 0; | ||
17 | |||
18 | fprs = current_thread_info()->fpsaved[0]; | ||
19 | if (fprs & FPRS_DL) | ||
20 | err |= copy_to_user(&fpu->si_float_regs[0], fpregs, | ||
21 | (sizeof(unsigned int) * 32)); | ||
22 | if (fprs & FPRS_DU) | ||
23 | err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, | ||
24 | (sizeof(unsigned int) * 32)); | ||
25 | err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
26 | err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
27 | err |= __put_user(fprs, &fpu->si_fprs); | ||
28 | |||
29 | return err; | ||
30 | } | ||
31 | |||
32 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | ||
33 | { | ||
34 | unsigned long *fpregs = current_thread_info()->fpregs; | ||
35 | unsigned long fprs; | ||
36 | int err; | ||
37 | |||
38 | err = __get_user(fprs, &fpu->si_fprs); | ||
39 | fprs_write(0); | ||
40 | regs->tstate &= ~TSTATE_PEF; | ||
41 | if (fprs & FPRS_DL) | ||
42 | err |= copy_from_user(fpregs, &fpu->si_float_regs[0], | ||
43 | (sizeof(unsigned int) * 32)); | ||
44 | if (fprs & FPRS_DU) | ||
45 | err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], | ||
46 | (sizeof(unsigned int) * 32)); | ||
47 | err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); | ||
48 | err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); | ||
49 | current_thread_info()->fpsaved[0] |= fprs; | ||
50 | return err; | ||
51 | } | ||
52 | |||
53 | int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) | ||
54 | { | ||
55 | int i, err = __put_user(wsaved, &rwin->wsaved); | ||
56 | |||
57 | for (i = 0; i < wsaved; i++) { | ||
58 | struct reg_window *rp = &current_thread_info()->reg_window[i]; | ||
59 | unsigned long fp = current_thread_info()->rwbuf_stkptrs[i]; | ||
60 | |||
61 | err |= copy_to_user(&rwin->reg_window[i], rp, | ||
62 | sizeof(struct reg_window)); | ||
63 | err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); | ||
64 | } | ||
65 | return err; | ||
66 | } | ||
67 | |||
68 | int restore_rwin_state(__siginfo_rwin_t __user *rp) | ||
69 | { | ||
70 | struct thread_info *t = current_thread_info(); | ||
71 | int i, wsaved, err; | ||
72 | |||
73 | __get_user(wsaved, &rp->wsaved); | ||
74 | if (wsaved > NSWINS) | ||
75 | return -EFAULT; | ||
76 | |||
77 | err = 0; | ||
78 | for (i = 0; i < wsaved; i++) { | ||
79 | err |= copy_from_user(&t->reg_window[i], | ||
80 | &rp->reg_window[i], | ||
81 | sizeof(struct reg_window)); | ||
82 | err |= __get_user(t->rwbuf_stkptrs[i], | ||
83 | &rp->rwbuf_stkptrs[i]); | ||
84 | } | ||
85 | if (err) | ||
86 | return err; | ||
87 | |||
88 | set_thread_wsaved(wsaved); | ||
89 | synchronize_user_stack(); | ||
90 | if (get_thread_wsaved()) | ||
91 | return -EFAULT; | ||
92 | return 0; | ||
93 | } | ||
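In both sigutil files, restore_rwin_state() treats the saved window count as untrusted: it is bounded against NSWINS before any copies, and the windows are flushed back to the user stack afterwards. A generic, standalone sketch of that bounding pattern, with stand-in names and sizes:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS 7	/* stand-in for NSWINS */

struct window { unsigned long regs[16]; };

static int restore_windows(struct window *dst, const struct window *src,
			   unsigned int count)
{
	if (count > NSLOTS)
		return -EFAULT;		/* reject before touching dst */
	memcpy(dst, src, count * sizeof(*dst));
	return 0;
}

int main(void)
{
	struct window kbuf[NSLOTS], ubuf[NSLOTS] = { 0 };

	printf("in-range count:    %d\n", restore_windows(kbuf, ubuf, 3));
	printf("oversized count:   %d\n", restore_windows(kbuf, ubuf, 32));
	return 0;
}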
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 7b439d9aea2a..41935fadfdfc 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -27,8 +27,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in | |||
27 | 27 | ||
28 | desc->base2 = (info->base_addr & 0xff000000) >> 24; | 28 | desc->base2 = (info->base_addr & 0xff000000) >> 24; |
29 | /* | 29 | /* |
30 | * Don't allow setting of the lm bit. It is useless anyway | 30 | * Don't allow setting of the lm bit. It would confuse |
31 | * because 64bit system calls require __USER_CS: | 31 | * user_64bit_mode and would get overridden by sysret anyway. |
32 | */ | 32 | */ |
33 | desc->l = 0; | 33 | desc->l = 0; |
34 | } | 34 | } |
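fill_ldt() keeps forcing desc->l to 0, so the only long-mode CPL 3 selectors are __USER_CS and, after the paravirt change below, pv_info.extra_user_64bit_cs; user_64bit_mode() relies on that. A hedged userspace sketch of why the LDT path cannot introduce another one: modify_ldt(2) accepts an lm bit in struct user_desc, but the kernel clears it (this assumes an x86-64 build and a kernel that allows LDT writes).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number = 0;
	d.base_addr = 0;
	d.limit = 0xfffff;
	d.seg_32bit = 1;
	d.limit_in_pages = 1;
	d.contents = MODIFY_LDT_CONTENTS_CODE;
	d.useable = 1;
#ifdef __x86_64__
	d.lm = 1;	/* request a 64-bit segment; fill_ldt() ignores this */
#endif
	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0)
		perror("modify_ldt");
	else
		puts("LDT entry installed (lm bit silently cleared)");
	return 0;
}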
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index f9a320984a10..7e50f06393aa 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -17,7 +17,6 @@ | |||
17 | * Vectors 0 ... 31 : system traps and exceptions - hardcoded events | 17 | * Vectors 0 ... 31 : system traps and exceptions - hardcoded events |
18 | * Vectors 32 ... 127 : device interrupts | 18 | * Vectors 32 ... 127 : device interrupts |
19 | * Vector 128 : legacy int80 syscall interface | 19 | * Vector 128 : legacy int80 syscall interface |
20 | * Vector 204 : legacy x86_64 vsyscall emulation | ||
21 | * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts | 20 | * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts |
22 | * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts | 21 | * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts |
23 | * | 22 | * |
@@ -51,9 +50,6 @@ | |||
51 | #ifdef CONFIG_X86_32 | 50 | #ifdef CONFIG_X86_32 |
52 | # define SYSCALL_VECTOR 0x80 | 51 | # define SYSCALL_VECTOR 0x80 |
53 | #endif | 52 | #endif |
54 | #ifdef CONFIG_X86_64 | ||
55 | # define VSYSCALL_EMU_VECTOR 0xcc | ||
56 | #endif | ||
57 | 53 | ||
58 | /* | 54 | /* |
59 | * Vectors 0x30-0x3f are used for ISA interrupts. | 55 | * Vectors 0x30-0x3f are used for ISA interrupts. |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 2c7652163111..8e8b9a4987ee 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #include <asm/desc_defs.h> | 42 | #include <asm/desc_defs.h> |
43 | #include <asm/kmap_types.h> | 43 | #include <asm/kmap_types.h> |
44 | #include <asm/pgtable_types.h> | ||
44 | 45 | ||
45 | struct page; | 46 | struct page; |
46 | struct thread_struct; | 47 | struct thread_struct; |
@@ -63,6 +64,11 @@ struct paravirt_callee_save { | |||
63 | struct pv_info { | 64 | struct pv_info { |
64 | unsigned int kernel_rpl; | 65 | unsigned int kernel_rpl; |
65 | int shared_kernel_pmd; | 66 | int shared_kernel_pmd; |
67 | |||
68 | #ifdef CONFIG_X86_64 | ||
69 | u16 extra_user_64bit_cs; /* __USER_CS if none */ | ||
70 | #endif | ||
71 | |||
66 | int paravirt_enabled; | 72 | int paravirt_enabled; |
67 | const char *name; | 73 | const char *name; |
68 | }; | 74 | }; |
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 94e7618fcac8..35664547125b 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -131,6 +131,9 @@ struct pt_regs { | |||
131 | #ifdef __KERNEL__ | 131 | #ifdef __KERNEL__ |
132 | 132 | ||
133 | #include <linux/init.h> | 133 | #include <linux/init.h> |
134 | #ifdef CONFIG_PARAVIRT | ||
135 | #include <asm/paravirt_types.h> | ||
136 | #endif | ||
134 | 137 | ||
135 | struct cpuinfo_x86; | 138 | struct cpuinfo_x86; |
136 | struct task_struct; | 139 | struct task_struct; |
@@ -187,6 +190,22 @@ static inline int v8086_mode(struct pt_regs *regs) | |||
187 | #endif | 190 | #endif |
188 | } | 191 | } |
189 | 192 | ||
193 | #ifdef CONFIG_X86_64 | ||
194 | static inline bool user_64bit_mode(struct pt_regs *regs) | ||
195 | { | ||
196 | #ifndef CONFIG_PARAVIRT | ||
197 | /* | ||
198 | * On non-paravirt systems, this is the only long mode CPL 3 | ||
199 | * selector. We do not allow long mode selectors in the LDT. | ||
200 | */ | ||
201 | return regs->cs == __USER_CS; | ||
202 | #else | ||
203 | /* Headers are too twisted for this to go in paravirt.h. */ | ||
204 | return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; | ||
205 | #endif | ||
206 | } | ||
207 | #endif | ||
208 | |||
190 | /* | 209 | /* |
191 | * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode | 210 | * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode |
192 | * when it traps. The previous stack will be directly underneath the saved | 211 | * when it traps. The previous stack will be directly underneath the saved |
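user_64bit_mode() gives callers a single test for "64-bit user code segment", including the paravirt case where a second long-mode selector (such as Xen's FLAT_USER_CS64, wired up below) exists besides __USER_CS. The following is a self-contained userspace mock, not kernel code, of the decision it enables for the 0x40-0x4f opcode range used later by step.c; the selector values are the conventional x86-64 GDT ones.

#include <stdbool.h>
#include <stdio.h>

struct regs { unsigned long cs; };

#define USER_CS_64	0x33	/* __USER_CS on x86-64 */
#define USER_CS_32	0x23	/* __USER32_CS: compat 32-bit code */

static bool mock_user_64bit_mode(const struct regs *r)
{
	return r->cs == USER_CS_64;	/* paravirt adds one extra selector */
}

static const char *classify(const struct regs *r, unsigned char op)
{
	if (op < 0x40 || op > 0x4f)
		return "not in the 0x40-0x4f range";
	return mock_user_64bit_mode(r) ? "REX prefix" : "inc/dec register";
}

int main(void)
{
	struct regs r64 = { .cs = USER_CS_64 }, r32 = { .cs = USER_CS_32 };

	printf("0x48 in 64-bit code: %s\n", classify(&r64, 0x48));
	printf("0x48 in 32-bit code: %s\n", classify(&r32, 0x48));
	return 0;
}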
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 2bae0a513b40..0012d0902c5f 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -40,7 +40,6 @@ asmlinkage void alignment_check(void); | |||
40 | asmlinkage void machine_check(void); | 40 | asmlinkage void machine_check(void); |
41 | #endif /* CONFIG_X86_MCE */ | 41 | #endif /* CONFIG_X86_MCE */ |
42 | asmlinkage void simd_coprocessor_error(void); | 42 | asmlinkage void simd_coprocessor_error(void); |
43 | asmlinkage void emulate_vsyscall(void); | ||
44 | 43 | ||
45 | dotraplinkage void do_divide_error(struct pt_regs *, long); | 44 | dotraplinkage void do_divide_error(struct pt_regs *, long); |
46 | dotraplinkage void do_debug(struct pt_regs *, long); | 45 | dotraplinkage void do_debug(struct pt_regs *, long); |
@@ -67,7 +66,6 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long); | |||
67 | dotraplinkage void do_machine_check(struct pt_regs *, long); | 66 | dotraplinkage void do_machine_check(struct pt_regs *, long); |
68 | #endif | 67 | #endif |
69 | dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); | 68 | dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); |
70 | dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long); | ||
71 | #ifdef CONFIG_X86_32 | 69 | #ifdef CONFIG_X86_32 |
72 | dotraplinkage void do_iret_error(struct pt_regs *, long); | 70 | dotraplinkage void do_iret_error(struct pt_regs *, long); |
73 | #endif | 71 | #endif |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 705bf139288c..d92641cc7acc 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -681,6 +681,8 @@ __SYSCALL(__NR_syncfs, sys_syncfs) | |||
681 | __SYSCALL(__NR_sendmmsg, sys_sendmmsg) | 681 | __SYSCALL(__NR_sendmmsg, sys_sendmmsg) |
682 | #define __NR_setns 308 | 682 | #define __NR_setns 308 |
683 | __SYSCALL(__NR_setns, sys_setns) | 683 | __SYSCALL(__NR_setns, sys_setns) |
684 | #define __NR_getcpu 309 | ||
685 | __SYSCALL(__NR_getcpu, sys_getcpu) | ||
684 | 686 | ||
685 | #ifndef __NO_STUBS | 687 | #ifndef __NO_STUBS |
686 | #define __ARCH_WANT_OLD_READDIR | 688 | #define __ARCH_WANT_OLD_READDIR |
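getcpu is now wired up as syscall 309 on x86-64 (the vsyscall emulation below forwards the vgetcpu slot to it). A quick userspace check; the fallback number is taken from this hunk and only applies to x86-64, and on other kernels SYS_getcpu from <sys/syscall.h> should be used when it is defined.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_getcpu
#define __NR_getcpu 309		/* value introduced in the hunk above */
#endif

int main(void)
{
	unsigned cpu = 0, node = 0;

	if (syscall(__NR_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	else
		perror("getcpu");
	return 0;
}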
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index 60107072c28b..eaea1d31f753 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h | |||
@@ -27,6 +27,12 @@ extern struct timezone sys_tz; | |||
27 | 27 | ||
28 | extern void map_vsyscall(void); | 28 | extern void map_vsyscall(void); |
29 | 29 | ||
30 | /* | ||
31 | * Called on instruction fetch fault in vsyscall page. | ||
32 | * Returns true if handled. | ||
33 | */ | ||
34 | extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); | ||
35 | |||
30 | #endif /* __KERNEL__ */ | 36 | #endif /* __KERNEL__ */ |
31 | 37 | ||
32 | #endif /* _ASM_X86_VSYSCALL_H */ | 38 | #endif /* _ASM_X86_VSYSCALL_H */ |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 64a619d47d34..7ff4669580cf 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -39,7 +39,7 @@ typedef struct xpaddr { | |||
39 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) | 39 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) |
40 | 40 | ||
41 | extern unsigned long *machine_to_phys_mapping; | 41 | extern unsigned long *machine_to_phys_mapping; |
42 | extern unsigned int machine_to_phys_order; | 42 | extern unsigned long machine_to_phys_nr; |
43 | 43 | ||
44 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 44 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
45 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 45 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
@@ -87,7 +87,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) | |||
87 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 87 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
88 | return mfn; | 88 | return mfn; |
89 | 89 | ||
90 | if (unlikely((mfn >> machine_to_phys_order) != 0)) { | 90 | if (unlikely(mfn >= machine_to_phys_nr)) { |
91 | pfn = ~0; | 91 | pfn = ~0; |
92 | goto try_override; | 92 | goto try_override; |
93 | } | 93 | } |
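The m2p bound is now an entry count rather than an order, and mfn_to_pfn() compares against it directly; a shift by fls(nr - 1) accepts out-of-range MFNs whenever the table size is not a power of two. A small standalone illustration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned long nr = 1000;	/* entries actually backed */
	unsigned long order = 10;	/* fls(nr - 1): 2^10 = 1024 */
	unsigned long mfn = 1010;	/* out of range, but below 1024 */

	printf("shift test rejects: %d\n", (mfn >> order) != 0);	/* 0: accepted */
	printf("bound test rejects: %d\n", mfn >= nr);			/* 1: rejected */
	return 0;
}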
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 04105574c8e9..82f2912155a5 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -17,19 +17,6 @@ CFLAGS_REMOVE_ftrace.o = -pg | |||
17 | CFLAGS_REMOVE_early_printk.o = -pg | 17 | CFLAGS_REMOVE_early_printk.o = -pg |
18 | endif | 18 | endif |
19 | 19 | ||
20 | # | ||
21 | # vsyscalls (which work on the user stack) should have | ||
22 | # no stack-protector checks: | ||
23 | # | ||
24 | nostackp := $(call cc-option, -fno-stack-protector) | ||
25 | CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) | ||
26 | CFLAGS_hpet.o := $(nostackp) | ||
27 | CFLAGS_paravirt.o := $(nostackp) | ||
28 | GCOV_PROFILE_vsyscall_64.o := n | ||
29 | GCOV_PROFILE_hpet.o := n | ||
30 | GCOV_PROFILE_tsc.o := n | ||
31 | GCOV_PROFILE_paravirt.o := n | ||
32 | |||
33 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o | 20 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o |
34 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 21 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
35 | obj-y += time.o ioport.o ldt.o dumpstack.o | 22 | obj-y += time.o ioport.o ldt.o dumpstack.o |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index adc66c3a1fef..34b18594e724 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -207,7 +207,6 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri | |||
207 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 207 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
208 | APIC_DM_INIT; | 208 | APIC_DM_INIT; |
209 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 209 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
210 | mdelay(10); | ||
211 | 210 | ||
212 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 211 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
213 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 212 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 08119a37e53c..6b96110bb0c3 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -149,7 +149,6 @@ struct set_mtrr_data { | |||
149 | */ | 149 | */ |
150 | static int mtrr_rendezvous_handler(void *info) | 150 | static int mtrr_rendezvous_handler(void *info) |
151 | { | 151 | { |
152 | #ifdef CONFIG_SMP | ||
153 | struct set_mtrr_data *data = info; | 152 | struct set_mtrr_data *data = info; |
154 | 153 | ||
155 | /* | 154 | /* |
@@ -171,7 +170,6 @@ static int mtrr_rendezvous_handler(void *info) | |||
171 | } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { | 170 | } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { |
172 | mtrr_if->set_all(); | 171 | mtrr_if->set_all(); |
173 | } | 172 | } |
174 | #endif | ||
175 | return 0; | 173 | return 0; |
176 | } | 174 | } |
177 | 175 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 45fbb8f7f549..f88af2c2a561 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1590,6 +1590,7 @@ static __init int intel_pmu_init(void) | |||
1590 | break; | 1590 | break; |
1591 | 1591 | ||
1592 | case 42: /* SandyBridge */ | 1592 | case 42: /* SandyBridge */ |
1593 | case 45: /* SandyBridge, "Romely-EP" */ | ||
1593 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 1594 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
1594 | sizeof(hw_cache_event_ids)); | 1595 | sizeof(hw_cache_event_ids)); |
1595 | 1596 | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5c1a91974918..f3f6f5344001 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <asm/ftrace.h> | 54 | #include <asm/ftrace.h> |
55 | #include <asm/irq_vectors.h> | 55 | #include <asm/irq_vectors.h> |
56 | #include <asm/cpufeature.h> | 56 | #include <asm/cpufeature.h> |
57 | #include <asm/alternative-asm.h> | ||
57 | 58 | ||
58 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 59 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
59 | #include <linux/elf-em.h> | 60 | #include <linux/elf-em.h> |
@@ -873,12 +874,7 @@ ENTRY(simd_coprocessor_error) | |||
873 | 661: pushl_cfi $do_general_protection | 874 | 661: pushl_cfi $do_general_protection |
874 | 662: | 875 | 662: |
875 | .section .altinstructions,"a" | 876 | .section .altinstructions,"a" |
876 | .balign 4 | 877 | altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f |
877 | .long 661b | ||
878 | .long 663f | ||
879 | .word X86_FEATURE_XMM | ||
880 | .byte 662b-661b | ||
881 | .byte 664f-663f | ||
882 | .previous | 878 | .previous |
883 | .section .altinstr_replacement,"ax" | 879 | .section .altinstr_replacement,"ax" |
884 | 663: pushl $do_simd_coprocessor_error | 880 | 663: pushl $do_simd_coprocessor_error |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e13329d800c8..6419bb05ecd5 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1111,7 +1111,6 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug | |||
1111 | zeroentry coprocessor_error do_coprocessor_error | 1111 | zeroentry coprocessor_error do_coprocessor_error |
1112 | errorentry alignment_check do_alignment_check | 1112 | errorentry alignment_check do_alignment_check |
1113 | zeroentry simd_coprocessor_error do_simd_coprocessor_error | 1113 | zeroentry simd_coprocessor_error do_simd_coprocessor_error |
1114 | zeroentry emulate_vsyscall do_emulate_vsyscall | ||
1115 | 1114 | ||
1116 | 1115 | ||
1117 | /* Reload gs selector with exception handling */ | 1116 | /* Reload gs selector with exception handling */ |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 613a7931ecc1..d90272e6bc40 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -307,6 +307,10 @@ struct pv_info pv_info = { | |||
307 | .paravirt_enabled = 0, | 307 | .paravirt_enabled = 0, |
308 | .kernel_rpl = 0, | 308 | .kernel_rpl = 0, |
309 | .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ | 309 | .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ |
310 | |||
311 | #ifdef CONFIG_X86_64 | ||
312 | .extra_user_64bit_cs = __USER_CS, | ||
313 | #endif | ||
310 | }; | 314 | }; |
311 | 315 | ||
312 | struct pv_init_ops pv_init_ops = { | 316 | struct pv_init_ops pv_init_ops = { |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 7977f0cfe339..c346d1161488 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -74,7 +74,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) | |||
74 | 74 | ||
75 | #ifdef CONFIG_X86_64 | 75 | #ifdef CONFIG_X86_64 |
76 | case 0x40 ... 0x4f: | 76 | case 0x40 ... 0x4f: |
77 | if (regs->cs != __USER_CS) | 77 | if (!user_64bit_mode(regs)) |
78 | /* 32-bit mode: register increment */ | 78 | /* 32-bit mode: register increment */ |
79 | return 0; | 79 | return 0; |
80 | /* 64-bit mode: REX prefix */ | 80 | /* 64-bit mode: REX prefix */ |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9682ec50180c..6913369c234c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -872,12 +872,6 @@ void __init trap_init(void) | |||
872 | set_bit(SYSCALL_VECTOR, used_vectors); | 872 | set_bit(SYSCALL_VECTOR, used_vectors); |
873 | #endif | 873 | #endif |
874 | 874 | ||
875 | #ifdef CONFIG_X86_64 | ||
876 | BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors)); | ||
877 | set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall); | ||
878 | set_bit(VSYSCALL_EMU_VECTOR, used_vectors); | ||
879 | #endif | ||
880 | |||
881 | /* | 875 | /* |
882 | * Should be a barrier for any external CPU state: | 876 | * Should be a barrier for any external CPU state: |
883 | */ | 877 | */ |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 4aa9c54a9b76..0f703f10901a 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -71,7 +71,6 @@ PHDRS { | |||
71 | text PT_LOAD FLAGS(5); /* R_E */ | 71 | text PT_LOAD FLAGS(5); /* R_E */ |
72 | data PT_LOAD FLAGS(6); /* RW_ */ | 72 | data PT_LOAD FLAGS(6); /* RW_ */ |
73 | #ifdef CONFIG_X86_64 | 73 | #ifdef CONFIG_X86_64 |
74 | user PT_LOAD FLAGS(5); /* R_E */ | ||
75 | #ifdef CONFIG_SMP | 74 | #ifdef CONFIG_SMP |
76 | percpu PT_LOAD FLAGS(6); /* RW_ */ | 75 | percpu PT_LOAD FLAGS(6); /* RW_ */ |
77 | #endif | 76 | #endif |
@@ -154,44 +153,16 @@ SECTIONS | |||
154 | 153 | ||
155 | #ifdef CONFIG_X86_64 | 154 | #ifdef CONFIG_X86_64 |
156 | 155 | ||
157 | #define VSYSCALL_ADDR (-10*1024*1024) | 156 | . = ALIGN(PAGE_SIZE); |
158 | |||
159 | #define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET) | ||
160 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) | ||
161 | |||
162 | #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) | ||
163 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) | ||
164 | |||
165 | . = ALIGN(4096); | ||
166 | __vsyscall_0 = .; | ||
167 | |||
168 | . = VSYSCALL_ADDR; | ||
169 | .vsyscall : AT(VLOAD(.vsyscall)) { | ||
170 | *(.vsyscall_0) | ||
171 | |||
172 | . = 1024; | ||
173 | *(.vsyscall_1) | ||
174 | |||
175 | . = 2048; | ||
176 | *(.vsyscall_2) | ||
177 | |||
178 | . = 4096; /* Pad the whole page. */ | ||
179 | } :user =0xcc | ||
180 | . = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE); | ||
181 | |||
182 | #undef VSYSCALL_ADDR | ||
183 | #undef VLOAD_OFFSET | ||
184 | #undef VLOAD | ||
185 | #undef VVIRT_OFFSET | ||
186 | #undef VVIRT | ||
187 | |||
188 | __vvar_page = .; | 157 | __vvar_page = .; |
189 | 158 | ||
190 | .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { | 159 | .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { |
160 | /* work around gold bug 13023 */ | ||
161 | __vvar_beginning_hack = .; | ||
191 | 162 | ||
192 | /* Place all vvars at the offsets in asm/vvar.h. */ | 163 | /* Place all vvars at the offsets in asm/vvar.h. */ |
193 | #define EMIT_VVAR(name, offset) \ | 164 | #define EMIT_VVAR(name, offset) \ |
194 | . = offset; \ | 165 | . = __vvar_beginning_hack + offset; \ |
195 | *(.vvar_ ## name) | 166 | *(.vvar_ ## name) |
196 | #define __VVAR_KERNEL_LDS | 167 | #define __VVAR_KERNEL_LDS |
197 | #include <asm/vvar.h> | 168 | #include <asm/vvar.h> |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index dda7dff9cef7..18ae83dd1cd7 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -18,9 +18,6 @@ | |||
18 | * use the vDSO. | 18 | * use the vDSO. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* Disable profiling for userspace code: */ | ||
22 | #define DISABLE_BRANCH_PROFILING | ||
23 | |||
24 | #include <linux/time.h> | 21 | #include <linux/time.h> |
25 | #include <linux/init.h> | 22 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
@@ -50,12 +47,36 @@ | |||
50 | #include <asm/vgtod.h> | 47 | #include <asm/vgtod.h> |
51 | #include <asm/traps.h> | 48 | #include <asm/traps.h> |
52 | 49 | ||
50 | #define CREATE_TRACE_POINTS | ||
51 | #include "vsyscall_trace.h" | ||
52 | |||
53 | DEFINE_VVAR(int, vgetcpu_mode); | 53 | DEFINE_VVAR(int, vgetcpu_mode); |
54 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = | 54 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = |
55 | { | 55 | { |
56 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), | 56 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; | ||
60 | |||
61 | static int __init vsyscall_setup(char *str) | ||
62 | { | ||
63 | if (str) { | ||
64 | if (!strcmp("emulate", str)) | ||
65 | vsyscall_mode = EMULATE; | ||
66 | else if (!strcmp("native", str)) | ||
67 | vsyscall_mode = NATIVE; | ||
68 | else if (!strcmp("none", str)) | ||
69 | vsyscall_mode = NONE; | ||
70 | else | ||
71 | return -EINVAL; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | return -EINVAL; | ||
77 | } | ||
78 | early_param("vsyscall", vsyscall_setup); | ||
79 | |||
59 | void update_vsyscall_tz(void) | 80 | void update_vsyscall_tz(void) |
60 | { | 81 | { |
61 | unsigned long flags; | 82 | unsigned long flags; |
@@ -100,7 +121,7 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, | |||
100 | 121 | ||
101 | printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", | 122 | printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", |
102 | level, tsk->comm, task_pid_nr(tsk), | 123 | level, tsk->comm, task_pid_nr(tsk), |
103 | message, regs->ip - 2, regs->cs, | 124 | message, regs->ip, regs->cs, |
104 | regs->sp, regs->ax, regs->si, regs->di); | 125 | regs->sp, regs->ax, regs->si, regs->di); |
105 | } | 126 | } |
106 | 127 | ||
@@ -118,46 +139,39 @@ static int addr_to_vsyscall_nr(unsigned long addr) | |||
118 | return nr; | 139 | return nr; |
119 | } | 140 | } |
120 | 141 | ||
121 | void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) | 142 | bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) |
122 | { | 143 | { |
123 | struct task_struct *tsk; | 144 | struct task_struct *tsk; |
124 | unsigned long caller; | 145 | unsigned long caller; |
125 | int vsyscall_nr; | 146 | int vsyscall_nr; |
126 | long ret; | 147 | long ret; |
127 | 148 | ||
128 | local_irq_enable(); | ||
129 | |||
130 | /* | 149 | /* |
131 | * Real 64-bit user mode code has cs == __USER_CS. Anything else | 150 | * No point in checking CS -- the only way to get here is a user mode |
132 | * is bogus. | 151 | * trap to a high address, which means that we're in 64-bit user code. |
133 | */ | 152 | */ |
134 | if (regs->cs != __USER_CS) { | ||
135 | /* | ||
136 | * If we trapped from kernel mode, we might as well OOPS now | ||
137 | * instead of returning to some random address and OOPSing | ||
138 | * then. | ||
139 | */ | ||
140 | BUG_ON(!user_mode(regs)); | ||
141 | 153 | ||
142 | /* Compat mode and non-compat 32-bit CS should both segfault. */ | 154 | WARN_ON_ONCE(address != regs->ip); |
143 | warn_bad_vsyscall(KERN_WARNING, regs, | 155 | |
144 | "illegal int 0xcc from 32-bit mode"); | 156 | if (vsyscall_mode == NONE) { |
145 | goto sigsegv; | 157 | warn_bad_vsyscall(KERN_INFO, regs, |
158 | "vsyscall attempted with vsyscall=none"); | ||
159 | return false; | ||
146 | } | 160 | } |
147 | 161 | ||
148 | /* | 162 | vsyscall_nr = addr_to_vsyscall_nr(address); |
149 | * x86-ism here: regs->ip points to the instruction after the int 0xcc, | 163 | |
150 | * and int 0xcc is two bytes long. | 164 | trace_emulate_vsyscall(vsyscall_nr); |
151 | */ | 165 | |
152 | vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2); | ||
153 | if (vsyscall_nr < 0) { | 166 | if (vsyscall_nr < 0) { |
154 | warn_bad_vsyscall(KERN_WARNING, regs, | 167 | warn_bad_vsyscall(KERN_WARNING, regs, |
155 | "illegal int 0xcc (exploit attempt?)"); | 168 | "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround"); |
156 | goto sigsegv; | 169 | goto sigsegv; |
157 | } | 170 | } |
158 | 171 | ||
159 | if (get_user(caller, (unsigned long __user *)regs->sp) != 0) { | 172 | if (get_user(caller, (unsigned long __user *)regs->sp) != 0) { |
160 | warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)"); | 173 | warn_bad_vsyscall(KERN_WARNING, regs, |
174 | "vsyscall with bad stack (exploit attempt?)"); | ||
161 | goto sigsegv; | 175 | goto sigsegv; |
162 | } | 176 | } |
163 | 177 | ||
@@ -202,13 +216,11 @@ void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) | |||
202 | regs->ip = caller; | 216 | regs->ip = caller; |
203 | regs->sp += 8; | 217 | regs->sp += 8; |
204 | 218 | ||
205 | local_irq_disable(); | 219 | return true; |
206 | return; | ||
207 | 220 | ||
208 | sigsegv: | 221 | sigsegv: |
209 | regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */ | ||
210 | force_sig(SIGSEGV, current); | 222 | force_sig(SIGSEGV, current); |
211 | local_irq_disable(); | 223 | return true; |
212 | } | 224 | } |
213 | 225 | ||
214 | /* | 226 | /* |
@@ -256,15 +268,21 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | |||
256 | 268 | ||
257 | void __init map_vsyscall(void) | 269 | void __init map_vsyscall(void) |
258 | { | 270 | { |
259 | extern char __vsyscall_0; | 271 | extern char __vsyscall_page; |
260 | unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0); | 272 | unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); |
261 | extern char __vvar_page; | 273 | extern char __vvar_page; |
262 | unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); | 274 | unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); |
263 | 275 | ||
264 | /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */ | 276 | __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, |
265 | __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); | 277 | vsyscall_mode == NATIVE |
278 | ? PAGE_KERNEL_VSYSCALL | ||
279 | : PAGE_KERNEL_VVAR); | ||
280 | BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) != | ||
281 | (unsigned long)VSYSCALL_START); | ||
282 | |||
266 | __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR); | 283 | __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR); |
267 | BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS); | 284 | BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != |
285 | (unsigned long)VVAR_ADDRESS); | ||
268 | } | 286 | } |
269 | 287 | ||
270 | static int __init vsyscall_init(void) | 288 | static int __init vsyscall_init(void) |
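With the int 0xcc trap gone, a vsyscall is recognized purely from its address: the page check added to fault.c below plus the 1024-byte slot layout in vsyscall_emu_64.S determine the call number. The sketch below shows one way such an address-to-number mapping can work; it assumes the long-standing 0xffffffffff600000 base and is not the kernel's exact helper.

#include <stdio.h>

#define VSYSCALL_BASE 0xffffffffff600000UL	/* assumed fixed vsyscall address */

static int addr_to_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xc00UL) != VSYSCALL_BASE)
		return -1;			/* not the start of a slot */
	nr = (int)((addr & 0xc00UL) >> 10);
	return nr < 3 ? nr : -1;		/* only three slots exist */
}

int main(void)
{
	printf("gettimeofday -> %d\n", addr_to_nr(VSYSCALL_BASE));
	printf("time         -> %d\n", addr_to_nr(VSYSCALL_BASE + 0x400));
	printf("getcpu       -> %d\n", addr_to_nr(VSYSCALL_BASE + 0x800));
	printf("misaligned   -> %d\n", addr_to_nr(VSYSCALL_BASE + 0x123));
	return 0;
}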
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S index ffa845eae5ca..c9596a9af159 100644 --- a/arch/x86/kernel/vsyscall_emu_64.S +++ b/arch/x86/kernel/vsyscall_emu_64.S | |||
@@ -7,21 +7,31 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | |||
10 | #include <asm/irq_vectors.h> | 11 | #include <asm/irq_vectors.h> |
12 | #include <asm/page_types.h> | ||
13 | #include <asm/unistd_64.h> | ||
14 | |||
15 | __PAGE_ALIGNED_DATA | ||
16 | .globl __vsyscall_page | ||
17 | .balign PAGE_SIZE, 0xcc | ||
18 | .type __vsyscall_page, @object | ||
19 | __vsyscall_page: | ||
20 | |||
21 | mov $__NR_gettimeofday, %rax | ||
22 | syscall | ||
23 | ret | ||
11 | 24 | ||
12 | /* The unused parts of the page are filled with 0xcc by the linker script. */ | 25 | .balign 1024, 0xcc |
26 | mov $__NR_time, %rax | ||
27 | syscall | ||
28 | ret | ||
13 | 29 | ||
14 | .section .vsyscall_0, "a" | 30 | .balign 1024, 0xcc |
15 | ENTRY(vsyscall_0) | 31 | mov $__NR_getcpu, %rax |
16 | int $VSYSCALL_EMU_VECTOR | 32 | syscall |
17 | END(vsyscall_0) | 33 | ret |
18 | 34 | ||
19 | .section .vsyscall_1, "a" | 35 | .balign 4096, 0xcc |
20 | ENTRY(vsyscall_1) | ||
21 | int $VSYSCALL_EMU_VECTOR | ||
22 | END(vsyscall_1) | ||
23 | 36 | ||
24 | .section .vsyscall_2, "a" | 37 | .size __vsyscall_page, 4096 |
25 | ENTRY(vsyscall_2) | ||
26 | int $VSYSCALL_EMU_VECTOR | ||
27 | END(vsyscall_2) | ||
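The rewritten page keeps the legacy ABI: gettimeofday at the page base, time at +0x400, getcpu at +0x800, each slot padded with 0xcc. A hedged userspace example of a caller that still uses the fixed-address gettimeofday entry follows; it assumes the traditional 0xffffffffff600000 base, faults into the emulation path under vsyscall=emulate, and receives SIGSEGV under vsyscall=none.

#include <stdio.h>
#include <sys/time.h>

#define VSYSCALL_GTOD 0xffffffffff600000UL	/* traditional fixed address */

typedef int (*gtod_fn)(struct timeval *tv, struct timezone *tz);

int main(void)
{
	gtod_fn vgtod = (gtod_fn)VSYSCALL_GTOD;
	struct timeval tv;

	if (vgtod(&tv, NULL) == 0)
		printf("vsyscall gettimeofday: %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
	else
		puts("vsyscall call failed");
	return 0;
}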
diff --git a/arch/x86/kernel/vsyscall_trace.h b/arch/x86/kernel/vsyscall_trace.h new file mode 100644 index 000000000000..a8b2edec54fe --- /dev/null +++ b/arch/x86/kernel/vsyscall_trace.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM vsyscall | ||
3 | |||
4 | #if !defined(__VSYSCALL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define __VSYSCALL_TRACE_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | TRACE_EVENT(emulate_vsyscall, | ||
10 | |||
11 | TP_PROTO(int nr), | ||
12 | |||
13 | TP_ARGS(nr), | ||
14 | |||
15 | TP_STRUCT__entry(__field(int, nr)), | ||
16 | |||
17 | TP_fast_assign( | ||
18 | __entry->nr = nr; | ||
19 | ), | ||
20 | |||
21 | TP_printk("nr = %d", __entry->nr) | ||
22 | ); | ||
23 | |||
24 | #endif | ||
25 | |||
26 | #undef TRACE_INCLUDE_PATH | ||
27 | #define TRACE_INCLUDE_PATH ../../arch/x86/kernel | ||
28 | #define TRACE_INCLUDE_FILE vsyscall_trace | ||
29 | #include <trace/define_trace.h> | ||
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 988724b236b6..ff5790d8e990 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
@@ -22,6 +22,8 @@ config KVM | |||
22 | depends on HAVE_KVM | 22 | depends on HAVE_KVM |
23 | # for device assignment: | 23 | # for device assignment: |
24 | depends on PCI | 24 | depends on PCI |
25 | # for TASKSTATS/TASK_DELAY_ACCT: | ||
26 | depends on NET | ||
25 | select PREEMPT_NOTIFIERS | 27 | select PREEMPT_NOTIFIERS |
26 | select MMU_NOTIFIER | 28 | select MMU_NOTIFIER |
27 | select ANON_INODES | 29 | select ANON_INODES |
@@ -31,6 +33,7 @@ config KVM | |||
31 | select KVM_ASYNC_PF | 33 | select KVM_ASYNC_PF |
32 | select USER_RETURN_NOTIFIER | 34 | select USER_RETURN_NOTIFIER |
33 | select KVM_MMIO | 35 | select KVM_MMIO |
36 | select TASKSTATS | ||
34 | select TASK_DELAY_ACCT | 37 | select TASK_DELAY_ACCT |
35 | ---help--- | 38 | ---help--- |
36 | Support hosting fully virtualized guest machines using hardware | 39 | Support hosting fully virtualized guest machines using hardware |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4d09df054e39..0d17c8c50acd 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/traps.h> /* dotraplinkage, ... */ | 17 | #include <asm/traps.h> /* dotraplinkage, ... */ |
18 | #include <asm/pgalloc.h> /* pgd_*(), ... */ | 18 | #include <asm/pgalloc.h> /* pgd_*(), ... */ |
19 | #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ | 19 | #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ |
20 | #include <asm/vsyscall.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * Page fault error code bits: | 23 | * Page fault error code bits: |
@@ -105,7 +106,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, | |||
105 | * but for now it's good enough to assume that long | 106 | * but for now it's good enough to assume that long |
106 | * mode only uses well known segments or kernel. | 107 | * mode only uses well known segments or kernel. |
107 | */ | 108 | */ |
108 | return (!user_mode(regs)) || (regs->cs == __USER_CS); | 109 | return (!user_mode(regs) || user_64bit_mode(regs)); |
109 | #endif | 110 | #endif |
110 | case 0x60: | 111 | case 0x60: |
111 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | 112 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ |
@@ -720,6 +721,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
720 | if (is_errata100(regs, address)) | 721 | if (is_errata100(regs, address)) |
721 | return; | 722 | return; |
722 | 723 | ||
724 | #ifdef CONFIG_X86_64 | ||
725 | /* | ||
726 | * Instruction fetch faults in the vsyscall page might need | ||
727 | * emulation. | ||
728 | */ | ||
729 | if (unlikely((error_code & PF_INSTR) && | ||
730 | ((address & ~0xfff) == VSYSCALL_START))) { | ||
731 | if (emulate_vsyscall(regs, address)) | ||
732 | return; | ||
733 | } | ||
734 | #endif | ||
735 | |||
723 | if (unlikely(show_unhandled_signals)) | 736 | if (unlikely(show_unhandled_signals)) |
724 | show_signal_msg(regs, error_code, address, tsk); | 737 | show_signal_msg(regs, error_code, address, tsk); |
725 | 738 | ||
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index ae3cb23cd89b..c95330267f08 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -360,6 +360,15 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root) | |||
360 | } | 360 | } |
361 | } | 361 | } |
362 | 362 | ||
363 | /* After the PCI-E bus has been walked and all devices discovered, | ||
364 | * configure any settings of the fabric that might be necessary. | ||
365 | */ | ||
366 | if (bus) { | ||
367 | struct pci_bus *child; | ||
368 | list_for_each_entry(child, &bus->children, node) | ||
369 | pcie_bus_configure_settings(child, child->self->pcie_mpss); | ||
370 | } | ||
371 | |||
363 | if (!bus) | 372 | if (!bus) |
364 | kfree(sd); | 373 | kfree(sd); |
365 | 374 | ||
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c index 8b9940e78e2f..7cce722667b8 100644 --- a/arch/x86/platform/olpc/olpc.c +++ b/arch/x86/platform/olpc/olpc.c | |||
@@ -161,13 +161,13 @@ restart: | |||
161 | if (inbuf && inlen) { | 161 | if (inbuf && inlen) { |
162 | /* write data to EC */ | 162 | /* write data to EC */ |
163 | for (i = 0; i < inlen; i++) { | 163 | for (i = 0; i < inlen; i++) { |
164 | pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); | ||
165 | outb(inbuf[i], 0x68); | ||
164 | if (wait_on_ibf(0x6c, 0)) { | 166 | if (wait_on_ibf(0x6c, 0)) { |
165 | printk(KERN_ERR "olpc-ec: timeout waiting for" | 167 | printk(KERN_ERR "olpc-ec: timeout waiting for" |
166 | " EC accept data!\n"); | 168 | " EC accept data!\n"); |
167 | goto err; | 169 | goto err; |
168 | } | 170 | } |
169 | pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); | ||
170 | outb(inbuf[i], 0x68); | ||
171 | } | 171 | } |
172 | } | 172 | } |
173 | if (outbuf && outlen) { | 173 | if (outbuf && outlen) { |
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S index 1b979c12ba85..01f5e3b4613c 100644 --- a/arch/x86/vdso/vdso.S +++ b/arch/x86/vdso/vdso.S | |||
@@ -9,6 +9,7 @@ __PAGE_ALIGNED_DATA | |||
9 | vdso_start: | 9 | vdso_start: |
10 | .incbin "arch/x86/vdso/vdso.so" | 10 | .incbin "arch/x86/vdso/vdso.so" |
11 | vdso_end: | 11 | vdso_end: |
12 | .align PAGE_SIZE /* extra data here leaks to userspace. */ | ||
12 | 13 | ||
13 | .previous | 14 | .previous |
14 | 15 | ||
diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S index e2800affa754..e354bceee0e0 100644 --- a/arch/x86/vdso/vdso32/sysenter.S +++ b/arch/x86/vdso/vdso32/sysenter.S | |||
@@ -43,7 +43,7 @@ __kernel_vsyscall: | |||
43 | .space 7,0x90 | 43 | .space 7,0x90 |
44 | 44 | ||
45 | /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ | 45 | /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ |
46 | jmp .Lenter_kernel | 46 | int $0x80 |
47 | /* 16: System call normal return point is here! */ | 47 | /* 16: System call normal return point is here! */ |
48 | VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */ | 48 | VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */ |
49 | pop %ebp | 49 | pop %ebp |
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 3326204e251f..add2c2d729ce 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -15,7 +15,7 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ | |||
15 | grant-table.o suspend.o platform-pci-unplug.o \ | 15 | grant-table.o suspend.o platform-pci-unplug.o \ |
16 | p2m.o | 16 | p2m.o |
17 | 17 | ||
18 | obj-$(CONFIG_FTRACE) += trace.o | 18 | obj-$(CONFIG_EVENT_TRACING) += trace.o |
19 | 19 | ||
20 | obj-$(CONFIG_SMP) += smp.o | 20 | obj-$(CONFIG_SMP) += smp.o |
21 | obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o | 21 | obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 974a528458a0..2d69617950f7 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type); | |||
77 | 77 | ||
78 | unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; | 78 | unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; |
79 | EXPORT_SYMBOL(machine_to_phys_mapping); | 79 | EXPORT_SYMBOL(machine_to_phys_mapping); |
80 | unsigned int machine_to_phys_order; | 80 | unsigned long machine_to_phys_nr; |
81 | EXPORT_SYMBOL(machine_to_phys_order); | 81 | EXPORT_SYMBOL(machine_to_phys_nr); |
82 | 82 | ||
83 | struct start_info *xen_start_info; | 83 | struct start_info *xen_start_info; |
84 | EXPORT_SYMBOL_GPL(xen_start_info); | 84 | EXPORT_SYMBOL_GPL(xen_start_info); |
@@ -951,6 +951,10 @@ static const struct pv_info xen_info __initconst = { | |||
951 | .paravirt_enabled = 1, | 951 | .paravirt_enabled = 1, |
952 | .shared_kernel_pmd = 0, | 952 | .shared_kernel_pmd = 0, |
953 | 953 | ||
954 | #ifdef CONFIG_X86_64 | ||
955 | .extra_user_64bit_cs = FLAT_USER_CS64, | ||
956 | #endif | ||
957 | |||
954 | .name = "Xen", | 958 | .name = "Xen", |
955 | }; | 959 | }; |
956 | 960 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f987bde77c49..20a614275064 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1713,15 +1713,19 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | |||
1713 | void __init xen_setup_machphys_mapping(void) | 1713 | void __init xen_setup_machphys_mapping(void) |
1714 | { | 1714 | { |
1715 | struct xen_machphys_mapping mapping; | 1715 | struct xen_machphys_mapping mapping; |
1716 | unsigned long machine_to_phys_nr_ents; | ||
1717 | 1716 | ||
1718 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | 1717 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { |
1719 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | 1718 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; |
1720 | machine_to_phys_nr_ents = mapping.max_mfn + 1; | 1719 | machine_to_phys_nr = mapping.max_mfn + 1; |
1721 | } else { | 1720 | } else { |
1722 | machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; | 1721 | machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; |
1723 | } | 1722 | } |
1724 | machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); | 1723 | #ifdef CONFIG_X86_32 |
1724 | if ((machine_to_phys_mapping + machine_to_phys_nr) | ||
1725 | < machine_to_phys_mapping) | ||
1726 | machine_to_phys_nr = (unsigned long *)NULL | ||
1727 | - machine_to_phys_mapping; | ||
1728 | #endif | ||
1725 | } | 1729 | } |
1726 | 1730 | ||
1727 | #ifdef CONFIG_X86_64 | 1731 | #ifdef CONFIG_X86_64 |
@@ -1916,6 +1920,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
1916 | # endif | 1920 | # endif |
1917 | #else | 1921 | #else |
1918 | case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: | 1922 | case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: |
1923 | case VVAR_PAGE: | ||
1919 | #endif | 1924 | #endif |
1920 | case FIX_TEXT_POKE0: | 1925 | case FIX_TEXT_POKE0: |
1921 | case FIX_TEXT_POKE1: | 1926 | case FIX_TEXT_POKE1: |
@@ -1956,7 +1961,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
1956 | #ifdef CONFIG_X86_64 | 1961 | #ifdef CONFIG_X86_64 |
1957 | /* Replicate changes to map the vsyscall page into the user | 1962 | /* Replicate changes to map the vsyscall page into the user |
1958 | pagetable vsyscall mapping. */ | 1963 | pagetable vsyscall mapping. */ |
1959 | if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { | 1964 | if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) || |
1965 | idx == VVAR_PAGE) { | ||
1960 | unsigned long vaddr = __fix_to_virt(idx); | 1966 | unsigned long vaddr = __fix_to_virt(idx); |
1961 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); | 1967 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); |
1962 | } | 1968 | } |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index b4533a86d7e4..e79dbb95482b 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -521,8 +521,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | |||
521 | native_smp_prepare_cpus(max_cpus); | 521 | native_smp_prepare_cpus(max_cpus); |
522 | WARN_ON(xen_smp_intr_init(0)); | 522 | WARN_ON(xen_smp_intr_init(0)); |
523 | 523 | ||
524 | if (!xen_have_vector_callback) | ||
525 | return; | ||
526 | xen_init_lock_cpu(0); | 524 | xen_init_lock_cpu(0); |
527 | xen_init_spinlocks(); | 525 | xen_init_spinlocks(); |
528 | } | 526 | } |
@@ -546,6 +544,8 @@ static void xen_hvm_cpu_die(unsigned int cpu) | |||
546 | 544 | ||
547 | void __init xen_hvm_smp_init(void) | 545 | void __init xen_hvm_smp_init(void) |
548 | { | 546 | { |
547 | if (!xen_have_vector_callback) | ||
548 | return; | ||
549 | smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; | 549 | smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; |
550 | smp_ops.smp_send_reschedule = xen_smp_send_reschedule; | 550 | smp_ops.smp_send_reschedule = xen_smp_send_reschedule; |
551 | smp_ops.cpu_up = xen_hvm_cpu_up; | 551 | smp_ops.cpu_up = xen_hvm_cpu_up; |
diff --git a/block/Kconfig b/block/Kconfig index 60be1e0455da..e97934eececa 100644 --- a/block/Kconfig +++ b/block/Kconfig | |||
@@ -65,6 +65,16 @@ config BLK_DEV_BSG | |||
65 | 65 | ||
66 | If unsure, say Y. | 66 | If unsure, say Y. |
67 | 67 | ||
68 | config BLK_DEV_BSGLIB | ||
69 | bool "Block layer SG support v4 helper lib" | ||
70 | default n | ||
71 | select BLK_DEV_BSG | ||
72 | help | ||
73 | Subsystems will normally enable this if needed. Users will not | ||
74 | normally need to manually enable this. | ||
75 | |||
76 | If unsure, say N. | ||
77 | |||
68 | config BLK_DEV_INTEGRITY | 78 | config BLK_DEV_INTEGRITY |
69 | bool "Block layer data integrity support" | 79 | bool "Block layer data integrity support" |
70 | ---help--- | 80 | ---help--- |
diff --git a/block/Makefile b/block/Makefile index 0fec4b3fab51..514c6e4f427a 100644 --- a/block/Makefile +++ b/block/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ | |||
8 | blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o | 8 | blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o |
9 | 9 | ||
10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o | 10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o |
11 | obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o | ||
11 | obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o | 12 | obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o |
12 | obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o | 13 | obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o |
13 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o | 14 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o |
diff --git a/block/blk-core.c b/block/blk-core.c index b627558c461f..90e1ffdeb415 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1702,6 +1702,7 @@ EXPORT_SYMBOL_GPL(blk_rq_check_limits); | |||
1702 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 1702 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
1703 | { | 1703 | { |
1704 | unsigned long flags; | 1704 | unsigned long flags; |
1705 | int where = ELEVATOR_INSERT_BACK; | ||
1705 | 1706 | ||
1706 | if (blk_rq_check_limits(q, rq)) | 1707 | if (blk_rq_check_limits(q, rq)) |
1707 | return -EIO; | 1708 | return -EIO; |
@@ -1718,7 +1719,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |||
1718 | */ | 1719 | */ |
1719 | BUG_ON(blk_queued_rq(rq)); | 1720 | BUG_ON(blk_queued_rq(rq)); |
1720 | 1721 | ||
1721 | add_acct_request(q, rq, ELEVATOR_INSERT_BACK); | 1722 | if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) |
1723 | where = ELEVATOR_INSERT_FLUSH; | ||
1724 | |||
1725 | add_acct_request(q, rq, where); | ||
1722 | spin_unlock_irqrestore(q->queue_lock, flags); | 1726 | spin_unlock_irqrestore(q->queue_lock, flags); |
1723 | 1727 | ||
1724 | return 0; | 1728 | return 0; |
@@ -2275,7 +2279,7 @@ static bool blk_end_bidi_request(struct request *rq, int error, | |||
2275 | * %false - we are done with this request | 2279 | * %false - we are done with this request |
2276 | * %true - still buffers pending for this request | 2280 | * %true - still buffers pending for this request |
2277 | **/ | 2281 | **/ |
2278 | static bool __blk_end_bidi_request(struct request *rq, int error, | 2282 | bool __blk_end_bidi_request(struct request *rq, int error, |
2279 | unsigned int nr_bytes, unsigned int bidi_bytes) | 2283 | unsigned int nr_bytes, unsigned int bidi_bytes) |
2280 | { | 2284 | { |
2281 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 2285 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) |
diff --git a/block/blk-flush.c b/block/blk-flush.c index bb21e4c36f70..491eb30a242d 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -95,11 +95,12 @@ static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) | |||
95 | { | 95 | { |
96 | unsigned int policy = 0; | 96 | unsigned int policy = 0; |
97 | 97 | ||
98 | if (blk_rq_sectors(rq)) | ||
99 | policy |= REQ_FSEQ_DATA; | ||
100 | |||
98 | if (fflags & REQ_FLUSH) { | 101 | if (fflags & REQ_FLUSH) { |
99 | if (rq->cmd_flags & REQ_FLUSH) | 102 | if (rq->cmd_flags & REQ_FLUSH) |
100 | policy |= REQ_FSEQ_PREFLUSH; | 103 | policy |= REQ_FSEQ_PREFLUSH; |
101 | if (blk_rq_sectors(rq)) | ||
102 | policy |= REQ_FSEQ_DATA; | ||
103 | if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) | 104 | if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) |
104 | policy |= REQ_FSEQ_POSTFLUSH; | 105 | policy |= REQ_FSEQ_POSTFLUSH; |
105 | } | 106 | } |
@@ -122,7 +123,7 @@ static void blk_flush_restore_request(struct request *rq) | |||
122 | 123 | ||
123 | /* make @rq a normal request */ | 124 | /* make @rq a normal request */ |
124 | rq->cmd_flags &= ~REQ_FLUSH_SEQ; | 125 | rq->cmd_flags &= ~REQ_FLUSH_SEQ; |
125 | rq->end_io = NULL; | 126 | rq->end_io = rq->flush.saved_end_io; |
126 | } | 127 | } |
127 | 128 | ||
128 | /** | 129 | /** |
@@ -300,9 +301,6 @@ void blk_insert_flush(struct request *rq) | |||
300 | unsigned int fflags = q->flush_flags; /* may change, cache */ | 301 | unsigned int fflags = q->flush_flags; /* may change, cache */ |
301 | unsigned int policy = blk_flush_policy(fflags, rq); | 302 | unsigned int policy = blk_flush_policy(fflags, rq); |
302 | 303 | ||
303 | BUG_ON(rq->end_io); | ||
304 | BUG_ON(!rq->bio || rq->bio != rq->biotail); | ||
305 | |||
306 | /* | 304 | /* |
307 | * @policy now records what operations need to be done. Adjust | 305 | * @policy now records what operations need to be done. Adjust |
308 | * REQ_FLUSH and FUA for the driver. | 306 | * REQ_FLUSH and FUA for the driver. |
@@ -312,6 +310,19 @@ void blk_insert_flush(struct request *rq) | |||
312 | rq->cmd_flags &= ~REQ_FUA; | 310 | rq->cmd_flags &= ~REQ_FUA; |
313 | 311 | ||
314 | /* | 312 | /* |
313 | * An empty flush handed down from a stacking driver may | ||
314 | * translate into nothing if the underlying device does not | ||
315 | * advertise a write-back cache. In this case, simply | ||
316 | * complete the request. | ||
317 | */ | ||
318 | if (!policy) { | ||
319 | __blk_end_bidi_request(rq, 0, 0, 0); | ||
320 | return; | ||
321 | } | ||
322 | |||
323 | BUG_ON(!rq->bio || rq->bio != rq->biotail); | ||
324 | |||
325 | /* | ||
315 | * If there's data but flush is not necessary, the request can be | 326 | * If there's data but flush is not necessary, the request can be |
316 | * processed directly without going through flush machinery. Queue | 327 | * processed directly without going through flush machinery. Queue |
317 | * for normal execution. | 328 | * for normal execution. |
@@ -319,6 +330,7 @@ void blk_insert_flush(struct request *rq) | |||
319 | if ((policy & REQ_FSEQ_DATA) && | 330 | if ((policy & REQ_FSEQ_DATA) && |
320 | !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { | 331 | !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { |
321 | list_add_tail(&rq->queuelist, &q->queue_head); | 332 | list_add_tail(&rq->queuelist, &q->queue_head); |
333 | blk_run_queue_async(q); | ||
322 | return; | 334 | return; |
323 | } | 335 | } |
324 | 336 | ||
@@ -329,6 +341,7 @@ void blk_insert_flush(struct request *rq) | |||
329 | memset(&rq->flush, 0, sizeof(rq->flush)); | 341 | memset(&rq->flush, 0, sizeof(rq->flush)); |
330 | INIT_LIST_HEAD(&rq->flush.list); | 342 | INIT_LIST_HEAD(&rq->flush.list); |
331 | rq->cmd_flags |= REQ_FLUSH_SEQ; | 343 | rq->cmd_flags |= REQ_FLUSH_SEQ; |
344 | rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ | ||
332 | rq->end_io = flush_data_end_io; | 345 | rq->end_io = flush_data_end_io; |
333 | 346 | ||
334 | blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); | 347 | blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); |
diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 475fab809a80..58340d0cb23a 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c | |||
@@ -124,6 +124,14 @@ void __blk_complete_request(struct request *req) | |||
124 | } else | 124 | } else |
125 | ccpu = cpu; | 125 | ccpu = cpu; |
126 | 126 | ||
127 | /* | ||
128 | * If the current CPU and the requested CPU are in the same group, run | ||
129 | * the softirq on the current CPU. This may look like | ||
130 | * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs in | ||
131 | * the interrupt handler, and since the I/O controller does not support | ||
132 | * multiple interrupts, the current CPU is effectively unique. This | ||
133 | * avoids sending an IPI from the current CPU to the first CPU of the group. | ||
134 | */ | ||
127 | if (ccpu == cpu || ccpu == group_cpu) { | 135 | if (ccpu == cpu || ccpu == group_cpu) { |
128 | struct list_head *list; | 136 | struct list_head *list; |
129 | do_local: | 137 | do_local: |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index f6a794120505..a19f58c6fc3a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -746,7 +746,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg, | |||
746 | static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) | 746 | static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) |
747 | { | 747 | { |
748 | bool rw = bio_data_dir(bio); | 748 | bool rw = bio_data_dir(bio); |
749 | bool sync = bio->bi_rw & REQ_SYNC; | 749 | bool sync = rw_is_sync(bio->bi_rw); |
750 | 750 | ||
751 | /* Charge the bio to the group */ | 751 | /* Charge the bio to the group */ |
752 | tg->bytes_disp[rw] += bio->bi_size; | 752 | tg->bytes_disp[rw] += bio->bi_size; |
@@ -1150,7 +1150,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
1150 | 1150 | ||
1151 | if (tg_no_rule_group(tg, rw)) { | 1151 | if (tg_no_rule_group(tg, rw)) { |
1152 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, | 1152 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, |
1153 | rw, bio->bi_rw & REQ_SYNC); | 1153 | rw, rw_is_sync(bio->bi_rw)); |
1154 | rcu_read_unlock(); | 1154 | rcu_read_unlock(); |
1155 | return 0; | 1155 | return 0; |
1156 | } | 1156 | } |
diff --git a/block/blk.h b/block/blk.h index d6586287adc9..20b900a377c9 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -17,6 +17,8 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, | |||
17 | struct bio *bio); | 17 | struct bio *bio); |
18 | void blk_dequeue_request(struct request *rq); | 18 | void blk_dequeue_request(struct request *rq); |
19 | void __blk_queue_free_tags(struct request_queue *q); | 19 | void __blk_queue_free_tags(struct request_queue *q); |
20 | bool __blk_end_bidi_request(struct request *rq, int error, | ||
21 | unsigned int nr_bytes, unsigned int bidi_bytes); | ||
20 | 22 | ||
21 | void blk_rq_timed_out_timer(unsigned long data); | 23 | void blk_rq_timed_out_timer(unsigned long data); |
22 | void blk_delete_timer(struct request *); | 24 | void blk_delete_timer(struct request *); |
diff --git a/block/bsg-lib.c b/block/bsg-lib.c new file mode 100644 index 000000000000..6690e6e41037 --- /dev/null +++ b/block/bsg-lib.c | |||
@@ -0,0 +1,298 @@ | |||
1 | /* | ||
2 | * BSG helper library | ||
3 | * | ||
4 | * Copyright (C) 2008 James Smart, Emulex Corporation | ||
5 | * Copyright (C) 2011 Red Hat, Inc. All rights reserved. | ||
6 | * Copyright (C) 2011 Mike Christie | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/blkdev.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/scatterlist.h> | ||
27 | #include <linux/bsg-lib.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <scsi/scsi_cmnd.h> | ||
30 | |||
31 | /** | ||
32 | * bsg_destroy_job - routine to teardown/delete a bsg job | ||
33 | * @job: bsg_job that is to be torn down | ||
34 | */ | ||
35 | static void bsg_destroy_job(struct bsg_job *job) | ||
36 | { | ||
37 | put_device(job->dev); /* release reference for the request */ | ||
38 | |||
39 | kfree(job->request_payload.sg_list); | ||
40 | kfree(job->reply_payload.sg_list); | ||
41 | kfree(job); | ||
42 | } | ||
43 | |||
44 | /** | ||
45 | * bsg_job_done - completion routine for bsg requests | ||
46 | * @job: bsg_job that is complete | ||
47 | * @result: job reply result | ||
48 | * @reply_payload_rcv_len: length of payload recvd | ||
49 | * | ||
50 | * The LLD should call this when the bsg job has completed. | ||
51 | */ | ||
52 | void bsg_job_done(struct bsg_job *job, int result, | ||
53 | unsigned int reply_payload_rcv_len) | ||
54 | { | ||
55 | struct request *req = job->req; | ||
56 | struct request *rsp = req->next_rq; | ||
57 | int err; | ||
58 | |||
59 | err = job->req->errors = result; | ||
60 | if (err < 0) | ||
61 | /* we're only returning the result field in the reply */ | ||
62 | job->req->sense_len = sizeof(u32); | ||
63 | else | ||
64 | job->req->sense_len = job->reply_len; | ||
65 | /* we assume all request payload was transferred, residual == 0 */ | ||
66 | req->resid_len = 0; | ||
67 | |||
68 | if (rsp) { | ||
69 | WARN_ON(reply_payload_rcv_len > rsp->resid_len); | ||
70 | |||
71 | /* set reply (bidi) residual */ | ||
72 | rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len); | ||
73 | } | ||
74 | blk_complete_request(req); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(bsg_job_done); | ||
77 | |||
78 | /** | ||
79 | * bsg_softirq_done - softirq done routine for destroying the bsg requests | ||
80 | * @rq: BSG request that holds the job to be destroyed | ||
81 | */ | ||
82 | static void bsg_softirq_done(struct request *rq) | ||
83 | { | ||
84 | struct bsg_job *job = rq->special; | ||
85 | |||
86 | blk_end_request_all(rq, rq->errors); | ||
87 | bsg_destroy_job(job); | ||
88 | } | ||
89 | |||
90 | static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) | ||
91 | { | ||
92 | size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); | ||
93 | |||
94 | BUG_ON(!req->nr_phys_segments); | ||
95 | |||
96 | buf->sg_list = kzalloc(sz, GFP_KERNEL); | ||
97 | if (!buf->sg_list) | ||
98 | return -ENOMEM; | ||
99 | sg_init_table(buf->sg_list, req->nr_phys_segments); | ||
100 | buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); | ||
101 | buf->payload_len = blk_rq_bytes(req); | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * bsg_create_job - create the bsg_job structure for the bsg request | ||
107 | * @dev: device that is being sent the bsg request | ||
108 | * @req: BSG request that needs a job structure | ||
109 | */ | ||
110 | static int bsg_create_job(struct device *dev, struct request *req) | ||
111 | { | ||
112 | struct request *rsp = req->next_rq; | ||
113 | struct request_queue *q = req->q; | ||
114 | struct bsg_job *job; | ||
115 | int ret; | ||
116 | |||
117 | BUG_ON(req->special); | ||
118 | |||
119 | job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); | ||
120 | if (!job) | ||
121 | return -ENOMEM; | ||
122 | |||
123 | req->special = job; | ||
124 | job->req = req; | ||
125 | if (q->bsg_job_size) | ||
126 | job->dd_data = (void *)&job[1]; | ||
127 | job->request = req->cmd; | ||
128 | job->request_len = req->cmd_len; | ||
129 | job->reply = req->sense; | ||
130 | job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer | ||
131 | * allocated */ | ||
132 | if (req->bio) { | ||
133 | ret = bsg_map_buffer(&job->request_payload, req); | ||
134 | if (ret) | ||
135 | goto failjob_rls_job; | ||
136 | } | ||
137 | if (rsp && rsp->bio) { | ||
138 | ret = bsg_map_buffer(&job->reply_payload, rsp); | ||
139 | if (ret) | ||
140 | goto failjob_rls_rqst_payload; | ||
141 | } | ||
142 | job->dev = dev; | ||
143 | /* take a reference for the request */ | ||
144 | get_device(job->dev); | ||
145 | return 0; | ||
146 | |||
147 | failjob_rls_rqst_payload: | ||
148 | kfree(job->request_payload.sg_list); | ||
149 | failjob_rls_job: | ||
150 | kfree(job); | ||
151 | return -ENOMEM; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * bsg_goose_queue - restart queue in case it was stopped | ||
156 | * @q: request q to be restarted | ||
157 | */ | ||
158 | void bsg_goose_queue(struct request_queue *q) | ||
159 | { | ||
160 | if (!q) | ||
161 | return; | ||
162 | |||
163 | blk_run_queue_async(q); | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(bsg_goose_queue); | ||
166 | |||
167 | /** | ||
168 | * bsg_request_fn - generic handler for bsg requests | ||
169 | * @q: request queue to manage | ||
170 | * | ||
171 | * On error, bsg_create_job() returns a -Exyz error value, which is | ||
172 | * stored in req->errors. | ||
173 | * | ||
174 | * Drivers/subsys should pass this to the queue init function. | ||
175 | */ | ||
176 | void bsg_request_fn(struct request_queue *q) | ||
177 | { | ||
178 | struct device *dev = q->queuedata; | ||
179 | struct request *req; | ||
180 | struct bsg_job *job; | ||
181 | int ret; | ||
182 | |||
183 | if (!get_device(dev)) | ||
184 | return; | ||
185 | |||
186 | while (1) { | ||
187 | req = blk_fetch_request(q); | ||
188 | if (!req) | ||
189 | break; | ||
190 | spin_unlock_irq(q->queue_lock); | ||
191 | |||
192 | ret = bsg_create_job(dev, req); | ||
193 | if (ret) { | ||
194 | req->errors = ret; | ||
195 | blk_end_request_all(req, ret); | ||
196 | spin_lock_irq(q->queue_lock); | ||
197 | continue; | ||
198 | } | ||
199 | |||
200 | job = req->special; | ||
201 | ret = q->bsg_job_fn(job); | ||
202 | spin_lock_irq(q->queue_lock); | ||
203 | if (ret) | ||
204 | break; | ||
205 | } | ||
206 | |||
207 | spin_unlock_irq(q->queue_lock); | ||
208 | put_device(dev); | ||
209 | spin_lock_irq(q->queue_lock); | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(bsg_request_fn); | ||
212 | |||
213 | /** | ||
214 | * bsg_setup_queue - Create and add the bsg hooks so we can receive requests | ||
215 | * @dev: device to attach bsg device to | ||
216 | * @q: request queue setup by caller | ||
217 | * @name: name to give the bsg device | ||
218 | * @job_fn: bsg job handler | ||
219 | * @dd_job_size: size of LLD data needed for each job | ||
220 | * | ||
221 | * The caller should have set up the request queue with bsg_request_fn | ||
222 | * as the request_fn. | ||
223 | */ | ||
224 | int bsg_setup_queue(struct device *dev, struct request_queue *q, | ||
225 | char *name, bsg_job_fn *job_fn, int dd_job_size) | ||
226 | { | ||
227 | int ret; | ||
228 | |||
229 | q->queuedata = dev; | ||
230 | q->bsg_job_size = dd_job_size; | ||
231 | q->bsg_job_fn = job_fn; | ||
232 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | ||
233 | blk_queue_softirq_done(q, bsg_softirq_done); | ||
234 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | ||
235 | |||
236 | ret = bsg_register_queue(q, dev, name, NULL); | ||
237 | if (ret) { | ||
238 | printk(KERN_ERR "%s: bsg interface failed to " | ||
239 | "initialize - register queue\n", dev->kobj.name); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | EXPORT_SYMBOL_GPL(bsg_setup_queue); | ||
246 | |||
247 | /** | ||
248 | * bsg_remove_queue - Deletes the bsg dev from the q | ||
249 | * @q: the request_queue that is to be torn down. | ||
250 | * | ||
251 | * Notes: | ||
252 | * Before unregistering the queue, drain any requests that are blocked. | ||
253 | */ | ||
254 | void bsg_remove_queue(struct request_queue *q) | ||
255 | { | ||
256 | struct request *req; /* block request */ | ||
257 | int counts; /* totals for request_list count and starved */ | ||
258 | |||
259 | if (!q) | ||
260 | return; | ||
261 | |||
262 | /* Stop taking in new requests */ | ||
263 | spin_lock_irq(q->queue_lock); | ||
264 | blk_stop_queue(q); | ||
265 | |||
266 | /* drain all requests in the queue */ | ||
267 | while (1) { | ||
268 | /* the lock is needed to fetch a request; | ||
269 | * this may fetch the same request as the previous pass | ||
270 | */ | ||
271 | req = blk_fetch_request(q); | ||
272 | /* save requests in use and starved */ | ||
273 | counts = q->rq.count[0] + q->rq.count[1] + | ||
274 | q->rq.starved[0] + q->rq.starved[1]; | ||
275 | spin_unlock_irq(q->queue_lock); | ||
276 | /* any requests still outstanding? */ | ||
277 | if (counts == 0) | ||
278 | break; | ||
279 | |||
280 | /* This may be the same request as in the previous iteration; | ||
281 | * always call blk_end_request_all() after a fetch. It is not | ||
282 | * acceptable to skip ending the request, because the fetch | ||
283 | * above has already started it. | ||
284 | */ | ||
285 | if (req) { | ||
286 | /* return -ENXIO to indicate that this queue is | ||
287 | * going away | ||
288 | */ | ||
289 | req->errors = -ENXIO; | ||
290 | blk_end_request_all(req, -ENXIO); | ||
291 | } | ||
292 | |||
293 | msleep(200); /* allow bsg to possibly finish */ | ||
294 | spin_lock_irq(q->queue_lock); | ||
295 | } | ||
296 | bsg_unregister_queue(q); | ||
297 | } | ||
298 | EXPORT_SYMBOL_GPL(bsg_remove_queue); | ||
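For readers unfamiliar with the new helper library above, here is a minimal, hypothetical sketch of how a transport LLD might consume it. It relies only on the interfaces visible in this file (bsg_setup_queue(), bsg_request_fn(), bsg_job_done(), struct bsg_job) plus blk_init_queue(); the foo_* names and the per-job data layout are invented for illustration, not taken from any real driver.

/*
 * Hypothetical LLD hook-up for bsg-lib; "foo_*" names and the per-job
 * data are invented, everything else comes from the interfaces shown
 * above (bsg_setup_queue / bsg_request_fn / bsg_job_done).
 */
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/bsg-lib.h>

struct foo_job_data {			/* per-job LLD data (job->dd_data) */
	u32 tag;
};

static int foo_bsg_job_fn(struct bsg_job *job)
{
	struct foo_job_data *dd = job->dd_data;

	dd->tag = 0;			/* start the transport command described by
					 * job->request / job->request_payload here */

	/* later, from the LLD's completion path: */
	bsg_job_done(job, 0, 0);
	return 0;
}

static int foo_attach_bsg(struct device *dev, struct request_queue **qp,
			  spinlock_t *lock)
{
	struct request_queue *q;

	/* bsg_request_fn is passed as the queue's request_fn */
	q = blk_init_queue(bsg_request_fn, lock);
	if (!q)
		return -ENOMEM;

	*qp = q;
	return bsg_setup_queue(dev, q, "foo_bsg", foo_bsg_job_fn,
			       sizeof(struct foo_job_data));
}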
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 1f96ad6254f1..a33bd4377c61 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -130,6 +130,8 @@ struct cfq_queue { | |||
130 | unsigned long slice_end; | 130 | unsigned long slice_end; |
131 | long slice_resid; | 131 | long slice_resid; |
132 | 132 | ||
133 | /* pending metadata requests */ | ||
134 | int meta_pending; | ||
133 | /* number of requests that are on the dispatch list or inside driver */ | 135 | /* number of requests that are on the dispatch list or inside driver */ |
134 | int dispatched; | 136 | int dispatched; |
135 | 137 | ||
@@ -682,6 +684,9 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, | |||
682 | if (rq_is_sync(rq1) != rq_is_sync(rq2)) | 684 | if (rq_is_sync(rq1) != rq_is_sync(rq2)) |
683 | return rq_is_sync(rq1) ? rq1 : rq2; | 685 | return rq_is_sync(rq1) ? rq1 : rq2; |
684 | 686 | ||
687 | if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META) | ||
688 | return rq1->cmd_flags & REQ_META ? rq1 : rq2; | ||
689 | |||
685 | s1 = blk_rq_pos(rq1); | 690 | s1 = blk_rq_pos(rq1); |
686 | s2 = blk_rq_pos(rq2); | 691 | s2 = blk_rq_pos(rq2); |
687 | 692 | ||
@@ -1209,6 +1214,9 @@ static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
1209 | 1214 | ||
1210 | hlist_del_init(&cfqg->cfqd_node); | 1215 | hlist_del_init(&cfqg->cfqd_node); |
1211 | 1216 | ||
1217 | BUG_ON(cfqd->nr_blkcg_linked_grps <= 0); | ||
1218 | cfqd->nr_blkcg_linked_grps--; | ||
1219 | |||
1212 | /* | 1220 | /* |
1213 | * Put the reference taken at the time of creation so that when all | 1221 | * Put the reference taken at the time of creation so that when all |
1214 | * queues are gone, group can be destroyed. | 1222 | * queues are gone, group can be destroyed. |
@@ -1604,6 +1612,10 @@ static void cfq_remove_request(struct request *rq) | |||
1604 | cfqq->cfqd->rq_queued--; | 1612 | cfqq->cfqd->rq_queued--; |
1605 | cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, | 1613 | cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, |
1606 | rq_data_dir(rq), rq_is_sync(rq)); | 1614 | rq_data_dir(rq), rq_is_sync(rq)); |
1615 | if (rq->cmd_flags & REQ_META) { | ||
1616 | WARN_ON(!cfqq->meta_pending); | ||
1617 | cfqq->meta_pending--; | ||
1618 | } | ||
1607 | } | 1619 | } |
1608 | 1620 | ||
1609 | static int cfq_merge(struct request_queue *q, struct request **req, | 1621 | static int cfq_merge(struct request_queue *q, struct request **req, |
@@ -3357,6 +3369,13 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | |||
3357 | return true; | 3369 | return true; |
3358 | 3370 | ||
3359 | /* | 3371 | /* |
3372 | * So both queues are sync. Let the new request get disk time if | ||
3373 | * it's a metadata request and the current queue is doing regular IO. | ||
3374 | */ | ||
3375 | if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) | ||
3376 | return true; | ||
3377 | |||
3378 | /* | ||
3360 | * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. | 3379 | * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. |
3361 | */ | 3380 | */ |
3362 | if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) | 3381 | if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) |
@@ -3420,6 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3420 | struct cfq_io_context *cic = RQ_CIC(rq); | 3439 | struct cfq_io_context *cic = RQ_CIC(rq); |
3421 | 3440 | ||
3422 | cfqd->rq_queued++; | 3441 | cfqd->rq_queued++; |
3442 | if (rq->cmd_flags & REQ_META) | ||
3443 | cfqq->meta_pending++; | ||
3423 | 3444 | ||
3424 | cfq_update_io_thinktime(cfqd, cfqq, cic); | 3445 | cfq_update_io_thinktime(cfqd, cfqq, cic); |
3425 | cfq_update_io_seektime(cfqd, cfqq, rq); | 3446 | cfq_update_io_seektime(cfqd, cfqq, rq); |
diff --git a/block/genhd.c b/block/genhd.c index 5cb51c55f6d8..e2f67902dd02 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1146,17 +1146,17 @@ static int diskstats_show(struct seq_file *seqf, void *v) | |||
1146 | cpu = part_stat_lock(); | 1146 | cpu = part_stat_lock(); |
1147 | part_round_stats(cpu, hd); | 1147 | part_round_stats(cpu, hd); |
1148 | part_stat_unlock(); | 1148 | part_stat_unlock(); |
1149 | seq_printf(seqf, "%4d %7d %s %lu %lu %llu " | 1149 | seq_printf(seqf, "%4d %7d %s %lu %lu %lu " |
1150 | "%u %lu %lu %llu %u %u %u %u\n", | 1150 | "%u %lu %lu %lu %u %u %u %u\n", |
1151 | MAJOR(part_devt(hd)), MINOR(part_devt(hd)), | 1151 | MAJOR(part_devt(hd)), MINOR(part_devt(hd)), |
1152 | disk_name(gp, hd->partno, buf), | 1152 | disk_name(gp, hd->partno, buf), |
1153 | part_stat_read(hd, ios[READ]), | 1153 | part_stat_read(hd, ios[READ]), |
1154 | part_stat_read(hd, merges[READ]), | 1154 | part_stat_read(hd, merges[READ]), |
1155 | (unsigned long long)part_stat_read(hd, sectors[READ]), | 1155 | part_stat_read(hd, sectors[READ]), |
1156 | jiffies_to_msecs(part_stat_read(hd, ticks[READ])), | 1156 | jiffies_to_msecs(part_stat_read(hd, ticks[READ])), |
1157 | part_stat_read(hd, ios[WRITE]), | 1157 | part_stat_read(hd, ios[WRITE]), |
1158 | part_stat_read(hd, merges[WRITE]), | 1158 | part_stat_read(hd, merges[WRITE]), |
1159 | (unsigned long long)part_stat_read(hd, sectors[WRITE]), | 1159 | part_stat_read(hd, sectors[WRITE]), |
1160 | jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), | 1160 | jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), |
1161 | part_in_flight(hd), | 1161 | part_in_flight(hd), |
1162 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), | 1162 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index ca3e6be44a04..5987e0ba8c2d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -468,6 +468,15 @@ config PATA_ICSIDE | |||
468 | interface card. This is not required for ICS partition support. | 468 | interface card. This is not required for ICS partition support. |
469 | If you are unsure, say N to this. | 469 | If you are unsure, say N to this. |
470 | 470 | ||
471 | config PATA_IMX | ||
472 | tristate "PATA support for Freescale iMX" | ||
473 | depends on ARCH_MXC | ||
474 | help | ||
475 | This option enables support for the PATA host available on Freescale | ||
476 | iMX SoCs. | ||
477 | |||
478 | If unsure, say N. | ||
479 | |||
471 | config PATA_IT8213 | 480 | config PATA_IT8213 |
472 | tristate "IT8213 PATA support (Experimental)" | 481 | tristate "IT8213 PATA support (Experimental)" |
473 | depends on PCI && EXPERIMENTAL | 482 | depends on PCI && EXPERIMENTAL |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 8ac64e1aa051..9550d691fd19 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o | |||
48 | obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o | 48 | obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o |
49 | obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o | 49 | obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o |
50 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o | 50 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o |
51 | obj-$(CONFIG_PATA_IMX) += pata_imx.o | ||
51 | obj-$(CONFIG_PATA_IT8213) += pata_it8213.o | 52 | obj-$(CONFIG_PATA_IT8213) += pata_it8213.o |
52 | obj-$(CONFIG_PATA_IT821X) += pata_it821x.o | 53 | obj-$(CONFIG_PATA_IT821X) += pata_it821x.o |
53 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o | 54 | obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o |
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c new file mode 100644 index 000000000000..ca9d9caedfa3 --- /dev/null +++ b/drivers/ata/pata_imx.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * Freescale iMX PATA driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org> | ||
5 | * | ||
6 | * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * TODO: | ||
13 | * - dmaengine support | ||
14 | * - check if timing stuff needed | ||
15 | */ | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/blkdev.h> | ||
20 | #include <scsi/scsi_host.h> | ||
21 | #include <linux/ata.h> | ||
22 | #include <linux/libata.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/clk.h> | ||
25 | |||
26 | #define DRV_NAME "pata_imx" | ||
27 | |||
28 | #define PATA_IMX_ATA_CONTROL 0x24 | ||
29 | #define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7) | ||
30 | #define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6) | ||
31 | #define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0) | ||
32 | #define PATA_IMX_ATA_INT_EN 0x2C | ||
33 | #define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3) | ||
34 | #define PATA_IMX_DRIVE_DATA 0xA0 | ||
35 | #define PATA_IMX_DRIVE_CONTROL 0xD8 | ||
36 | |||
37 | struct pata_imx_priv { | ||
38 | struct clk *clk; | ||
39 | /* timings/interrupt/control regs */ | ||
40 | u8 *host_regs; | ||
41 | u32 ata_ctl; | ||
42 | }; | ||
43 | |||
44 | static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused) | ||
45 | { | ||
46 | struct ata_device *dev; | ||
47 | struct ata_port *ap = link->ap; | ||
48 | struct pata_imx_priv *priv = ap->host->private_data; | ||
49 | u32 val; | ||
50 | |||
51 | ata_for_each_dev(dev, link, ENABLED) { | ||
52 | dev->pio_mode = dev->xfer_mode = XFER_PIO_0; | ||
53 | dev->xfer_shift = ATA_SHIFT_PIO; | ||
54 | dev->flags |= ATA_DFLAG_PIO; | ||
55 | |||
56 | val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); | ||
57 | if (ata_pio_need_iordy(dev)) | ||
58 | val |= PATA_IMX_ATA_CTRL_IORDY_EN; | ||
59 | else | ||
60 | val &= ~PATA_IMX_ATA_CTRL_IORDY_EN; | ||
61 | __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL); | ||
62 | |||
63 | ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); | ||
64 | } | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static struct scsi_host_template pata_imx_sht = { | ||
69 | ATA_PIO_SHT(DRV_NAME), | ||
70 | }; | ||
71 | |||
72 | static struct ata_port_operations pata_imx_port_ops = { | ||
73 | .inherits = &ata_sff_port_ops, | ||
74 | .sff_data_xfer = ata_sff_data_xfer_noirq, | ||
75 | .cable_detect = ata_cable_unknown, | ||
76 | .set_mode = pata_imx_set_mode, | ||
77 | }; | ||
78 | |||
79 | static void pata_imx_setup_port(struct ata_ioports *ioaddr) | ||
80 | { | ||
81 | /* Fixup the port shift for platforms that need it */ | ||
82 | ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2); | ||
83 | ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2); | ||
84 | ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2); | ||
85 | ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2); | ||
86 | ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2); | ||
87 | ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2); | ||
88 | ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2); | ||
89 | ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2); | ||
90 | ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2); | ||
91 | ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); | ||
92 | } | ||
93 | |||
94 | static int __devinit pata_imx_probe(struct platform_device *pdev) | ||
95 | { | ||
96 | struct ata_host *host; | ||
97 | struct ata_port *ap; | ||
98 | struct pata_imx_priv *priv; | ||
99 | int irq = 0; | ||
100 | struct resource *io_res; | ||
101 | |||
102 | io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
103 | if (io_res == NULL) | ||
104 | return -EINVAL; | ||
105 | |||
106 | irq = platform_get_irq(pdev, 0); | ||
107 | if (irq <= 0) | ||
108 | return -EINVAL; | ||
109 | |||
110 | priv = devm_kzalloc(&pdev->dev, | ||
111 | sizeof(struct pata_imx_priv), GFP_KERNEL); | ||
112 | if (!priv) | ||
113 | return -ENOMEM; | ||
114 | |||
115 | priv->clk = clk_get(&pdev->dev, NULL); | ||
116 | if (IS_ERR(priv->clk)) { | ||
117 | dev_err(&pdev->dev, "Failed to get clock\n"); | ||
118 | return PTR_ERR(priv->clk); | ||
119 | } | ||
120 | |||
121 | clk_enable(priv->clk); | ||
122 | |||
123 | host = ata_host_alloc(&pdev->dev, 1); | ||
124 | if (!host) | ||
125 | goto free_priv; | ||
126 | |||
127 | host->private_data = priv; | ||
128 | ap = host->ports[0]; | ||
129 | |||
130 | ap->ops = &pata_imx_port_ops; | ||
131 | ap->pio_mask = ATA_PIO0; | ||
132 | ap->flags |= ATA_FLAG_SLAVE_POSS; | ||
133 | |||
134 | priv->host_regs = devm_ioremap(&pdev->dev, io_res->start, | ||
135 | resource_size(io_res)); | ||
136 | if (!priv->host_regs) { | ||
137 | dev_err(&pdev->dev, "failed to map IO/CTL base\n"); | ||
138 | goto free_priv; | ||
139 | } | ||
140 | |||
141 | ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA; | ||
142 | ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL; | ||
143 | |||
144 | ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; | ||
145 | |||
146 | pata_imx_setup_port(&ap->ioaddr); | ||
147 | |||
148 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | ||
149 | (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA, | ||
150 | (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL); | ||
151 | |||
152 | /* deassert resets */ | ||
153 | __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B | | ||
154 | PATA_IMX_ATA_CTRL_ATA_RST_B, | ||
155 | priv->host_regs + PATA_IMX_ATA_CONTROL); | ||
156 | /* enable interrupts */ | ||
157 | __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, | ||
158 | priv->host_regs + PATA_IMX_ATA_INT_EN); | ||
159 | |||
160 | /* activate */ | ||
161 | return ata_host_activate(host, irq, ata_sff_interrupt, 0, | ||
162 | &pata_imx_sht); | ||
163 | |||
164 | free_priv: | ||
165 | clk_disable(priv->clk); | ||
166 | clk_put(priv->clk); | ||
167 | return -ENOMEM; | ||
168 | } | ||
169 | |||
170 | static int __devexit pata_imx_remove(struct platform_device *pdev) | ||
171 | { | ||
172 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
173 | struct pata_imx_priv *priv = host->private_data; | ||
174 | |||
175 | ata_host_detach(host); | ||
176 | |||
177 | __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); | ||
178 | |||
179 | clk_disable(priv->clk); | ||
180 | clk_put(priv->clk); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | #ifdef CONFIG_PM | ||
186 | static int pata_imx_suspend(struct device *dev) | ||
187 | { | ||
188 | struct ata_host *host = dev_get_drvdata(dev); | ||
189 | struct pata_imx_priv *priv = host->private_data; | ||
190 | int ret; | ||
191 | |||
192 | ret = ata_host_suspend(host, PMSG_SUSPEND); | ||
193 | if (!ret) { | ||
194 | __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); | ||
195 | priv->ata_ctl = | ||
196 | __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); | ||
197 | clk_disable(priv->clk); | ||
198 | } | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | static int pata_imx_resume(struct device *dev) | ||
204 | { | ||
205 | struct ata_host *host = dev_get_drvdata(dev); | ||
206 | struct pata_imx_priv *priv = host->private_data; | ||
207 | |||
208 | clk_enable(priv->clk); | ||
209 | |||
210 | __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); | ||
211 | |||
212 | __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, | ||
213 | priv->host_regs + PATA_IMX_ATA_INT_EN); | ||
214 | |||
215 | ata_host_resume(host); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static const struct dev_pm_ops pata_imx_pm_ops = { | ||
221 | .suspend = pata_imx_suspend, | ||
222 | .resume = pata_imx_resume, | ||
223 | }; | ||
224 | #endif | ||
225 | |||
226 | static struct platform_driver pata_imx_driver = { | ||
227 | .probe = pata_imx_probe, | ||
228 | .remove = __devexit_p(pata_imx_remove), | ||
229 | .driver = { | ||
230 | .name = DRV_NAME, | ||
231 | .owner = THIS_MODULE, | ||
232 | #ifdef CONFIG_PM | ||
233 | .pm = &pata_imx_pm_ops, | ||
234 | #endif | ||
235 | }, | ||
236 | }; | ||
237 | |||
238 | static int __init pata_imx_init(void) | ||
239 | { | ||
240 | return platform_driver_register(&pata_imx_driver); | ||
241 | } | ||
242 | |||
243 | static void __exit pata_imx_exit(void) | ||
244 | { | ||
245 | platform_driver_unregister(&pata_imx_driver); | ||
246 | } | ||
247 | module_init(pata_imx_init); | ||
248 | module_exit(pata_imx_exit); | ||
249 | |||
250 | MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); | ||
251 | MODULE_DESCRIPTION("low-level driver for iMX PATA"); | ||
252 | MODULE_LICENSE("GPL"); | ||
253 | MODULE_ALIAS("platform:" DRV_NAME); | ||
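As a hypothetical illustration of how this driver gets bound, a machine file would register a "pata_imx" platform device with one memory resource and one IRQ, matching the probe() above. The base address, IRQ number and foo_* names below are placeholders, not real iMX values, and the clock looked up by clk_get() is assumed to be provided by the SoC clock code.

/* Hypothetical board-file registration for the "pata_imx" driver above.
 * The base address and IRQ are placeholders for SoC-specific values.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource foo_pata_imx_resources[] = {
	{
		.start	= 0x83fe0000,		/* placeholder ATA controller base */
		.end	= 0x83fe0000 + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 70,			/* placeholder ATA interrupt */
		.end	= 70,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device foo_pata_imx_device = {
	.name		= "pata_imx",		/* must match DRV_NAME in the driver */
	.id		= -1,
	.resource	= foo_pata_imx_resources,
	.num_resources	= ARRAY_SIZE(foo_pata_imx_resources),
};

static int __init foo_board_add_pata(void)
{
	return platform_device_register(&foo_pata_imx_device);
}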
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 65e4be6be220..8e9f5048a10a 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -124,6 +124,17 @@ static const struct via_isa_bridge { | |||
124 | { NULL } | 124 | { NULL } |
125 | }; | 125 | }; |
126 | 126 | ||
127 | static const struct dmi_system_id no_atapi_dma_dmi_table[] = { | ||
128 | { | ||
129 | .ident = "AVERATEC 3200", | ||
130 | .matches = { | ||
131 | DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"), | ||
132 | DMI_MATCH(DMI_BOARD_NAME, "3200"), | ||
133 | }, | ||
134 | }, | ||
135 | { } | ||
136 | }; | ||
137 | |||
127 | struct via_port { | 138 | struct via_port { |
128 | u8 cached_device; | 139 | u8 cached_device; |
129 | }; | 140 | }; |
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) | |||
355 | mask &= ~ ATA_MASK_UDMA; | 366 | mask &= ~ ATA_MASK_UDMA; |
356 | } | 367 | } |
357 | } | 368 | } |
369 | |||
370 | if (dev->class == ATA_DEV_ATAPI && | ||
371 | dmi_check_system(no_atapi_dma_dmi_table)) { | ||
372 | ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n"); | ||
373 | mask &= ATA_MASK_PIO; | ||
374 | } | ||
375 | |||
358 | return mask; | 376 | return mask; |
359 | } | 377 | } |
360 | 378 | ||
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 0a9a774a7e1e..5c4237452f50 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap) | |||
1329 | dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", | 1329 | dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", |
1330 | __func__); | 1330 | __func__); |
1331 | err = -ENOMEM; | 1331 | err = -ENOMEM; |
1332 | goto CLEANUP; | 1332 | goto CLEANUP_ALLOC; |
1333 | } | 1333 | } |
1334 | } | 1334 | } |
1335 | 1335 | ||
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap) | |||
1349 | /* Clear any error bits before libata starts issuing commands */ | 1349 | /* Clear any error bits before libata starts issuing commands */ |
1350 | clear_serror(); | 1350 | clear_serror(); |
1351 | ap->private_data = hsdevp; | 1351 | ap->private_data = hsdevp; |
1352 | dev_dbg(ap->dev, "%s: done\n", __func__); | ||
1353 | return 0; | ||
1352 | 1354 | ||
1355 | CLEANUP_ALLOC: | ||
1356 | kfree(hsdevp); | ||
1353 | CLEANUP: | 1357 | CLEANUP: |
1354 | if (err) { | 1358 | dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id); |
1355 | sata_dwc_port_stop(ap); | ||
1356 | dev_dbg(ap->dev, "%s: fail\n", __func__); | ||
1357 | } else { | ||
1358 | dev_dbg(ap->dev, "%s: done\n", __func__); | ||
1359 | } | ||
1360 | |||
1361 | return err; | 1359 | return err; |
1362 | } | 1360 | } |
1363 | 1361 | ||
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 98c1d780f552..9dfb40b8c2c9 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) | |||
438 | u8 status; | 438 | u8 status; |
439 | 439 | ||
440 | if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { | 440 | if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { |
441 | u32 serror; | 441 | u32 serror = 0xffffffff; |
442 | 442 | ||
443 | /* SIEN doesn't mask SATA IRQs on some 3112s. Those | 443 | /* SIEN doesn't mask SATA IRQs on some 3112s. Those |
444 | * controllers continue to assert IRQ as long as | 444 | * controllers continue to assert IRQ as long as |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index bbb03e6f7255..06ed6b4e7df5 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
521 | if (!firmware_p) | 521 | if (!firmware_p) |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | 523 | ||
524 | if (WARN_ON(usermodehelper_is_disabled())) { | ||
525 | dev_err(device, "firmware: %s will not be loaded\n", name); | ||
526 | return -EBUSY; | ||
527 | } | ||
528 | |||
529 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); | 524 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); |
530 | if (!firmware) { | 525 | if (!firmware) { |
531 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", | 526 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", |
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
539 | return 0; | 534 | return 0; |
540 | } | 535 | } |
541 | 536 | ||
537 | if (WARN_ON(usermodehelper_is_disabled())) { | ||
538 | dev_err(device, "firmware: %s will not be loaded\n", name); | ||
539 | retval = -EBUSY; | ||
540 | goto out; | ||
541 | } | ||
542 | |||
542 | if (uevent) | 543 | if (uevent) |
543 | dev_dbg(device, "firmware: requesting %s\n", name); | 544 | dev_dbg(device, "firmware: requesting %s\n", name); |
544 | 545 | ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e18566a0fedd..1c374579407c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
460 | return 0; | 460 | return 0; |
461 | } | 461 | } |
462 | 462 | ||
463 | /** | ||
464 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. | ||
465 | */ | ||
466 | void pm_genpd_poweroff_unused(void) | ||
467 | { | ||
468 | struct generic_pm_domain *genpd; | ||
469 | |||
470 | mutex_lock(&gpd_list_lock); | ||
471 | |||
472 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) | ||
473 | genpd_queue_power_off_work(genpd); | ||
474 | |||
475 | mutex_unlock(&gpd_list_lock); | ||
476 | } | ||
477 | |||
463 | #else | 478 | #else |
464 | 479 | ||
465 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 480 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
@@ -1255,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1255 | list_add(&genpd->gpd_list_node, &gpd_list); | 1270 | list_add(&genpd->gpd_list_node, &gpd_list); |
1256 | mutex_unlock(&gpd_list_lock); | 1271 | mutex_unlock(&gpd_list_lock); |
1257 | } | 1272 | } |
1258 | |||
1259 | /** | ||
1260 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. | ||
1261 | */ | ||
1262 | void pm_genpd_poweroff_unused(void) | ||
1263 | { | ||
1264 | struct generic_pm_domain *genpd; | ||
1265 | |||
1266 | mutex_lock(&gpd_list_lock); | ||
1267 | |||
1268 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) | ||
1269 | genpd_queue_power_off_work(genpd); | ||
1270 | |||
1271 | mutex_unlock(&gpd_list_lock); | ||
1272 | } | ||
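A brief, hypothetical usage note for the pm_genpd_poweroff_unused() helper whose definition is relocated above: platform code would typically call it once all domains and devices have been registered, for example from a late initcall. The foo_* name is invented.

/* Hypothetical platform hook-up: once all PM domains and their devices
 * are registered, queue power-off work for the domains nobody uses.
 */
#include <linux/init.h>
#include <linux/pm_domain.h>

static int __init foo_genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(foo_genpd_poweroff_unused);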
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index c2231ff06cbc..c4f7a45cd2c3 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c | |||
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c, | |||
113 | } | 113 | } |
114 | EXPORT_SYMBOL_GPL(regmap_init_i2c); | 114 | EXPORT_SYMBOL_GPL(regmap_init_i2c); |
115 | 115 | ||
116 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 4deba0621bc7..f8396945d6ed 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/regmap.h> | 13 | #include <linux/regmap.h> |
14 | #include <linux/spi/spi.h> | 14 | #include <linux/spi/spi.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/module.h> | ||
16 | 17 | ||
17 | static int regmap_spi_write(struct device *dev, const void *data, size_t count) | 18 | static int regmap_spi_write(struct device *dev, const void *data, size_t count) |
18 | { | 19 | { |
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi, | |||
70 | return regmap_init(&spi->dev, ®map_spi, config); | 71 | return regmap_init(&spi->dev, ®map_spi, config); |
71 | } | 72 | } |
72 | EXPORT_SYMBOL_GPL(regmap_init_spi); | 73 | EXPORT_SYMBOL_GPL(regmap_init_spi); |
74 | |||
75 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index cf3565cae93d..0eef4da1ac61 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -317,7 +317,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
317 | u8[0] |= map->bus->read_flag_mask; | 317 | u8[0] |= map->bus->read_flag_mask; |
318 | 318 | ||
319 | ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, | 319 | ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, |
320 | val, map->format.val_bytes); | 320 | val, val_len); |
321 | if (ret != 0) | 321 | if (ret != 0) |
322 | return ret; | 322 | return ret; |
323 | 323 | ||
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 717d6e4e18d3..6f07ec1c2f58 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP | |||
256 | 256 | ||
257 | Most users will answer N here. | 257 | Most users will answer N here. |
258 | 258 | ||
259 | config BLK_DEV_LOOP_MIN_COUNT | ||
260 | int "Number of loop devices to pre-create at init time" | ||
261 | depends on BLK_DEV_LOOP | ||
262 | default 8 | ||
263 | help | ||
264 | Static number of loop devices to be unconditionally pre-created | ||
265 | at init time. | ||
266 | |||
267 | This default value can be overridden on the kernel command | ||
268 | line or with the module parameter loop.max_loop. | ||
269 | |||
270 | The historic default is 8. If a late 2011 version of losetup(8) | ||
271 | is used, it can be set to 0, since needed loop devices can be | ||
272 | dynamically allocated with the /dev/loop-control interface. | ||
273 | |||
259 | config BLK_DEV_CRYPTOLOOP | 274 | config BLK_DEV_CRYPTOLOOP |
260 | tristate "Cryptoloop Support" | 275 | tristate "Cryptoloop Support" |
261 | select CRYPTO | 276 | select CRYPTO |
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND | |||
471 | in another domain which drives the actual block device. | 486 | in another domain which drives the actual block device. |
472 | 487 | ||
473 | config XEN_BLKDEV_BACKEND | 488 | config XEN_BLKDEV_BACKEND |
474 | tristate "Block-device backend driver" | 489 | tristate "Xen block-device backend driver" |
475 | depends on XEN_BACKEND | 490 | depends on XEN_BACKEND |
476 | help | 491 | help |
477 | The block-device backend driver allows the kernel to export its | 492 | The block-device backend driver allows the kernel to export its |
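The BLK_DEV_LOOP_MIN_COUNT help text above refers to the new /dev/loop-control interface; the following minimal userspace sketch asks the kernel for a free loop device. It assumes the LOOP_CTL_GET_FREE ioctl exported through <linux/loop.h> by this release, and error handling is kept to a minimum.

/* Hypothetical userspace use of /dev/loop-control: request a free loop
 * device number instead of relying on statically pre-created devices.
 * Assumes LOOP_CTL_GET_FREE from <linux/loop.h>.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int nr, ctl = open("/dev/loop-control", O_RDWR);

	if (ctl < 0) {
		perror("open /dev/loop-control");
		return 1;
	}

	nr = ioctl(ctl, LOOP_CTL_GET_FREE);	/* allocate or find a free loopN */
	if (nr < 0)
		perror("LOOP_CTL_GET_FREE");
	else
		printf("free loop device: /dev/loop%d\n", nr);

	close(ctl);
	return nr < 0 ? 1 : 0;
}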
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 515bcd948a43..0feab261e295 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n | |||
1829 | 1829 | ||
1830 | /* silently ignore cpu mask on UP kernel */ | 1830 | /* silently ignore cpu mask on UP kernel */ |
1831 | if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { | 1831 | if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { |
1832 | err = __bitmap_parse(sc.cpu_mask, 32, 0, | 1832 | err = bitmap_parse(sc.cpu_mask, 32, |
1833 | cpumask_bits(new_cpu_mask), nr_cpu_ids); | 1833 | cpumask_bits(new_cpu_mask), nr_cpu_ids); |
1834 | if (err) { | 1834 | if (err) { |
1835 | dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); | 1835 | dev_warn(DEV, "bitmap_parse() failed with %d\n", err); |
1836 | retcode = ERR_CPU_MASK_PARSE; | 1836 | retcode = ERR_CPU_MASK_PARSE; |
1837 | goto fail; | 1837 | goto fail; |
1838 | } | 1838 | } |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 76c8da78212b..4720c7ade0ae 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -75,11 +75,11 @@ | |||
75 | #include <linux/kthread.h> | 75 | #include <linux/kthread.h> |
76 | #include <linux/splice.h> | 76 | #include <linux/splice.h> |
77 | #include <linux/sysfs.h> | 77 | #include <linux/sysfs.h> |
78 | 78 | #include <linux/miscdevice.h> | |
79 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
80 | 80 | ||
81 | static LIST_HEAD(loop_devices); | 81 | static DEFINE_IDR(loop_index_idr); |
82 | static DEFINE_MUTEX(loop_devices_mutex); | 82 | static DEFINE_MUTEX(loop_index_mutex); |
83 | 83 | ||
84 | static int max_part; | 84 | static int max_part; |
85 | static int part_shift; | 85 | static int part_shift; |
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file) | |||
722 | static ssize_t loop_attr_show(struct device *dev, char *page, | 722 | static ssize_t loop_attr_show(struct device *dev, char *page, |
723 | ssize_t (*callback)(struct loop_device *, char *)) | 723 | ssize_t (*callback)(struct loop_device *, char *)) |
724 | { | 724 | { |
725 | struct loop_device *l, *lo = NULL; | 725 | struct gendisk *disk = dev_to_disk(dev); |
726 | 726 | struct loop_device *lo = disk->private_data; | |
727 | mutex_lock(&loop_devices_mutex); | ||
728 | list_for_each_entry(l, &loop_devices, lo_list) | ||
729 | if (disk_to_dev(l->lo_disk) == dev) { | ||
730 | lo = l; | ||
731 | break; | ||
732 | } | ||
733 | mutex_unlock(&loop_devices_mutex); | ||
734 | 727 | ||
735 | return lo ? callback(lo, page) : -EIO; | 728 | return callback(lo, page); |
736 | } | 729 | } |
737 | 730 | ||
738 | #define LOOP_ATTR_RO(_name) \ | 731 | #define LOOP_ATTR_RO(_name) \ |
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) | |||
750 | ssize_t ret; | 743 | ssize_t ret; |
751 | char *p = NULL; | 744 | char *p = NULL; |
752 | 745 | ||
753 | mutex_lock(&lo->lo_ctl_mutex); | 746 | spin_lock_irq(&lo->lo_lock); |
754 | if (lo->lo_backing_file) | 747 | if (lo->lo_backing_file) |
755 | p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); | 748 | p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); |
756 | mutex_unlock(&lo->lo_ctl_mutex); | 749 | spin_unlock_irq(&lo->lo_lock); |
757 | 750 | ||
758 | if (IS_ERR_OR_NULL(p)) | 751 | if (IS_ERR_OR_NULL(p)) |
759 | ret = PTR_ERR(p); | 752 | ret = PTR_ERR(p); |
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | |||
1007 | 1000 | ||
1008 | kthread_stop(lo->lo_thread); | 1001 | kthread_stop(lo->lo_thread); |
1009 | 1002 | ||
1003 | spin_lock_irq(&lo->lo_lock); | ||
1010 | lo->lo_backing_file = NULL; | 1004 | lo->lo_backing_file = NULL; |
1005 | spin_unlock_irq(&lo->lo_lock); | ||
1011 | 1006 | ||
1012 | loop_release_xfer(lo); | 1007 | loop_release_xfer(lo); |
1013 | lo->transfer = NULL; | 1008 | lo->transfer = NULL; |
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, | |||
1485 | 1480 | ||
1486 | static int lo_open(struct block_device *bdev, fmode_t mode) | 1481 | static int lo_open(struct block_device *bdev, fmode_t mode) |
1487 | { | 1482 | { |
1488 | struct loop_device *lo = bdev->bd_disk->private_data; | 1483 | struct loop_device *lo; |
1484 | int err = 0; | ||
1485 | |||
1486 | mutex_lock(&loop_index_mutex); | ||
1487 | lo = bdev->bd_disk->private_data; | ||
1488 | if (!lo) { | ||
1489 | err = -ENXIO; | ||
1490 | goto out; | ||
1491 | } | ||
1489 | 1492 | ||
1490 | mutex_lock(&lo->lo_ctl_mutex); | 1493 | mutex_lock(&lo->lo_ctl_mutex); |
1491 | lo->lo_refcnt++; | 1494 | lo->lo_refcnt++; |
1492 | mutex_unlock(&lo->lo_ctl_mutex); | 1495 | mutex_unlock(&lo->lo_ctl_mutex); |
1493 | 1496 | out: | |
1494 | return 0; | 1497 | mutex_unlock(&loop_index_mutex); |
1498 | return err; | ||
1495 | } | 1499 | } |
1496 | 1500 | ||
1497 | static int lo_release(struct gendisk *disk, fmode_t mode) | 1501 | static int lo_release(struct gendisk *disk, fmode_t mode) |
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs) | |||
1557 | return 0; | 1561 | return 0; |
1558 | } | 1562 | } |
1559 | 1563 | ||
1564 | static int unregister_transfer_cb(int id, void *ptr, void *data) | ||
1565 | { | ||
1566 | struct loop_device *lo = ptr; | ||
1567 | struct loop_func_table *xfer = data; | ||
1568 | |||
1569 | mutex_lock(&lo->lo_ctl_mutex); | ||
1570 | if (lo->lo_encryption == xfer) | ||
1571 | loop_release_xfer(lo); | ||
1572 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1573 | return 0; | ||
1574 | } | ||
1575 | |||
1560 | int loop_unregister_transfer(int number) | 1576 | int loop_unregister_transfer(int number) |
1561 | { | 1577 | { |
1562 | unsigned int n = number; | 1578 | unsigned int n = number; |
1563 | struct loop_device *lo; | ||
1564 | struct loop_func_table *xfer; | 1579 | struct loop_func_table *xfer; |
1565 | 1580 | ||
1566 | if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) | 1581 | if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) |
1567 | return -EINVAL; | 1582 | return -EINVAL; |
1568 | 1583 | ||
1569 | xfer_funcs[n] = NULL; | 1584 | xfer_funcs[n] = NULL; |
1570 | 1585 | idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); | |
1571 | list_for_each_entry(lo, &loop_devices, lo_list) { | ||
1572 | mutex_lock(&lo->lo_ctl_mutex); | ||
1573 | |||
1574 | if (lo->lo_encryption == xfer) | ||
1575 | loop_release_xfer(lo); | ||
1576 | |||
1577 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1578 | } | ||
1579 | |||
1580 | return 0; | 1586 | return 0; |
1581 | } | 1587 | } |
1582 | 1588 | ||
1583 | EXPORT_SYMBOL(loop_register_transfer); | 1589 | EXPORT_SYMBOL(loop_register_transfer); |
1584 | EXPORT_SYMBOL(loop_unregister_transfer); | 1590 | EXPORT_SYMBOL(loop_unregister_transfer); |
1585 | 1591 | ||
1586 | static struct loop_device *loop_alloc(int i) | 1592 | static int loop_add(struct loop_device **l, int i) |
1587 | { | 1593 | { |
1588 | struct loop_device *lo; | 1594 | struct loop_device *lo; |
1589 | struct gendisk *disk; | 1595 | struct gendisk *disk; |
1596 | int err; | ||
1590 | 1597 | ||
1591 | lo = kzalloc(sizeof(*lo), GFP_KERNEL); | 1598 | lo = kzalloc(sizeof(*lo), GFP_KERNEL); |
1592 | if (!lo) | 1599 | if (!lo) { |
1600 | err = -ENOMEM; | ||
1593 | goto out; | 1601 | goto out; |
1602 | } | ||
1603 | |||
1604 | err = idr_pre_get(&loop_index_idr, GFP_KERNEL); | ||
1605 | if (err < 0) | ||
1606 | goto out_free_dev; | ||
1607 | |||
1608 | if (i >= 0) { | ||
1609 | int m; | ||
1610 | |||
1611 | /* create specific i in the index */ | ||
1612 | err = idr_get_new_above(&loop_index_idr, lo, i, &m); | ||
1613 | if (err >= 0 && i != m) { | ||
1614 | idr_remove(&loop_index_idr, m); | ||
1615 | err = -EEXIST; | ||
1616 | } | ||
1617 | } else if (i == -1) { | ||
1618 | int m; | ||
1619 | |||
1620 | /* get next free nr */ | ||
1621 | err = idr_get_new(&loop_index_idr, lo, &m); | ||
1622 | if (err >= 0) | ||
1623 | i = m; | ||
1624 | } else { | ||
1625 | err = -EINVAL; | ||
1626 | } | ||
1627 | if (err < 0) | ||
1628 | goto out_free_dev; | ||
1594 | 1629 | ||
1595 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); | 1630 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); |
1596 | if (!lo->lo_queue) | 1631 | if (!lo->lo_queue) |
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i) | |||
1611 | disk->private_data = lo; | 1646 | disk->private_data = lo; |
1612 | disk->queue = lo->lo_queue; | 1647 | disk->queue = lo->lo_queue; |
1613 | sprintf(disk->disk_name, "loop%d", i); | 1648 | sprintf(disk->disk_name, "loop%d", i); |
1614 | return lo; | 1649 | add_disk(disk); |
1650 | *l = lo; | ||
1651 | return lo->lo_number; | ||
1615 | 1652 | ||
1616 | out_free_queue: | 1653 | out_free_queue: |
1617 | blk_cleanup_queue(lo->lo_queue); | 1654 | blk_cleanup_queue(lo->lo_queue); |
1618 | out_free_dev: | 1655 | out_free_dev: |
1619 | kfree(lo); | 1656 | kfree(lo); |
1620 | out: | 1657 | out: |
1621 | return NULL; | 1658 | return err; |
1622 | } | 1659 | } |
1623 | 1660 | ||
1624 | static void loop_free(struct loop_device *lo) | 1661 | static void loop_remove(struct loop_device *lo) |
1625 | { | 1662 | { |
1663 | del_gendisk(lo->lo_disk); | ||
1626 | blk_cleanup_queue(lo->lo_queue); | 1664 | blk_cleanup_queue(lo->lo_queue); |
1627 | put_disk(lo->lo_disk); | 1665 | put_disk(lo->lo_disk); |
1628 | list_del(&lo->lo_list); | ||
1629 | kfree(lo); | 1666 | kfree(lo); |
1630 | } | 1667 | } |
1631 | 1668 | ||
1632 | static struct loop_device *loop_init_one(int i) | 1669 | static int find_free_cb(int id, void *ptr, void *data) |
1670 | { | ||
1671 | struct loop_device *lo = ptr; | ||
1672 | struct loop_device **l = data; | ||
1673 | |||
1674 | if (lo->lo_state == Lo_unbound) { | ||
1675 | *l = lo; | ||
1676 | return 1; | ||
1677 | } | ||
1678 | return 0; | ||
1679 | } | ||
1680 | |||
1681 | static int loop_lookup(struct loop_device **l, int i) | ||
1633 | { | 1682 | { |
1634 | struct loop_device *lo; | 1683 | struct loop_device *lo; |
1684 | int ret = -ENODEV; | ||
1635 | 1685 | ||
1636 | list_for_each_entry(lo, &loop_devices, lo_list) { | 1686 | if (i < 0) { |
1637 | if (lo->lo_number == i) | 1687 | int err; |
1638 | return lo; | 1688 | |
1689 | err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); | ||
1690 | if (err == 1) { | ||
1691 | *l = lo; | ||
1692 | ret = lo->lo_number; | ||
1693 | } | ||
1694 | goto out; | ||
1639 | } | 1695 | } |
1640 | 1696 | ||
1641 | lo = loop_alloc(i); | 1697 | /* lookup and return a specific i */ |
1698 | lo = idr_find(&loop_index_idr, i); | ||
1642 | if (lo) { | 1699 | if (lo) { |
1643 | add_disk(lo->lo_disk); | 1700 | *l = lo; |
1644 | list_add_tail(&lo->lo_list, &loop_devices); | 1701 | ret = lo->lo_number; |
1645 | } | 1702 | } |
1646 | return lo; | 1703 | out: |
1647 | } | 1704 | return ret; |
1648 | |||
1649 | static void loop_del_one(struct loop_device *lo) | ||
1650 | { | ||
1651 | del_gendisk(lo->lo_disk); | ||
1652 | loop_free(lo); | ||
1653 | } | 1705 | } |
1654 | 1706 | ||
1655 | static struct kobject *loop_probe(dev_t dev, int *part, void *data) | 1707 | static struct kobject *loop_probe(dev_t dev, int *part, void *data) |
1656 | { | 1708 | { |
1657 | struct loop_device *lo; | 1709 | struct loop_device *lo; |
1658 | struct kobject *kobj; | 1710 | struct kobject *kobj; |
1711 | int err; | ||
1659 | 1712 | ||
1660 | mutex_lock(&loop_devices_mutex); | 1713 | mutex_lock(&loop_index_mutex); |
1661 | lo = loop_init_one(MINOR(dev) >> part_shift); | 1714 | err = loop_lookup(&lo, MINOR(dev) >> part_shift); |
1662 | kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); | 1715 | if (err < 0) |
1663 | mutex_unlock(&loop_devices_mutex); | 1716 | err = loop_add(&lo, MINOR(dev) >> part_shift); |
1717 | if (err < 0) | ||
1718 | kobj = ERR_PTR(err); | ||
1719 | else | ||
1720 | kobj = get_disk(lo->lo_disk); | ||
1721 | mutex_unlock(&loop_index_mutex); | ||
1664 | 1722 | ||
1665 | *part = 0; | 1723 | *part = 0; |
1666 | return kobj; | 1724 | return kobj; |
1667 | } | 1725 | } |
1668 | 1726 | ||
1727 | static long loop_control_ioctl(struct file *file, unsigned int cmd, | ||
1728 | unsigned long parm) | ||
1729 | { | ||
1730 | struct loop_device *lo; | ||
1731 | int ret = -ENOSYS; | ||
1732 | |||
1733 | mutex_lock(&loop_index_mutex); | ||
1734 | switch (cmd) { | ||
1735 | case LOOP_CTL_ADD: | ||
1736 | ret = loop_lookup(&lo, parm); | ||
1737 | if (ret >= 0) { | ||
1738 | ret = -EEXIST; | ||
1739 | break; | ||
1740 | } | ||
1741 | ret = loop_add(&lo, parm); | ||
1742 | break; | ||
1743 | case LOOP_CTL_REMOVE: | ||
1744 | ret = loop_lookup(&lo, parm); | ||
1745 | if (ret < 0) | ||
1746 | break; | ||
1747 | mutex_lock(&lo->lo_ctl_mutex); | ||
1748 | if (lo->lo_state != Lo_unbound) { | ||
1749 | ret = -EBUSY; | ||
1750 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1751 | break; | ||
1752 | } | ||
1753 | if (lo->lo_refcnt > 0) { | ||
1754 | ret = -EBUSY; | ||
1755 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1756 | break; | ||
1757 | } | ||
1758 | lo->lo_disk->private_data = NULL; | ||
1759 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1760 | idr_remove(&loop_index_idr, lo->lo_number); | ||
1761 | loop_remove(lo); | ||
1762 | break; | ||
1763 | case LOOP_CTL_GET_FREE: | ||
1764 | ret = loop_lookup(&lo, -1); | ||
1765 | if (ret >= 0) | ||
1766 | break; | ||
1767 | ret = loop_add(&lo, -1); | ||
1768 | } | ||
1769 | mutex_unlock(&loop_index_mutex); | ||
1770 | |||
1771 | return ret; | ||
1772 | } | ||
1773 | |||
1774 | static const struct file_operations loop_ctl_fops = { | ||
1775 | .open = nonseekable_open, | ||
1776 | .unlocked_ioctl = loop_control_ioctl, | ||
1777 | .compat_ioctl = loop_control_ioctl, | ||
1778 | .owner = THIS_MODULE, | ||
1779 | .llseek = noop_llseek, | ||
1780 | }; | ||
1781 | |||
1782 | static struct miscdevice loop_misc = { | ||
1783 | .minor = LOOP_CTRL_MINOR, | ||
1784 | .name = "loop-control", | ||
1785 | .fops = &loop_ctl_fops, | ||
1786 | }; | ||
1787 | |||
1788 | MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); | ||
1789 | MODULE_ALIAS("devname:loop-control"); | ||
1790 | |||
1669 | static int __init loop_init(void) | 1791 | static int __init loop_init(void) |
1670 | { | 1792 | { |
1671 | int i, nr; | 1793 | int i, nr; |
1672 | unsigned long range; | 1794 | unsigned long range; |
1673 | struct loop_device *lo, *next; | 1795 | struct loop_device *lo; |
1796 | int err; | ||
1674 | 1797 | ||
1675 | /* | 1798 | err = misc_register(&loop_misc); |
1676 | * loop module now has a feature to instantiate underlying device | 1799 | if (err < 0) |
1677 | * structure on-demand, provided that there is an access dev node. | 1800 | return err; |
1678 | * However, this will not work well with user space tool that doesn't | ||
1679 | * know about such "feature". In order to not break any existing | ||
1680 | * tool, we do the following: | ||
1681 | * | ||
1682 | * (1) if max_loop is specified, create that many upfront, and this | ||
1683 | * also becomes a hard limit. | ||
1684 | * (2) if max_loop is not specified, create 8 loop device on module | ||
1685 | * load, user can further extend loop device by create dev node | ||
1686 | * themselves and have kernel automatically instantiate actual | ||
1687 | * device on-demand. | ||
1688 | */ | ||
1689 | 1801 | ||
1690 | part_shift = 0; | 1802 | part_shift = 0; |
1691 | if (max_part > 0) { | 1803 | if (max_part > 0) { |
@@ -1708,57 +1820,60 @@ static int __init loop_init(void) | |||
1708 | if (max_loop > 1UL << (MINORBITS - part_shift)) | 1820 | if (max_loop > 1UL << (MINORBITS - part_shift)) |
1709 | return -EINVAL; | 1821 | return -EINVAL; |
1710 | 1822 | ||
1823 | /* | ||
1824 | * If max_loop is specified, create that many devices upfront. | ||
1825 | * This also becomes a hard limit. If max_loop is not specified, | ||
1826 | * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module | ||
1827 | * init time. Loop devices can be requested on-demand with the | ||
1828 | * /dev/loop-control interface, or be instantiated by accessing | ||
1829 | * a 'dead' device node. | ||
1830 | */ | ||
1711 | if (max_loop) { | 1831 | if (max_loop) { |
1712 | nr = max_loop; | 1832 | nr = max_loop; |
1713 | range = max_loop << part_shift; | 1833 | range = max_loop << part_shift; |
1714 | } else { | 1834 | } else { |
1715 | nr = 8; | 1835 | nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; |
1716 | range = 1UL << MINORBITS; | 1836 | range = 1UL << MINORBITS; |
1717 | } | 1837 | } |
1718 | 1838 | ||
1719 | if (register_blkdev(LOOP_MAJOR, "loop")) | 1839 | if (register_blkdev(LOOP_MAJOR, "loop")) |
1720 | return -EIO; | 1840 | return -EIO; |
1721 | 1841 | ||
1722 | for (i = 0; i < nr; i++) { | ||
1723 | lo = loop_alloc(i); | ||
1724 | if (!lo) | ||
1725 | goto Enomem; | ||
1726 | list_add_tail(&lo->lo_list, &loop_devices); | ||
1727 | } | ||
1728 | |||
1729 | /* point of no return */ | ||
1730 | |||
1731 | list_for_each_entry(lo, &loop_devices, lo_list) | ||
1732 | add_disk(lo->lo_disk); | ||
1733 | |||
1734 | blk_register_region(MKDEV(LOOP_MAJOR, 0), range, | 1842 | blk_register_region(MKDEV(LOOP_MAJOR, 0), range, |
1735 | THIS_MODULE, loop_probe, NULL, NULL); | 1843 | THIS_MODULE, loop_probe, NULL, NULL); |
1736 | 1844 | ||
1845 | /* pre-create number of devices given by config or max_loop */ | ||
1846 | mutex_lock(&loop_index_mutex); | ||
1847 | for (i = 0; i < nr; i++) | ||
1848 | loop_add(&lo, i); | ||
1849 | mutex_unlock(&loop_index_mutex); | ||
1850 | |||
1737 | printk(KERN_INFO "loop: module loaded\n"); | 1851 | printk(KERN_INFO "loop: module loaded\n"); |
1738 | return 0; | 1852 | return 0; |
1853 | } | ||
1739 | 1854 | ||
1740 | Enomem: | 1855 | static int loop_exit_cb(int id, void *ptr, void *data) |
1741 | printk(KERN_INFO "loop: out of memory\n"); | 1856 | { |
1742 | 1857 | struct loop_device *lo = ptr; | |
1743 | list_for_each_entry_safe(lo, next, &loop_devices, lo_list) | ||
1744 | loop_free(lo); | ||
1745 | 1858 | ||
1746 | unregister_blkdev(LOOP_MAJOR, "loop"); | 1859 | loop_remove(lo); |
1747 | return -ENOMEM; | 1860 | return 0; |
1748 | } | 1861 | } |
1749 | 1862 | ||
1750 | static void __exit loop_exit(void) | 1863 | static void __exit loop_exit(void) |
1751 | { | 1864 | { |
1752 | unsigned long range; | 1865 | unsigned long range; |
1753 | struct loop_device *lo, *next; | ||
1754 | 1866 | ||
1755 | range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; | 1867 | range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; |
1756 | 1868 | ||
1757 | list_for_each_entry_safe(lo, next, &loop_devices, lo_list) | 1869 | idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); |
1758 | loop_del_one(lo); | 1870 | idr_remove_all(&loop_index_idr); |
1871 | idr_destroy(&loop_index_idr); | ||
1759 | 1872 | ||
1760 | blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); | 1873 | blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); |
1761 | unregister_blkdev(LOOP_MAJOR, "loop"); | 1874 | unregister_blkdev(LOOP_MAJOR, "loop"); |
1875 | |||
1876 | misc_deregister(&loop_misc); | ||
1762 | } | 1877 | } |
1763 | 1878 | ||
1764 | module_init(loop_init); | 1879 | module_init(loop_init); |
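The new /dev/loop-control node accepts LOOP_CTL_ADD, LOOP_CTL_REMOVE and LOOP_CTL_GET_FREE, as implemented in loop_control_ioctl() above. A minimal user-space sketch of the interface (assuming the LOOP_CTL_* constants exported through <linux/loop.h> by this series; error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	if (ctl < 0)
		return 1;

	/* first unbound device; the driver allocates one if none exists */
	int nr = ioctl(ctl, LOOP_CTL_GET_FREE);
	if (nr >= 0)
		printf("free device: /dev/loop%d\n", nr);

	/* create a specific index; fails with EEXIST if already present */
	ioctl(ctl, LOOP_CTL_ADD, 7);

	/* tear it down again; fails with EBUSY while bound or open */
	ioctl(ctl, LOOP_CTL_REMOVE, 7);

	close(ctl);
	return 0;
}

Note that LOOP_CTL_GET_FREE falls back to loop_add(&lo, -1) when loop_lookup() finds no unbound device, so callers receive a fresh minor rather than an error.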
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 773bfa792777..ae3e167e17ad 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] = | |||
1184 | { | 1184 | { |
1185 | .compatible = "swim3" | 1185 | .compatible = "swim3" |
1186 | }, | 1186 | }, |
1187 | { /* end of list */ } | ||
1187 | }; | 1188 | }; |
1188 | 1189 | ||
1189 | static struct macio_driver swim3_driver = | 1190 | static struct macio_driver swim3_driver = |
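The swim3 fix adds the terminating entry that the OF match helpers rely on: they walk the table until an all-zero element, so a table without one reads past the end of the array. Illustrative shape only (the compatible string is hypothetical):

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel -- of_match_node() stops here */ }
};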
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b536a9cef917..9ea8c2576c70 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock); | |||
123 | #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) | 123 | #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) |
124 | #define EMULATED_HD_DISK_MINOR_OFFSET (0) | 124 | #define EMULATED_HD_DISK_MINOR_OFFSET (0) |
125 | #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) | 125 | #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) |
126 | #define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) | 126 | #define EMULATED_SD_DISK_MINOR_OFFSET (0) |
127 | #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) | 127 | #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256) |
128 | 128 | ||
129 | #define DEV_NAME "xvd" /* name in /dev */ | 129 | #define DEV_NAME "xvd" /* name in /dev */ |
130 | 130 | ||
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
529 | minor = BLKIF_MINOR_EXT(info->vdevice); | 529 | minor = BLKIF_MINOR_EXT(info->vdevice); |
530 | nr_parts = PARTS_PER_EXT_DISK; | 530 | nr_parts = PARTS_PER_EXT_DISK; |
531 | offset = minor / nr_parts; | 531 | offset = minor / nr_parts; |
532 | if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) | 532 | if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4) |
533 | printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " | 533 | printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " |
534 | "emulated IDE disks,\n\t choose an xvd device name" | 534 | "emulated IDE disks,\n\t choose an xvd device name" |
535 | "from xvde on\n", info->vdevice); | 535 | "from xvde on\n", info->vdevice); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 75fb965b8f72..f997c27d79e2 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, | |||
1929 | goto out; | 1929 | goto out; |
1930 | 1930 | ||
1931 | s->manufact.len = buf[0] << 8 | buf[1]; | 1931 | s->manufact.len = buf[0] << 8 | buf[1]; |
1932 | if (s->manufact.len < 0 || s->manufact.len > 2048) { | 1932 | if (s->manufact.len < 0) { |
1933 | cdinfo(CD_WARNING, "Received invalid manufacture info length" | 1933 | cdinfo(CD_WARNING, "Received invalid manufacture info length" |
1934 | " (%d)\n", s->manufact.len); | 1934 | " (%d)\n", s->manufact.len); |
1935 | ret = -EIO; | 1935 | ret = -EIO; |
1936 | } else { | 1936 | } else { |
1937 | if (s->manufact.len > 2048) { | ||
1938 | cdinfo(CD_WARNING, "Received invalid manufacture info " | ||
1939 | "length (%d): truncating to 2048\n", | ||
1940 | s->manufact.len); | ||
1941 | s->manufact.len = 2048; | ||
1942 | } | ||
1937 | memcpy(s->manufact.value, &buf[4], s->manufact.len); | 1943 | memcpy(s->manufact.value, &buf[4], s->manufact.len); |
1938 | } | 1944 | } |
1939 | 1945 | ||
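The cdrom change stops rejecting oversized manufacturer lengths outright and instead clamps them to the 2048-byte value buffer before the memcpy(). The general shape, as a stand-alone sketch (names hypothetical):

#include <string.h>

/* Clamp an untrusted length to the destination size before copying,
 * as dvd_read_manufact() now does for s->manufact.value. */
static size_t copy_clamped(void *dst, size_t dst_size,
			   const void *src, size_t src_len)
{
	size_t n = src_len > dst_size ? dst_size : src_len;

	memcpy(dst, src, n);
	return n;
}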
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index b6f8a65c9960..8eca55deb3a3 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c | |||
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void) | |||
379 | for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { | 379 | for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { |
380 | smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), | 380 | smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), |
381 | GFP_KERNEL); | 381 | GFP_KERNEL); |
382 | if (IS_ERR(smd_pkt_devp[i])) { | 382 | if (!smd_pkt_devp[i]) { |
383 | r = PTR_ERR(smd_pkt_devp[i]); | 383 | pr_err("kmalloc() failed\n"); |
384 | pr_err("kmalloc() failed %d\n", r); | ||
385 | goto clean_cdevs; | 384 | goto clean_cdevs; |
386 | } | 385 | } |
387 | 386 | ||
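The msm_smd_pkt fix restores the normal allocator convention: kzalloc() reports failure with NULL, never with an ERR_PTR() encoding, so an IS_ERR() check on its result can never trigger. A minimal sketch of the corrected check (struct smd_example is hypothetical):

#include <linux/slab.h>

struct smd_example {
	int dummy;
};

static struct smd_example *smd_example_alloc(void)
{
	struct smd_example *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)			/* not IS_ERR(p) -- kzalloc() returns NULL */
		return NULL;
	return p;
}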
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 196a7378d332..be21e3f138a8 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -80,6 +80,7 @@ | |||
80 | #include <linux/interrupt.h> | 80 | #include <linux/interrupt.h> |
81 | #include <linux/slab.h> | 81 | #include <linux/slab.h> |
82 | #include <linux/delay.h> | 82 | #include <linux/delay.h> |
83 | #include <linux/dma-mapping.h> | ||
83 | #include <linux/dmapool.h> | 84 | #include <linux/dmapool.h> |
84 | #include <linux/dmaengine.h> | 85 | #include <linux/dmaengine.h> |
85 | #include <linux/amba/bus.h> | 86 | #include <linux/amba/bus.h> |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 04f1e7ce02b1..f6cf448d69b4 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, | |||
1670 | char *type, *optype, *err, *msg; | 1670 | char *type, *optype, *err, *msg; |
1671 | unsigned long error = m->status & 0x1ff0000l; | 1671 | unsigned long error = m->status & 0x1ff0000l; |
1672 | u32 optypenum = (m->status >> 4) & 0x07; | 1672 | u32 optypenum = (m->status >> 4) & 0x07; |
1673 | u32 core_err_cnt = (m->status >> 38) && 0x7fff; | 1673 | u32 core_err_cnt = (m->status >> 38) & 0x7fff; |
1674 | u32 dimm = (m->misc >> 16) & 0x3; | 1674 | u32 dimm = (m->misc >> 16) & 0x3; |
1675 | u32 channel = (m->misc >> 18) & 0x3; | 1675 | u32 channel = (m->misc >> 18) & 0x3; |
1676 | u32 syndrome = m->misc >> 32; | 1676 | u32 syndrome = m->misc >> 32; |
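The i7core_edac change is a one-character fix: the logical AND collapsed the 15-bit error count to 0 or 1 instead of extracting it. A worked user-space example of the difference (field value illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t status = (uint64_t)0x40 << 38;	/* core_err_cnt field = 0x40 */

	assert(((status >> 38) && 0x7fff) == 1);	/* old: logical AND */
	assert(((status >> 38) &  0x7fff) == 0x40);	/* new: bitwise AND */
	return 0;
}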
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6ad3bb6c1a6..4799393247c8 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event { | |||
216 | struct fw_cdev_event_phy_packet phy_packet; | 216 | struct fw_cdev_event_phy_packet phy_packet; |
217 | }; | 217 | }; |
218 | 218 | ||
219 | static inline void __user *u64_to_uptr(__u64 value) | 219 | #ifdef CONFIG_COMPAT |
220 | static void __user *u64_to_uptr(u64 value) | ||
221 | { | ||
222 | if (is_compat_task()) | ||
223 | return compat_ptr(value); | ||
224 | else | ||
225 | return (void __user *)(unsigned long)value; | ||
226 | } | ||
227 | |||
228 | static u64 uptr_to_u64(void __user *ptr) | ||
229 | { | ||
230 | if (is_compat_task()) | ||
231 | return ptr_to_compat(ptr); | ||
232 | else | ||
233 | return (u64)(unsigned long)ptr; | ||
234 | } | ||
235 | #else | ||
236 | static inline void __user *u64_to_uptr(u64 value) | ||
220 | { | 237 | { |
221 | return (void __user *)(unsigned long)value; | 238 | return (void __user *)(unsigned long)value; |
222 | } | 239 | } |
223 | 240 | ||
224 | static inline __u64 uptr_to_u64(void __user *ptr) | 241 | static inline u64 uptr_to_u64(void __user *ptr) |
225 | { | 242 | { |
226 | return (__u64)(unsigned long)ptr; | 243 | return (u64)(unsigned long)ptr; |
227 | } | 244 | } |
245 | #endif /* CONFIG_COMPAT */ | ||
228 | 246 | ||
229 | static int fw_device_op_open(struct inode *inode, struct file *file) | 247 | static int fw_device_op_open(struct inode *inode, struct file *file) |
230 | { | 248 | { |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 8ba7f7928f1f..f3b890da1e87 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = { | |||
455 | static int read_rom(struct fw_device *device, | 455 | static int read_rom(struct fw_device *device, |
456 | int generation, int index, u32 *data) | 456 | int generation, int index, u32 *data) |
457 | { | 457 | { |
458 | int rcode; | 458 | u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; |
459 | int i, rcode; | ||
459 | 460 | ||
460 | /* device->node_id, accessed below, must not be older than generation */ | 461 | /* device->node_id, accessed below, must not be older than generation */ |
461 | smp_rmb(); | 462 | smp_rmb(); |
462 | 463 | ||
463 | rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, | 464 | for (i = 10; i < 100; i += 10) { |
464 | device->node_id, generation, device->max_speed, | 465 | rcode = fw_run_transaction(device->card, |
465 | (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, | 466 | TCODE_READ_QUADLET_REQUEST, device->node_id, |
466 | data, 4); | 467 | generation, device->max_speed, offset, data, 4); |
468 | if (rcode != RCODE_BUSY) | ||
469 | break; | ||
470 | msleep(i); | ||
471 | } | ||
467 | be32_to_cpus(data); | 472 | be32_to_cpus(data); |
468 | 473 | ||
469 | return rcode; | 474 | return rcode; |
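read_rom() now retries a quadlet read that comes back busy, sleeping a little longer on each pass and giving up after a fixed budget. The same bounded-retry shape, generalized as a sketch (the callback and -EBUSY stand in for fw_run_transaction() and RCODE_BUSY):

#include <linux/delay.h>
#include <linux/errno.h>

static int retry_while_busy(int (*op)(void *arg), void *arg)
{
	int i, ret = -EBUSY;

	for (i = 10; i < 100; i += 10) {
		ret = op(arg);
		if (ret != -EBUSY)
			break;
		msleep(i);		/* 10, 20, ... 90 ms between attempts */
	}
	return ret;
}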
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index bcf792fac442..57cd3a406edf 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -2179,8 +2179,13 @@ static int ohci_enable(struct fw_card *card, | |||
2179 | ohci_driver_name, ohci)) { | 2179 | ohci_driver_name, ohci)) { |
2180 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); | 2180 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
2181 | pci_disable_msi(dev); | 2181 | pci_disable_msi(dev); |
2182 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2182 | |
2183 | ohci->config_rom, ohci->config_rom_bus); | 2183 | if (config_rom) { |
2184 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | ||
2185 | ohci->next_config_rom, | ||
2186 | ohci->next_config_rom_bus); | ||
2187 | ohci->next_config_rom = NULL; | ||
2188 | } | ||
2184 | return -EIO; | 2189 | return -EIO; |
2185 | } | 2190 | } |
2186 | 2191 | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a8ab6263e0d7..3c395a59da35 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
499 | seq_printf(m, "Interrupts received: %d\n", | 499 | seq_printf(m, "Interrupts received: %d\n", |
500 | atomic_read(&dev_priv->irq_received)); | 500 | atomic_read(&dev_priv->irq_received)); |
501 | for (i = 0; i < I915_NUM_RINGS; i++) { | 501 | for (i = 0; i < I915_NUM_RINGS; i++) { |
502 | if (IS_GEN6(dev)) { | 502 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
503 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", | 503 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", |
504 | dev_priv->ring[i].name, | 504 | dev_priv->ring[i].name, |
505 | I915_READ_IMR(&dev_priv->ring[i])); | 505 | I915_READ_IMR(&dev_priv->ring[i])); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index feb4f164fd1b..7916bd97d5c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
37 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
38 | #include <drm/intel-gtt.h> | 38 | #include <drm/intel-gtt.h> |
39 | #include <linux/backlight.h> | ||
39 | 40 | ||
40 | /* General customization: | 41 | /* General customization: |
41 | */ | 42 | */ |
@@ -690,6 +691,7 @@ typedef struct drm_i915_private { | |||
690 | int child_dev_num; | 691 | int child_dev_num; |
691 | struct child_device_config *child_dev; | 692 | struct child_device_config *child_dev; |
692 | struct drm_connector *int_lvds_connector; | 693 | struct drm_connector *int_lvds_connector; |
694 | struct drm_connector *int_edp_connector; | ||
693 | 695 | ||
694 | bool mchbar_need_disable; | 696 | bool mchbar_need_disable; |
695 | 697 | ||
@@ -723,6 +725,8 @@ typedef struct drm_i915_private { | |||
723 | /* list of fbdev register on this device */ | 725 | /* list of fbdev register on this device */ |
724 | struct intel_fbdev *fbdev; | 726 | struct intel_fbdev *fbdev; |
725 | 727 | ||
728 | struct backlight_device *backlight; | ||
729 | |||
726 | struct drm_property *broadcast_rgb_property; | 730 | struct drm_property *broadcast_rgb_property; |
727 | struct drm_property *force_audio_property; | 731 | struct drm_property *force_audio_property; |
728 | 732 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02f96fd0d52d..9cbb0cd8f46a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -2058,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev) | |||
2058 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 2058 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
2059 | } | 2059 | } |
2060 | 2060 | ||
2061 | 2061 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
2062 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 2062 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
2063 | else | ||
2064 | dev->driver->get_vblank_timestamp = NULL; | ||
2063 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 2065 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
2064 | 2066 | ||
2065 | if (IS_IVYBRIDGE(dev)) { | 2067 | if (IS_IVYBRIDGE(dev)) { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d1331f771e2f..542453f7498c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -375,6 +375,7 @@ | |||
375 | # define MI_FLUSH_ENABLE (1 << 11) | 375 | # define MI_FLUSH_ENABLE (1 << 11) |
376 | 376 | ||
377 | #define GFX_MODE 0x02520 | 377 | #define GFX_MODE 0x02520 |
378 | #define GFX_MODE_GEN7 0x0229c | ||
378 | #define GFX_RUN_LIST_ENABLE (1<<15) | 379 | #define GFX_RUN_LIST_ENABLE (1<<15) |
379 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | 380 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) |
380 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 381 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
@@ -382,6 +383,9 @@ | |||
382 | #define GFX_PSMI_GRANULARITY (1<<10) | 383 | #define GFX_PSMI_GRANULARITY (1<<10) |
383 | #define GFX_PPGTT_ENABLE (1<<9) | 384 | #define GFX_PPGTT_ENABLE (1<<9) |
384 | 385 | ||
386 | #define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) | ||
387 | #define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) | ||
388 | |||
385 | #define SCPD0 0x0209c /* 915+ only */ | 389 | #define SCPD0 0x0209c /* 915+ only */ |
386 | #define IER 0x020a0 | 390 | #define IER 0x020a0 |
387 | #define IIR 0x020a4 | 391 | #define IIR 0x020a4 |
@@ -1318,6 +1322,7 @@ | |||
1318 | #define ADPA_PIPE_SELECT_MASK (1<<30) | 1322 | #define ADPA_PIPE_SELECT_MASK (1<<30) |
1319 | #define ADPA_PIPE_A_SELECT 0 | 1323 | #define ADPA_PIPE_A_SELECT 0 |
1320 | #define ADPA_PIPE_B_SELECT (1<<30) | 1324 | #define ADPA_PIPE_B_SELECT (1<<30) |
1325 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) | ||
1321 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) | 1326 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) |
1322 | #define ADPA_SETS_HVPOLARITY 0 | 1327 | #define ADPA_SETS_HVPOLARITY 0 |
1323 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) | 1328 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) |
@@ -1460,6 +1465,7 @@ | |||
1460 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1465 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
1461 | #define LVDS_PIPEB_SELECT (1 << 30) | 1466 | #define LVDS_PIPEB_SELECT (1 << 30) |
1462 | #define LVDS_PIPE_MASK (1 << 30) | 1467 | #define LVDS_PIPE_MASK (1 << 30) |
1468 | #define LVDS_PIPE(pipe) ((pipe) << 30) | ||
1463 | /* LVDS dithering flag on 965/g4x platform */ | 1469 | /* LVDS dithering flag on 965/g4x platform */ |
1464 | #define LVDS_ENABLE_DITHER (1 << 25) | 1470 | #define LVDS_ENABLE_DITHER (1 << 25) |
1465 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | 1471 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ |
@@ -1499,9 +1505,6 @@ | |||
1499 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1505 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1500 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1506 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1501 | 1507 | ||
1502 | #define LVDS_PIPE_ENABLED(V, P) \ | ||
1503 | (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) | ||
1504 | |||
1505 | /* Video Data Island Packet control */ | 1508 | /* Video Data Island Packet control */ |
1506 | #define VIDEO_DIP_DATA 0x61178 | 1509 | #define VIDEO_DIP_DATA 0x61178 |
1507 | #define VIDEO_DIP_CTL 0x61170 | 1510 | #define VIDEO_DIP_CTL 0x61170 |
@@ -3256,14 +3259,12 @@ | |||
3256 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 3259 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
3257 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 3260 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
3258 | 3261 | ||
3259 | #define ADPA_PIPE_ENABLED(V, P) \ | ||
3260 | (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) | ||
3261 | |||
3262 | /* or SDVOB */ | 3262 | /* or SDVOB */ |
3263 | #define HDMIB 0xe1140 | 3263 | #define HDMIB 0xe1140 |
3264 | #define PORT_ENABLE (1 << 31) | 3264 | #define PORT_ENABLE (1 << 31) |
3265 | #define TRANSCODER_A (0) | 3265 | #define TRANSCODER_A (0) |
3266 | #define TRANSCODER_B (1 << 30) | 3266 | #define TRANSCODER_B (1 << 30) |
3267 | #define TRANSCODER(pipe) ((pipe) << 30) | ||
3267 | #define TRANSCODER_MASK (1 << 30) | 3268 | #define TRANSCODER_MASK (1 << 30) |
3268 | #define COLOR_FORMAT_8bpc (0) | 3269 | #define COLOR_FORMAT_8bpc (0) |
3269 | #define COLOR_FORMAT_12bpc (3 << 26) | 3270 | #define COLOR_FORMAT_12bpc (3 << 26) |
@@ -3280,9 +3281,6 @@ | |||
3280 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 3281 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
3281 | #define PORT_DETECTED (1 << 2) | 3282 | #define PORT_DETECTED (1 << 2) |
3282 | 3283 | ||
3283 | #define HDMI_PIPE_ENABLED(V, P) \ | ||
3284 | (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) | ||
3285 | |||
3286 | /* PCH SDVOB multiplex with HDMIB */ | 3284 | /* PCH SDVOB multiplex with HDMIB */ |
3287 | #define PCH_SDVOB HDMIB | 3285 | #define PCH_SDVOB HDMIB |
3288 | 3286 | ||
@@ -3349,6 +3347,7 @@ | |||
3349 | #define PORT_TRANS_B_SEL_CPT (1<<29) | 3347 | #define PORT_TRANS_B_SEL_CPT (1<<29) |
3350 | #define PORT_TRANS_C_SEL_CPT (2<<29) | 3348 | #define PORT_TRANS_C_SEL_CPT (2<<29) |
3351 | #define PORT_TRANS_SEL_MASK (3<<29) | 3349 | #define PORT_TRANS_SEL_MASK (3<<29) |
3350 | #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) | ||
3352 | 3351 | ||
3353 | #define TRANS_DP_CTL_A 0xe0300 | 3352 | #define TRANS_DP_CTL_A 0xe0300 |
3354 | #define TRANS_DP_CTL_B 0xe1300 | 3353 | #define TRANS_DP_CTL_B 0xe1300 |
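GFX_MODE_ENABLE()/GFX_MODE_DISABLE() above follow the masked-write convention of this register family: the upper 16 bits select which of the lower 16 bits the write actually changes, so a single bit can be flipped without a read-modify-write. Usage sketch (mirrors the init_render_ring() hunk further down; GFX_MODE_GEN7 and GFX_RUN_LIST_ENABLE are the definitions above):

/* Enable only the run-list bit; other mode bits are untouched because
 * their write-enable bits in the upper half stay clear. */
I915_WRITE(GFX_MODE_GEN7, GFX_MODE_ENABLE(GFX_RUN_LIST_ENABLE));

/* Disable the same bit: write-enable set, value bit clear. */
I915_WRITE(GFX_MODE_GEN7, GFX_MODE_DISABLE(GFX_RUN_LIST_ENABLE));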
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 87677d60d0df..f10742359ec9 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -871,7 +871,8 @@ int i915_restore_state(struct drm_device *dev) | |||
871 | } | 871 | } |
872 | mutex_unlock(&dev->struct_mutex); | 872 | mutex_unlock(&dev->struct_mutex); |
873 | 873 | ||
874 | intel_init_clock_gating(dev); | 874 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
875 | intel_init_clock_gating(dev); | ||
875 | 876 | ||
876 | if (IS_IRONLAKE_M(dev)) { | 877 | if (IS_IRONLAKE_M(dev)) { |
877 | ironlake_enable_drps(dev); | 878 | ironlake_enable_drps(dev); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 35364e68a091..ee1d701317f7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -980,8 +980,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | |||
980 | pipe_name(pipe)); | 980 | pipe_name(pipe)); |
981 | } | 981 | } |
982 | 982 | ||
983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, | 983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
984 | int reg, u32 port_sel, u32 val) | 984 | enum pipe pipe, u32 port_sel, u32 val) |
985 | { | 985 | { |
986 | if ((val & DP_PORT_EN) == 0) | 986 | if ((val & DP_PORT_EN) == 0) |
987 | return false; | 987 | return false; |
@@ -998,11 +998,58 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
998 | return true; | 998 | return true; |
999 | } | 999 | } |
1000 | 1000 | ||
1001 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1002 | enum pipe pipe, u32 val) | ||
1003 | { | ||
1004 | if ((val & PORT_ENABLE) == 0) | ||
1005 | return false; | ||
1006 | |||
1007 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1008 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1009 | return false; | ||
1010 | } else { | ||
1011 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) | ||
1012 | return false; | ||
1013 | } | ||
1014 | return true; | ||
1015 | } | ||
1016 | |||
1017 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1018 | enum pipe pipe, u32 val) | ||
1019 | { | ||
1020 | if ((val & LVDS_PORT_EN) == 0) | ||
1021 | return false; | ||
1022 | |||
1023 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1024 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1025 | return false; | ||
1026 | } else { | ||
1027 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) | ||
1028 | return false; | ||
1029 | } | ||
1030 | return true; | ||
1031 | } | ||
1032 | |||
1033 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1034 | enum pipe pipe, u32 val) | ||
1035 | { | ||
1036 | if ((val & ADPA_DAC_ENABLE) == 0) | ||
1037 | return false; | ||
1038 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1039 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1040 | return false; | ||
1041 | } else { | ||
1042 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) | ||
1043 | return false; | ||
1044 | } | ||
1045 | return true; | ||
1046 | } | ||
1047 | |||
1001 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | 1048 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
1002 | enum pipe pipe, int reg, u32 port_sel) | 1049 | enum pipe pipe, int reg, u32 port_sel) |
1003 | { | 1050 | { |
1004 | u32 val = I915_READ(reg); | 1051 | u32 val = I915_READ(reg); |
1005 | WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), | 1052 | WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
1006 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1053 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1007 | reg, pipe_name(pipe)); | 1054 | reg, pipe_name(pipe)); |
1008 | } | 1055 | } |
@@ -1011,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1011 | enum pipe pipe, int reg) | 1058 | enum pipe pipe, int reg) |
1012 | { | 1059 | { |
1013 | u32 val = I915_READ(reg); | 1060 | u32 val = I915_READ(reg); |
1014 | WARN(HDMI_PIPE_ENABLED(val, pipe), | 1061 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), |
1015 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1062 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1016 | reg, pipe_name(pipe)); | 1063 | reg, pipe_name(pipe)); |
1017 | } | 1064 | } |
@@ -1028,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1028 | 1075 | ||
1029 | reg = PCH_ADPA; | 1076 | reg = PCH_ADPA; |
1030 | val = I915_READ(reg); | 1077 | val = I915_READ(reg); |
1031 | WARN(ADPA_PIPE_ENABLED(val, pipe), | 1078 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), |
1032 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1079 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1033 | pipe_name(pipe)); | 1080 | pipe_name(pipe)); |
1034 | 1081 | ||
1035 | reg = PCH_LVDS; | 1082 | reg = PCH_LVDS; |
1036 | val = I915_READ(reg); | 1083 | val = I915_READ(reg); |
1037 | WARN(LVDS_PIPE_ENABLED(val, pipe), | 1084 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), |
1038 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1085 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1039 | pipe_name(pipe)); | 1086 | pipe_name(pipe)); |
1040 | 1087 | ||
@@ -1360,7 +1407,7 @@ static void disable_pch_dp(struct drm_i915_private *dev_priv, | |||
1360 | enum pipe pipe, int reg, u32 port_sel) | 1407 | enum pipe pipe, int reg, u32 port_sel) |
1361 | { | 1408 | { |
1362 | u32 val = I915_READ(reg); | 1409 | u32 val = I915_READ(reg); |
1363 | if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { | 1410 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
1364 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); | 1411 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); |
1365 | I915_WRITE(reg, val & ~DP_PORT_EN); | 1412 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1366 | } | 1413 | } |
@@ -1370,7 +1417,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | |||
1370 | enum pipe pipe, int reg) | 1417 | enum pipe pipe, int reg) |
1371 | { | 1418 | { |
1372 | u32 val = I915_READ(reg); | 1419 | u32 val = I915_READ(reg); |
1373 | if (HDMI_PIPE_ENABLED(val, pipe)) { | 1420 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { |
1374 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | 1421 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1375 | reg, pipe); | 1422 | reg, pipe); |
1376 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1423 | I915_WRITE(reg, val & ~PORT_ENABLE); |
@@ -1392,12 +1439,13 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1392 | 1439 | ||
1393 | reg = PCH_ADPA; | 1440 | reg = PCH_ADPA; |
1394 | val = I915_READ(reg); | 1441 | val = I915_READ(reg); |
1395 | if (ADPA_PIPE_ENABLED(val, pipe)) | 1442 | if (adpa_pipe_enabled(dev_priv, val, pipe)) |
1396 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | 1443 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1397 | 1444 | ||
1398 | reg = PCH_LVDS; | 1445 | reg = PCH_LVDS; |
1399 | val = I915_READ(reg); | 1446 | val = I915_READ(reg); |
1400 | if (LVDS_PIPE_ENABLED(val, pipe)) { | 1447 | if (lvds_pipe_enabled(dev_priv, val, pipe)) { |
1448 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | ||
1401 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | 1449 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1402 | POSTING_READ(reg); | 1450 | POSTING_READ(reg); |
1403 | udelay(100); | 1451 | udelay(100); |
@@ -5049,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5049 | return ret; | 5097 | return ret; |
5050 | } | 5098 | } |
5051 | 5099 | ||
5100 | static void ironlake_update_pch_refclk(struct drm_device *dev) | ||
5101 | { | ||
5102 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5103 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5104 | struct drm_crtc *crtc; | ||
5105 | struct intel_encoder *encoder; | ||
5106 | struct intel_encoder *has_edp_encoder = NULL; | ||
5107 | u32 temp; | ||
5108 | bool has_lvds = false; | ||
5109 | |||
5110 | /* We need to take the global config into account */ | ||
5111 | list_for_each_entry(crtc, &mode_config->crtc_list, head) { | ||
5112 | if (!crtc->enabled) | ||
5113 | continue; | ||
5114 | |||
5115 | list_for_each_entry(encoder, &mode_config->encoder_list, | ||
5116 | base.head) { | ||
5117 | if (encoder->base.crtc != crtc) | ||
5118 | continue; | ||
5119 | |||
5120 | switch (encoder->type) { | ||
5121 | case INTEL_OUTPUT_LVDS: | ||
5122 | has_lvds = true; | ||
5123 | case INTEL_OUTPUT_EDP: | ||
5124 | has_edp_encoder = encoder; | ||
5125 | break; | ||
5126 | } | ||
5127 | } | ||
5128 | } | ||
5129 | |||
5130 | /* Ironlake: try to setup display ref clock before DPLL | ||
5131 | * enabling. This is only under driver's control after | ||
5132 | * PCH B stepping, previous chipset stepping should be | ||
5133 | * ignoring this setting. | ||
5134 | */ | ||
5135 | temp = I915_READ(PCH_DREF_CONTROL); | ||
5136 | /* Always enable nonspread source */ | ||
5137 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
5138 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5139 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5140 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5141 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5142 | |||
5143 | POSTING_READ(PCH_DREF_CONTROL); | ||
5144 | udelay(200); | ||
5145 | |||
5146 | if (has_edp_encoder) { | ||
5147 | if (intel_panel_use_ssc(dev_priv)) { | ||
5148 | temp |= DREF_SSC1_ENABLE; | ||
5149 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5150 | |||
5151 | POSTING_READ(PCH_DREF_CONTROL); | ||
5152 | udelay(200); | ||
5153 | } | ||
5154 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5155 | |||
5156 | /* Enable CPU source on CPU attached eDP */ | ||
5157 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
5158 | if (intel_panel_use_ssc(dev_priv)) | ||
5159 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
5160 | else | ||
5161 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
5162 | } else { | ||
5163 | /* Enable SSC on PCH eDP if needed */ | ||
5164 | if (intel_panel_use_ssc(dev_priv)) { | ||
5165 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5166 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5167 | } | ||
5168 | } | ||
5169 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5170 | POSTING_READ(PCH_DREF_CONTROL); | ||
5171 | udelay(200); | ||
5172 | } | ||
5173 | } | ||
5174 | |||
5052 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5175 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5053 | struct drm_display_mode *mode, | 5176 | struct drm_display_mode *mode, |
5054 | struct drm_display_mode *adjusted_mode, | 5177 | struct drm_display_mode *adjusted_mode, |
@@ -5244,49 +5367,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5244 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, | 5367 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5245 | &m_n); | 5368 | &m_n); |
5246 | 5369 | ||
5247 | /* Ironlake: try to setup display ref clock before DPLL | 5370 | ironlake_update_pch_refclk(dev); |
5248 | * enabling. This is only under driver's control after | ||
5249 | * PCH B stepping, previous chipset stepping should be | ||
5250 | * ignoring this setting. | ||
5251 | */ | ||
5252 | temp = I915_READ(PCH_DREF_CONTROL); | ||
5253 | /* Always enable nonspread source */ | ||
5254 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
5255 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5256 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5257 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5258 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5259 | |||
5260 | POSTING_READ(PCH_DREF_CONTROL); | ||
5261 | udelay(200); | ||
5262 | |||
5263 | if (has_edp_encoder) { | ||
5264 | if (intel_panel_use_ssc(dev_priv)) { | ||
5265 | temp |= DREF_SSC1_ENABLE; | ||
5266 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5267 | |||
5268 | POSTING_READ(PCH_DREF_CONTROL); | ||
5269 | udelay(200); | ||
5270 | } | ||
5271 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5272 | |||
5273 | /* Enable CPU source on CPU attached eDP */ | ||
5274 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
5275 | if (intel_panel_use_ssc(dev_priv)) | ||
5276 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
5277 | else | ||
5278 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
5279 | } else { | ||
5280 | /* Enable SSC on PCH eDP if needed */ | ||
5281 | if (intel_panel_use_ssc(dev_priv)) { | ||
5282 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5283 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5284 | } | ||
5285 | } | ||
5286 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5287 | POSTING_READ(PCH_DREF_CONTROL); | ||
5288 | udelay(200); | ||
5289 | } | ||
5290 | 5371 | ||
5291 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5372 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5292 | if (has_reduced_clock) | 5373 | if (has_reduced_clock) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0feae908bb37..44fef5e1c490 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1841,6 +1841,11 @@ done: | |||
1841 | static void | 1841 | static void |
1842 | intel_dp_destroy (struct drm_connector *connector) | 1842 | intel_dp_destroy (struct drm_connector *connector) |
1843 | { | 1843 | { |
1844 | struct drm_device *dev = connector->dev; | ||
1845 | |||
1846 | if (intel_dpd_is_edp(dev)) | ||
1847 | intel_panel_destroy_backlight(dev); | ||
1848 | |||
1844 | drm_sysfs_connector_remove(connector); | 1849 | drm_sysfs_connector_remove(connector); |
1845 | drm_connector_cleanup(connector); | 1850 | drm_connector_cleanup(connector); |
1846 | kfree(connector); | 1851 | kfree(connector); |
@@ -2072,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2072 | DRM_MODE_TYPE_PREFERRED; | 2077 | DRM_MODE_TYPE_PREFERRED; |
2073 | } | 2078 | } |
2074 | } | 2079 | } |
2080 | dev_priv->int_edp_connector = connector; | ||
2081 | intel_panel_setup_backlight(dev); | ||
2075 | } | 2082 | } |
2076 | 2083 | ||
2077 | intel_dp_add_properties(intel_dp, connector); | 2084 | intel_dp_add_properties(intel_dp, connector); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7b330e76a435..0b2ee9d39980 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -297,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
297 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 297 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
298 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 298 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
299 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 299 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
300 | extern void intel_panel_setup_backlight(struct drm_device *dev); | 300 | extern int intel_panel_setup_backlight(struct drm_device *dev); |
301 | extern void intel_panel_enable_backlight(struct drm_device *dev); | 301 | extern void intel_panel_enable_backlight(struct drm_device *dev); |
302 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 302 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
303 | extern void intel_panel_destroy_backlight(struct drm_device *dev); | ||
303 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 304 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
304 | 305 | ||
305 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 306 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2e8ddfcba40c..31da77f5c051 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
72 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | 73 | struct drm_device *dev = intel_lvds->base.base.dev; |
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
75 | u32 ctl_reg, lvds_reg; | 75 | u32 ctl_reg, lvds_reg, stat_reg; |
76 | 76 | ||
77 | if (HAS_PCH_SPLIT(dev)) { | 77 | if (HAS_PCH_SPLIT(dev)) { |
78 | ctl_reg = PCH_PP_CONTROL; | 78 | ctl_reg = PCH_PP_CONTROL; |
79 | lvds_reg = PCH_LVDS; | 79 | lvds_reg = PCH_LVDS; |
80 | stat_reg = PCH_PP_STATUS; | ||
80 | } else { | 81 | } else { |
81 | ctl_reg = PP_CONTROL; | 82 | ctl_reg = PP_CONTROL; |
82 | lvds_reg = LVDS; | 83 | lvds_reg = LVDS; |
84 | stat_reg = PP_STATUS; | ||
83 | } | 85 | } |
84 | 86 | ||
85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | 87 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | 96 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
95 | intel_lvds->pfit_control, | 97 | intel_lvds->pfit_control, |
96 | intel_lvds->pfit_pgm_ratios); | 98 | intel_lvds->pfit_pgm_ratios); |
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | 99 | |
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | 100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); |
99 | } else { | 101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); |
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | 102 | intel_lvds->pfit_dirty = false; |
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
102 | intel_lvds->pfit_dirty = false; | ||
103 | } | ||
104 | } | 103 | } |
105 | 104 | ||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | 105 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
107 | POSTING_READ(lvds_reg); | 106 | POSTING_READ(lvds_reg); |
107 | if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) | ||
108 | DRM_ERROR("timed out waiting for panel to power on\n"); | ||
108 | 109 | ||
109 | intel_panel_enable_backlight(dev); | 110 | intel_panel_enable_backlight(dev); |
110 | } | 111 | } |
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) | |||
113 | { | 114 | { |
114 | struct drm_device *dev = intel_lvds->base.base.dev; | 115 | struct drm_device *dev = intel_lvds->base.base.dev; |
115 | struct drm_i915_private *dev_priv = dev->dev_private; | 116 | struct drm_i915_private *dev_priv = dev->dev_private; |
116 | u32 ctl_reg, lvds_reg; | 117 | u32 ctl_reg, lvds_reg, stat_reg; |
117 | 118 | ||
118 | if (HAS_PCH_SPLIT(dev)) { | 119 | if (HAS_PCH_SPLIT(dev)) { |
119 | ctl_reg = PCH_PP_CONTROL; | 120 | ctl_reg = PCH_PP_CONTROL; |
120 | lvds_reg = PCH_LVDS; | 121 | lvds_reg = PCH_LVDS; |
122 | stat_reg = PCH_PP_STATUS; | ||
121 | } else { | 123 | } else { |
122 | ctl_reg = PP_CONTROL; | 124 | ctl_reg = PP_CONTROL; |
123 | lvds_reg = LVDS; | 125 | lvds_reg = LVDS; |
126 | stat_reg = PP_STATUS; | ||
124 | } | 127 | } |
125 | 128 | ||
126 | intel_panel_disable_backlight(dev); | 129 | intel_panel_disable_backlight(dev); |
127 | 130 | ||
128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | 131 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
132 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) | ||
133 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
129 | 134 | ||
130 | if (intel_lvds->pfit_control) { | 135 | if (intel_lvds->pfit_control) { |
131 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
132 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
133 | |||
134 | I915_WRITE(PFIT_CONTROL, 0); | 136 | I915_WRITE(PFIT_CONTROL, 0); |
135 | intel_lvds->pfit_dirty = true; | 137 | intel_lvds->pfit_dirty = true; |
136 | } | 138 | } |
@@ -398,53 +400,21 @@ out: | |||
398 | 400 | ||
399 | static void intel_lvds_prepare(struct drm_encoder *encoder) | 401 | static void intel_lvds_prepare(struct drm_encoder *encoder) |
400 | { | 402 | { |
401 | struct drm_device *dev = encoder->dev; | ||
402 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
404 | 404 | ||
405 | /* We try to do the minimum that is necessary in order to unlock | 405 | /* |
406 | * the registers for mode setting. | ||
407 | * | ||
408 | * On Ironlake, this is quite simple as we just set the unlock key | ||
409 | * and ignore all subtleties. (This may cause some issues...) | ||
410 | * | ||
411 | * Prior to Ironlake, we must disable the pipe if we want to adjust | 406 | * Prior to Ironlake, we must disable the pipe if we want to adjust |
412 | * the panel fitter. However at all other times we can just reset | 407 | * the panel fitter. However at all other times we can just reset |
413 | * the registers regardless. | 408 | * the registers regardless. |
414 | */ | 409 | */ |
415 | 410 | if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) | |
416 | if (HAS_PCH_SPLIT(dev)) { | 411 | intel_lvds_disable(intel_lvds); |
417 | I915_WRITE(PCH_PP_CONTROL, | ||
418 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
419 | } else if (intel_lvds->pfit_dirty) { | ||
420 | I915_WRITE(PP_CONTROL, | ||
421 | (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) | ||
422 | & ~POWER_TARGET_ON); | ||
423 | } else { | ||
424 | I915_WRITE(PP_CONTROL, | ||
425 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
426 | } | ||
427 | } | 412 | } |
428 | 413 | ||
429 | static void intel_lvds_commit(struct drm_encoder *encoder) | 414 | static void intel_lvds_commit(struct drm_encoder *encoder) |
430 | { | 415 | { |
431 | struct drm_device *dev = encoder->dev; | ||
432 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
433 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 416 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
434 | 417 | ||
435 | /* Undo any unlocking done in prepare to prevent accidental | ||
436 | * adjustment of the registers. | ||
437 | */ | ||
438 | if (HAS_PCH_SPLIT(dev)) { | ||
439 | u32 val = I915_READ(PCH_PP_CONTROL); | ||
440 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
441 | I915_WRITE(PCH_PP_CONTROL, val & 0x3); | ||
442 | } else { | ||
443 | u32 val = I915_READ(PP_CONTROL); | ||
444 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
445 | I915_WRITE(PP_CONTROL, val & 0x3); | ||
446 | } | ||
447 | |||
448 | /* Always do a full power on as we do not know what state | 418 | /* Always do a full power on as we do not know what state |
449 | * we were left in. | 419 | * we were left in. |
450 | */ | 420 | */ |
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
582 | struct drm_device *dev = connector->dev; | 552 | struct drm_device *dev = connector->dev; |
583 | struct drm_i915_private *dev_priv = dev->dev_private; | 553 | struct drm_i915_private *dev_priv = dev->dev_private; |
584 | 554 | ||
555 | intel_panel_destroy_backlight(dev); | ||
556 | |||
585 | if (dev_priv->lid_notifier.notifier_call) | 557 | if (dev_priv->lid_notifier.notifier_call) |
586 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 558 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
587 | drm_sysfs_connector_remove(connector); | 559 | drm_sysfs_connector_remove(connector); |
@@ -1040,6 +1012,19 @@ out: | |||
1040 | pwm = I915_READ(BLC_PWM_PCH_CTL1); | 1012 | pwm = I915_READ(BLC_PWM_PCH_CTL1); |
1041 | pwm |= PWM_PCH_ENABLE; | 1013 | pwm |= PWM_PCH_ENABLE; |
1042 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | 1014 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); |
1015 | /* | ||
1016 | * Unlock registers and just | ||
1017 | * leave them unlocked | ||
1018 | */ | ||
1019 | I915_WRITE(PCH_PP_CONTROL, | ||
1020 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1021 | } else { | ||
1022 | /* | ||
1023 | * Unlock registers and just | ||
1024 | * leave them unlocked | ||
1025 | */ | ||
1026 | I915_WRITE(PP_CONTROL, | ||
1027 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1043 | } | 1028 | } |
1044 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | 1029 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; |
1045 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | 1030 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { |
@@ -1049,6 +1034,9 @@ out: | |||
1049 | /* keep the LVDS connector */ | 1034 | /* keep the LVDS connector */ |
1050 | dev_priv->int_lvds_connector = connector; | 1035 | dev_priv->int_lvds_connector = connector; |
1051 | drm_sysfs_connector_add(connector); | 1036 | drm_sysfs_connector_add(connector); |
1037 | |||
1038 | intel_panel_setup_backlight(dev); | ||
1039 | |||
1052 | return true; | 1040 | return true; |
1053 | 1041 | ||
1054 | failed: | 1042 | failed: |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b7c5ddb564d1..b8e8158bb16e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev) | |||
227 | asle->aslc = asle_stat; | 227 | asle->aslc = asle_stat; |
228 | } | 228 | } |
229 | 229 | ||
230 | /* Only present on Ironlake+ */ | ||
231 | void intel_opregion_gse_intr(struct drm_device *dev) | 230 | void intel_opregion_gse_intr(struct drm_device *dev) |
232 | { | 231 | { |
233 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 05f500cd9c24..a9e0c7bcd317 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -277,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev) | |||
277 | dev_priv->backlight_enabled = true; | 277 | dev_priv->backlight_enabled = true; |
278 | } | 278 | } |
279 | 279 | ||
280 | void intel_panel_setup_backlight(struct drm_device *dev) | 280 | static void intel_panel_init_backlight(struct drm_device *dev) |
281 | { | 281 | { |
282 | struct drm_i915_private *dev_priv = dev->dev_private; | 282 | struct drm_i915_private *dev_priv = dev->dev_private; |
283 | 283 | ||
@@ -309,3 +309,73 @@ intel_panel_detect(struct drm_device *dev) | |||
309 | 309 | ||
310 | return connector_status_unknown; | 310 | return connector_status_unknown; |
311 | } | 311 | } |
312 | |||
313 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
314 | static int intel_panel_update_status(struct backlight_device *bd) | ||
315 | { | ||
316 | struct drm_device *dev = bl_get_data(bd); | ||
317 | intel_panel_set_backlight(dev, bd->props.brightness); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int intel_panel_get_brightness(struct backlight_device *bd) | ||
322 | { | ||
323 | struct drm_device *dev = bl_get_data(bd); | ||
324 | return intel_panel_get_backlight(dev); | ||
325 | } | ||
326 | |||
327 | static const struct backlight_ops intel_panel_bl_ops = { | ||
328 | .update_status = intel_panel_update_status, | ||
329 | .get_brightness = intel_panel_get_brightness, | ||
330 | }; | ||
331 | |||
332 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
333 | { | ||
334 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
335 | struct backlight_properties props; | ||
336 | struct drm_connector *connector; | ||
337 | |||
338 | intel_panel_init_backlight(dev); | ||
339 | |||
340 | if (dev_priv->int_lvds_connector) | ||
341 | connector = dev_priv->int_lvds_connector; | ||
342 | else if (dev_priv->int_edp_connector) | ||
343 | connector = dev_priv->int_edp_connector; | ||
344 | else | ||
345 | return -ENODEV; | ||
346 | |||
347 | props.type = BACKLIGHT_RAW; | ||
348 | props.max_brightness = intel_panel_get_max_backlight(dev); | ||
349 | dev_priv->backlight = | ||
350 | backlight_device_register("intel_backlight", | ||
351 | &connector->kdev, dev, | ||
352 | &intel_panel_bl_ops, &props); | ||
353 | |||
354 | if (IS_ERR(dev_priv->backlight)) { | ||
355 | DRM_ERROR("Failed to register backlight: %ld\n", | ||
356 | PTR_ERR(dev_priv->backlight)); | ||
357 | dev_priv->backlight = NULL; | ||
358 | return -ENODEV; | ||
359 | } | ||
360 | dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev); | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
365 | { | ||
366 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
367 | if (dev_priv->backlight) | ||
368 | backlight_device_unregister(dev_priv->backlight); | ||
369 | } | ||
370 | #else | ||
371 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
372 | { | ||
373 | intel_panel_init_backlight(dev); | ||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
378 | { | ||
379 | return; | ||
380 | } | ||
381 | #endif | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 47b9b2777038..c30626ea9f93 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
290 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 290 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
291 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | 291 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
292 | I915_WRITE(MI_MODE, mode); | 292 | I915_WRITE(MI_MODE, mode); |
293 | if (IS_GEN7(dev)) | ||
294 | I915_WRITE(GFX_MODE_GEN7, | ||
295 | GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | ||
296 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); | ||
293 | } | 297 | } |
294 | 298 | ||
295 | if (INTEL_INFO(dev)->gen >= 6) { | 299 | if (INTEL_INFO(dev)->gen >= 6) { |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 645b84b3d203..7ad43c6b1db7 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, | |||
613 | return true; | 613 | return true; |
614 | } | 614 | } |
615 | 615 | ||
616 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | ||
617 | { | ||
618 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
619 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | ||
620 | |||
621 | if (!radeon_dp_get_link_status(radeon_connector, link_status)) | ||
622 | return false; | ||
623 | if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) | ||
624 | return false; | ||
625 | return true; | ||
626 | } | ||
627 | |||
616 | struct radeon_dp_link_train_info { | 628 | struct radeon_dp_link_train_info { |
617 | struct radeon_device *rdev; | 629 | struct radeon_device *rdev; |
618 | struct drm_encoder *encoder; | 630 | struct drm_encoder *encoder; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 14dce9f22172..fb5fa0898868 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -743,7 +743,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
743 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || | 743 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || |
744 | !evergreen_check_latency_hiding(&wm) || | 744 | !evergreen_check_latency_hiding(&wm) || |
745 | (rdev->disp_priority == 2)) { | 745 | (rdev->disp_priority == 2)) { |
746 | DRM_INFO("force priority to high\n"); | 746 | DRM_DEBUG_KMS("force priority to high\n"); |
747 | priority_a_cnt |= PRIORITY_ALWAYS_ON; | 747 | priority_a_cnt |= PRIORITY_ALWAYS_ON; |
748 | priority_b_cnt |= PRIORITY_ALWAYS_ON; | 748 | priority_b_cnt |= PRIORITY_ALWAYS_ON; |
749 | } | 749 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 6d6b5f16bc09..4f0c1ecac72e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
60 | 60 | ||
61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | 61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
62 | 62 | ||
63 | /* powering up/down the eDP panel generates hpd events which | 63 | /* if the connector is already off, don't turn it back on */ |
64 | * can interfere with modesetting. | 64 | if (connector->dpms != DRM_MODE_DPMS_ON) |
65 | */ | ||
66 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
67 | return; | 65 | return; |
68 | 66 | ||
69 | /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ | 67 | /* just deal with DP (not eDP) here. */ |
70 | if (rdev->family >= CHIP_R600) { | 68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | 69 | int saved_dpms = connector->dpms; |
70 | |||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && | ||
72 | radeon_dp_needs_link_train(radeon_connector)) | ||
72 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
73 | else | 74 | else |
74 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
76 | connector->dpms = saved_dpms; | ||
75 | } | 77 | } |
76 | } | 78 | } |
77 | 79 | ||
@@ -464,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, | |||
464 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | 466 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) |
465 | return true; | 467 | return true; |
466 | } | 468 | } |
469 | /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 | ||
470 | * (RS690M) sends data to i2c bus for an HDMI connector that | ||
471 | * is not implemented */ | ||
472 | if ((dev->pdev->device == 0x791f) && | ||
473 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
474 | (dev->pdev->subsystem_device == 0xff68)) { | ||
475 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
476 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
477 | return true; | ||
478 | } | ||
467 | 479 | ||
468 | /* Default: no EDID header probe required for DDC probing */ | 480 | /* Default: no EDID header probe required for DDC probing */ |
469 | return false; | 481 | return false; |
@@ -474,11 +486,19 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
474 | { | 486 | { |
475 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 487 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
476 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | 488 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
489 | struct drm_display_mode *t, *mode; | ||
490 | |||
491 | /* If the EDID preferred mode doesn't match the native mode, use it */ | ||
492 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | ||
493 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | ||
494 | if (mode->hdisplay != native_mode->hdisplay || | ||
495 | mode->vdisplay != native_mode->vdisplay) | ||
496 | memcpy(native_mode, mode, sizeof(*mode)); | ||
497 | } | ||
498 | } | ||
477 | 499 | ||
478 | /* Try to get native mode details from EDID if necessary */ | 500 | /* Try to get native mode details from EDID if necessary */ |
479 | if (!native_mode->clock) { | 501 | if (!native_mode->clock) { |
480 | struct drm_display_mode *t, *mode; | ||
481 | |||
482 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | 502 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { |
483 | if (mode->hdisplay == native_mode->hdisplay && | 503 | if (mode->hdisplay == native_mode->hdisplay && |
484 | mode->vdisplay == native_mode->vdisplay) { | 504 | mode->vdisplay == native_mode->vdisplay) { |
@@ -489,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
489 | } | 509 | } |
490 | } | 510 | } |
491 | } | 511 | } |
512 | |||
492 | if (!native_mode->clock) { | 513 | if (!native_mode->clock) { |
493 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); | 514 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
494 | radeon_encoder->rmx_type = RMX_OFF; | 515 | radeon_encoder->rmx_type = RMX_OFF; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 440e6ecccc40..b51e15725c6e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <drm/radeon_drm.h> | 32 | #include <drm/radeon_drm.h> |
33 | #include <linux/vgaarb.h> | 33 | #include <linux/vgaarb.h> |
34 | #include <linux/vga_switcheroo.h> | 34 | #include <linux/vga_switcheroo.h> |
35 | #include <linux/efi.h> | ||
35 | #include "radeon_reg.h" | 36 | #include "radeon_reg.h" |
36 | #include "radeon.h" | 37 | #include "radeon.h" |
37 | #include "atom.h" | 38 | #include "atom.h" |
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
300 | mc->mc_vram_size = mc->aper_size; | 301 | mc->mc_vram_size = mc->aper_size; |
301 | } | 302 | } |
302 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 303 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
304 | if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) | ||
305 | mc->real_vram_size = radeon_vram_limit; | ||
303 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | 306 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
304 | mc->mc_vram_size >> 20, mc->vram_start, | 307 | mc->mc_vram_size >> 20, mc->vram_start, |
305 | mc->vram_end, mc->real_vram_size >> 20); | 308 | mc->vram_end, mc->real_vram_size >> 20); |
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
348 | { | 351 | { |
349 | uint32_t reg; | 352 | uint32_t reg; |
350 | 353 | ||
354 | if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) | ||
355 | return false; | ||
356 | |||
351 | /* first check CRTCs */ | 357 | /* first check CRTCs */ |
352 | if (ASIC_IS_DCE41(rdev)) { | 358 | if (ASIC_IS_DCE41(rdev)) { |
353 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 359 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index b293487e5aa3..319d85d7e759 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev, | |||
2323 | default: | 2323 | default: |
2324 | encoder->possible_crtcs = 0x3; | 2324 | encoder->possible_crtcs = 0x3; |
2325 | break; | 2325 | break; |
2326 | case 4: | ||
2327 | encoder->possible_crtcs = 0xf; | ||
2328 | break; | ||
2326 | case 6: | 2329 | case 6: |
2327 | encoder->possible_crtcs = 0x3f; | 2330 | encoder->possible_crtcs = 0x3f; |
2328 | break; | 2331 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d09031c03e26..68820f5f6303 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -479,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector, | |||
479 | struct drm_display_mode *mode); | 479 | struct drm_display_mode *mode); |
480 | extern void radeon_dp_link_train(struct drm_encoder *encoder, | 480 | extern void radeon_dp_link_train(struct drm_encoder *encoder, |
481 | struct drm_connector *connector); | 481 | struct drm_connector *connector); |
482 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | ||
482 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); | 483 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); |
483 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); | 484 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); |
484 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); | 485 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index dee4a0c1b4b2..602fa3541c45 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
40 | size = 1024 * 1024; | 40 | size = 1024 * 1024; |
41 | 41 | ||
42 | /* Number of tests = | 42 | /* Number of tests = |
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | 43 | * (Total GTT - IB pool - writeback page - ring buffers) / test size |
44 | */ | 44 | */ |
45 | n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | 45 | n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; |
46 | rdev->cp.ring_size)) / size; | 46 | if (rdev->wb.wb_obj) |
47 | n -= RADEON_GPU_PAGE_SIZE; | ||
48 | if (rdev->ih.ring_obj) | ||
49 | n -= rdev->ih.ring_size; | ||
50 | n /= size; | ||
47 | 51 | ||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | 52 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); |
49 | if (!gtt_obj) { | 53 | if (!gtt_obj) { |
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
132 | gtt_start++, vram_start++) { | 136 | gtt_start++, vram_start++) { |
133 | if (*vram_start != gtt_start) { | 137 | if (*vram_start != gtt_start) { |
134 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | 138 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " |
135 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | 139 | "expected 0x%p (GTT/VRAM offset " |
136 | i, *vram_start, gtt_start, gtt_map, | 140 | "0x%16llx/0x%16llx)\n", |
137 | gtt_end); | 141 | i, *vram_start, gtt_start, |
142 | (unsigned long long) | ||
143 | (gtt_addr - rdev->mc.gtt_start + | ||
144 | (void*)gtt_start - gtt_map), | ||
145 | (unsigned long long) | ||
146 | (vram_addr - rdev->mc.vram_start + | ||
147 | (void*)gtt_start - gtt_map)); | ||
138 | radeon_bo_kunmap(vram_obj); | 148 | radeon_bo_kunmap(vram_obj); |
139 | goto out_cleanup; | 149 | goto out_cleanup; |
140 | } | 150 | } |
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
175 | gtt_start++, vram_start++) { | 185 | gtt_start++, vram_start++) { |
176 | if (*gtt_start != vram_start) { | 186 | if (*gtt_start != vram_start) { |
177 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | 187 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " |
178 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | 188 | "expected 0x%p (VRAM/GTT offset " |
179 | i, *gtt_start, vram_start, vram_map, | 189 | "0x%16llx/0x%16llx)\n", |
180 | vram_end); | 190 | i, *gtt_start, vram_start, |
191 | (unsigned long long) | ||
192 | (vram_addr - rdev->mc.vram_start + | ||
193 | (void*)vram_start - vram_map), | ||
194 | (unsigned long long) | ||
195 | (gtt_addr - rdev->mc.gtt_start + | ||
196 | (void*)vram_start - vram_map)); | ||
181 | radeon_bo_kunmap(gtt_obj[i]); | 197 | radeon_bo_kunmap(gtt_obj[i]); |
182 | goto out_cleanup; | 198 | goto out_cleanup; |
183 | } | 199 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 60125ddba1e9..9b86fb0e4122 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -450,6 +450,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ | |||
450 | return -EINVAL; | 450 | return -EINVAL; |
451 | mem->bus.base = rdev->mc.aper_base; | 451 | mem->bus.base = rdev->mc.aper_base; |
452 | mem->bus.is_iomem = true; | 452 | mem->bus.is_iomem = true; |
453 | #ifdef __alpha__ | ||
454 | /* | ||
455 | * Alpha: use bus.addr to hold the ioremap() return, | ||
456 | * so we can modify bus.base below. | ||
457 | */ | ||
458 | if (mem->placement & TTM_PL_FLAG_WC) | ||
459 | mem->bus.addr = | ||
460 | ioremap_wc(mem->bus.base + mem->bus.offset, | ||
461 | mem->bus.size); | ||
462 | else | ||
463 | mem->bus.addr = | ||
464 | ioremap_nocache(mem->bus.base + mem->bus.offset, | ||
465 | mem->bus.size); | ||
466 | |||
467 | /* | ||
468 | * Alpha: Use just the bus offset plus | ||
469 | * the hose/domain memory base for bus.base. | ||
470 | * It then can be used to build PTEs for VRAM | ||
471 | * access, as done in ttm_bo_vm_fault(). | ||
472 | */ | ||
473 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | ||
474 | rdev->ddev->hose->dense_mem_base; | ||
475 | #endif | ||
453 | break; | 476 | break; |
454 | default: | 477 | default: |
455 | return -EINVAL; | 478 | return -EINVAL; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 56619f64b6bf..a4d38d85909a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
353 | 353 | ||
354 | ret = ttm_tt_set_user(bo->ttm, current, | 354 | ret = ttm_tt_set_user(bo->ttm, current, |
355 | bo->buffer_start, bo->num_pages); | 355 | bo->buffer_start, bo->num_pages); |
356 | if (unlikely(ret != 0)) | 356 | if (unlikely(ret != 0)) { |
357 | ttm_tt_destroy(bo->ttm); | 357 | ttm_tt_destroy(bo->ttm); |
358 | bo->ttm = NULL; | ||
359 | } | ||
358 | break; | 360 | break; |
359 | default: | 361 | default: |
360 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); | 362 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); |
@@ -390,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
390 | * Create and bind a ttm if required. | 392 | * Create and bind a ttm if required. |
391 | */ | 393 | */ |
392 | 394 | ||
393 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { | 395 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
394 | ret = ttm_bo_add_ttm(bo, false); | 396 | if (bo->ttm == NULL) { |
395 | if (ret) | 397 | ret = ttm_bo_add_ttm(bo, false); |
396 | goto out_err; | 398 | if (ret) |
399 | goto out_err; | ||
400 | } | ||
397 | 401 | ||
398 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); | 402 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); |
399 | if (ret) | 403 | if (ret) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 77dbf408c0d0..ae3c6f5dd2b7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
635 | if (ret) | 635 | if (ret) |
636 | return ret; | 636 | return ret; |
637 | 637 | ||
638 | ttm_bo_free_old_node(bo); | ||
639 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 638 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
640 | (bo->ttm != NULL)) { | 639 | (bo->ttm != NULL)) { |
641 | ttm_tt_unbind(bo->ttm); | 640 | ttm_tt_unbind(bo->ttm); |
642 | ttm_tt_destroy(bo->ttm); | 641 | ttm_tt_destroy(bo->ttm); |
643 | bo->ttm = NULL; | 642 | bo->ttm = NULL; |
644 | } | 643 | } |
644 | ttm_bo_free_old_node(bo); | ||
645 | } else { | 645 | } else { |
646 | /** | 646 | /** |
647 | * This should help pipeline ordinary buffer moves. | 647 | * This should help pipeline ordinary buffer moves. |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 306b15f39c9c..1130a8987125 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY | |||
589 | config HID_WIIMOTE | 589 | config HID_WIIMOTE |
590 | tristate "Nintendo Wii Remote support" | 590 | tristate "Nintendo Wii Remote support" |
591 | depends on BT_HIDP | 591 | depends on BT_HIDP |
592 | depends on LEDS_CLASS | ||
592 | ---help--- | 593 | ---help--- |
593 | Support for the Nintendo Wii Remote bluetooth device. | 594 | Support for the Nintendo Wii Remote bluetooth device. |
594 | 595 | ||
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index b85744fe8464..18b3bc646bf3 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = { | |||
444 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), | 444 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), |
445 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | | 445 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | |
446 | APPLE_RDESC_JIS }, | 446 | APPLE_RDESC_JIS }, |
447 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), | ||
448 | .driver_data = APPLE_HAS_FN }, | ||
449 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), | ||
450 | .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, | ||
451 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS), | ||
452 | .driver_data = APPLE_HAS_FN }, | ||
447 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), | 453 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), |
448 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | 454 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
449 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), | 455 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1a5cf0c9cfca..242353df3dc4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1340 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, | 1340 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, |
1341 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, | 1341 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, |
1342 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, | 1342 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, |
1343 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) }, | ||
1344 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) }, | ||
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) }, | ||
1343 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, | 1346 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, |
1344 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, | 1347 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, |
1345 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, | 1348 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index db63ccf21cc8..7d27d2b0445a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -109,6 +109,9 @@ | |||
109 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 | 109 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 |
110 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 | 110 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 |
111 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 | 111 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 |
112 | #define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f | ||
113 | #define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250 | ||
114 | #define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251 | ||
112 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 | 115 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 |
113 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a | 116 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a |
114 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b | 117 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b |
@@ -576,6 +579,9 @@ | |||
576 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 | 579 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 |
577 | #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 | 580 | #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 |
578 | 581 | ||
582 | #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f | ||
583 | #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 | ||
584 | |||
579 | #define USB_VENDOR_ID_SKYCABLE 0x1223 | 585 | #define USB_VENDOR_ID_SKYCABLE 0x1223 |
580 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 | 586 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 |
581 | 587 | ||
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index a594383ce03d..85a02e5f9fe8 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c | |||
@@ -10,10 +10,10 @@ | |||
10 | * any later version. | 10 | * any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/atomic.h> | ||
14 | #include <linux/device.h> | 13 | #include <linux/device.h> |
15 | #include <linux/hid.h> | 14 | #include <linux/hid.h> |
16 | #include <linux/input.h> | 15 | #include <linux/input.h> |
16 | #include <linux/leds.h> | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include "hid-ids.h" | 19 | #include "hid-ids.h" |
@@ -33,9 +33,9 @@ struct wiimote_state { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct wiimote_data { | 35 | struct wiimote_data { |
36 | atomic_t ready; | ||
37 | struct hid_device *hdev; | 36 | struct hid_device *hdev; |
38 | struct input_dev *input; | 37 | struct input_dev *input; |
38 | struct led_classdev *leds[4]; | ||
39 | 39 | ||
40 | spinlock_t qlock; | 40 | spinlock_t qlock; |
41 | __u8 head; | 41 | __u8 head; |
@@ -53,8 +53,15 @@ struct wiimote_data { | |||
53 | #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ | 53 | #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ |
54 | WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) | 54 | WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) |
55 | 55 | ||
56 | /* return flag for led \num */ | ||
57 | #define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1)) | ||
58 | |||
56 | enum wiiproto_reqs { | 59 | enum wiiproto_reqs { |
60 | WIIPROTO_REQ_NULL = 0x0, | ||
57 | WIIPROTO_REQ_LED = 0x11, | 61 | WIIPROTO_REQ_LED = 0x11, |
62 | WIIPROTO_REQ_DRM = 0x12, | ||
63 | WIIPROTO_REQ_STATUS = 0x20, | ||
64 | WIIPROTO_REQ_RETURN = 0x22, | ||
58 | WIIPROTO_REQ_DRM_K = 0x30, | 65 | WIIPROTO_REQ_DRM_K = 0x30, |
59 | }; | 66 | }; |
60 | 67 | ||
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = { | |||
87 | BTN_MODE, /* WIIPROTO_KEY_HOME */ | 94 | BTN_MODE, /* WIIPROTO_KEY_HOME */ |
88 | }; | 95 | }; |
89 | 96 | ||
90 | #define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \ | ||
91 | dev)) | ||
92 | |||
93 | static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, | 97 | static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, |
94 | size_t count) | 98 | size_t count) |
95 | { | 99 | { |
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds) | |||
192 | wiimote_queue(wdata, cmd, sizeof(cmd)); | 196 | wiimote_queue(wdata, cmd, sizeof(cmd)); |
193 | } | 197 | } |
194 | 198 | ||
195 | #define wiifs_led_show_set(num) \ | 199 | /* |
196 | static ssize_t wiifs_led_show_##num(struct device *dev, \ | 200 | * Check what peripherals of the wiimote are currently |
197 | struct device_attribute *attr, char *buf) \ | 201 | * active and select a proper DRM that supports all of |
198 | { \ | 202 | * the requested data inputs. |
199 | struct wiimote_data *wdata = dev_to_wii(dev); \ | 203 | */ |
200 | unsigned long flags; \ | 204 | static __u8 select_drm(struct wiimote_data *wdata) |
201 | int state; \ | 205 | { |
202 | \ | 206 | return WIIPROTO_REQ_DRM_K; |
203 | if (!atomic_read(&wdata->ready)) \ | 207 | } |
204 | return -EBUSY; \ | 208 | |
205 | \ | 209 | static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm) |
206 | spin_lock_irqsave(&wdata->state.lock, flags); \ | 210 | { |
207 | state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ | 211 | __u8 cmd[3]; |
208 | spin_unlock_irqrestore(&wdata->state.lock, flags); \ | 212 | |
209 | \ | 213 | if (drm == WIIPROTO_REQ_NULL) |
210 | return sprintf(buf, "%d\n", state); \ | 214 | drm = select_drm(wdata); |
211 | } \ | 215 | |
212 | static ssize_t wiifs_led_set_##num(struct device *dev, \ | 216 | cmd[0] = WIIPROTO_REQ_DRM; |
213 | struct device_attribute *attr, const char *buf, size_t count) \ | 217 | cmd[1] = 0; |
214 | { \ | 218 | cmd[2] = drm; |
215 | struct wiimote_data *wdata = dev_to_wii(dev); \ | 219 | |
216 | int tmp = simple_strtoul(buf, NULL, 10); \ | 220 | wiimote_queue(wdata, cmd, sizeof(cmd)); |
217 | unsigned long flags; \ | 221 | } |
218 | __u8 state; \ | 222 | |
219 | \ | 223 | static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev) |
220 | if (!atomic_read(&wdata->ready)) \ | 224 | { |
221 | return -EBUSY; \ | 225 | struct wiimote_data *wdata; |
222 | \ | 226 | struct device *dev = led_dev->dev->parent; |
223 | spin_lock_irqsave(&wdata->state.lock, flags); \ | 227 | int i; |
224 | \ | 228 | unsigned long flags; |
225 | state = wdata->state.flags; \ | 229 | bool value = false; |
226 | \ | 230 | |
227 | if (tmp) \ | 231 | wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); |
228 | wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ | 232 | |
229 | else \ | 233 | for (i = 0; i < 4; ++i) { |
230 | wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ | 234 | if (wdata->leds[i] == led_dev) { |
231 | \ | 235 | spin_lock_irqsave(&wdata->state.lock, flags); |
232 | spin_unlock_irqrestore(&wdata->state.lock, flags); \ | 236 | value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1); |
233 | \ | 237 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
234 | return count; \ | 238 | break; |
235 | } \ | 239 | } |
236 | static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ | 240 | } |
237 | wiifs_led_set_##num) | 241 | |
238 | 242 | return value ? LED_FULL : LED_OFF; | |
239 | wiifs_led_show_set(1); | 243 | } |
240 | wiifs_led_show_set(2); | 244 | |
241 | wiifs_led_show_set(3); | 245 | static void wiimote_leds_set(struct led_classdev *led_dev, |
242 | wiifs_led_show_set(4); | 246 | enum led_brightness value) |
247 | { | ||
248 | struct wiimote_data *wdata; | ||
249 | struct device *dev = led_dev->dev->parent; | ||
250 | int i; | ||
251 | unsigned long flags; | ||
252 | __u8 state, flag; | ||
253 | |||
254 | wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); | ||
255 | |||
256 | for (i = 0; i < 4; ++i) { | ||
257 | if (wdata->leds[i] == led_dev) { | ||
258 | flag = WIIPROTO_FLAG_LED(i + 1); | ||
259 | spin_lock_irqsave(&wdata->state.lock, flags); | ||
260 | state = wdata->state.flags; | ||
261 | if (value == LED_OFF) | ||
262 | wiiproto_req_leds(wdata, state & ~flag); | ||
263 | else | ||
264 | wiiproto_req_leds(wdata, state | flag); | ||
265 | spin_unlock_irqrestore(&wdata->state.lock, flags); | ||
266 | break; | ||
267 | } | ||
268 | } | ||
269 | } | ||
243 | 270 | ||
244 | static int wiimote_input_event(struct input_dev *dev, unsigned int type, | 271 | static int wiimote_input_event(struct input_dev *dev, unsigned int type, |
245 | unsigned int code, int value) | 272 | unsigned int code, int value) |
246 | { | 273 | { |
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static int wiimote_input_open(struct input_dev *dev) | ||
278 | { | ||
247 | struct wiimote_data *wdata = input_get_drvdata(dev); | 279 | struct wiimote_data *wdata = input_get_drvdata(dev); |
248 | 280 | ||
249 | if (!atomic_read(&wdata->ready)) | 281 | return hid_hw_open(wdata->hdev); |
250 | return -EBUSY; | 282 | } |
251 | /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ | ||
252 | smp_rmb(); | ||
253 | 283 | ||
254 | return 0; | 284 | static void wiimote_input_close(struct input_dev *dev) |
285 | { | ||
286 | struct wiimote_data *wdata = input_get_drvdata(dev); | ||
287 | |||
288 | hid_hw_close(wdata->hdev); | ||
255 | } | 289 | } |
256 | 290 | ||
257 | static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) | 291 | static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) |
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) | |||
281 | input_sync(wdata->input); | 315 | input_sync(wdata->input); |
282 | } | 316 | } |
283 | 317 | ||
318 | static void handler_status(struct wiimote_data *wdata, const __u8 *payload) | ||
319 | { | ||
320 | handler_keys(wdata, payload); | ||
321 | |||
322 | /* on status reports the drm is reset so we need to resend the drm */ | ||
323 | wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); | ||
324 | } | ||
325 | |||
326 | static void handler_return(struct wiimote_data *wdata, const __u8 *payload) | ||
327 | { | ||
328 | __u8 err = payload[3]; | ||
329 | __u8 cmd = payload[2]; | ||
330 | |||
331 | handler_keys(wdata, payload); | ||
332 | |||
333 | if (err) | ||
334 | hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err, | ||
335 | cmd); | ||
336 | } | ||
337 | |||
284 | struct wiiproto_handler { | 338 | struct wiiproto_handler { |
285 | __u8 id; | 339 | __u8 id; |
286 | size_t size; | 340 | size_t size; |
@@ -288,6 +342,8 @@ struct wiiproto_handler { | |||
288 | }; | 342 | }; |
289 | 343 | ||
290 | static struct wiiproto_handler handlers[] = { | 344 | static struct wiiproto_handler handlers[] = { |
345 | { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status }, | ||
346 | { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return }, | ||
291 | { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, | 347 | { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, |
292 | { .id = 0 } | 348 | { .id = 0 } |
293 | }; | 349 | }; |
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, | |||
300 | int i; | 356 | int i; |
301 | unsigned long flags; | 357 | unsigned long flags; |
302 | 358 | ||
303 | if (!atomic_read(&wdata->ready)) | ||
304 | return -EBUSY; | ||
305 | /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ | ||
306 | smp_rmb(); | ||
307 | |||
308 | if (size < 1) | 359 | if (size < 1) |
309 | return -EINVAL; | 360 | return -EINVAL; |
310 | 361 | ||
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, | |||
321 | return 0; | 372 | return 0; |
322 | } | 373 | } |
323 | 374 | ||
375 | static void wiimote_leds_destroy(struct wiimote_data *wdata) | ||
376 | { | ||
377 | int i; | ||
378 | struct led_classdev *led; | ||
379 | |||
380 | for (i = 0; i < 4; ++i) { | ||
381 | if (wdata->leds[i]) { | ||
382 | led = wdata->leds[i]; | ||
383 | wdata->leds[i] = NULL; | ||
384 | led_classdev_unregister(led); | ||
385 | kfree(led); | ||
386 | } | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static int wiimote_leds_create(struct wiimote_data *wdata) | ||
391 | { | ||
392 | int i, ret; | ||
393 | struct device *dev = &wdata->hdev->dev; | ||
394 | size_t namesz = strlen(dev_name(dev)) + 9; | ||
395 | struct led_classdev *led; | ||
396 | char *name; | ||
397 | |||
398 | for (i = 0; i < 4; ++i) { | ||
399 | led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); | ||
400 | if (!led) { | ||
401 | ret = -ENOMEM; | ||
402 | goto err; | ||
403 | } | ||
404 | name = (void*)&led[1]; | ||
405 | snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i); | ||
406 | led->name = name; | ||
407 | led->brightness = 0; | ||
408 | led->max_brightness = 1; | ||
409 | led->brightness_get = wiimote_leds_get; | ||
410 | led->brightness_set = wiimote_leds_set; | ||
411 | |||
412 | ret = led_classdev_register(dev, led); | ||
413 | if (ret) { | ||
414 | kfree(led); | ||
415 | goto err; | ||
416 | } | ||
417 | wdata->leds[i] = led; | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | |||
422 | err: | ||
423 | wiimote_leds_destroy(wdata); | ||
424 | return ret; | ||
425 | } | ||
426 | |||
324 | static struct wiimote_data *wiimote_create(struct hid_device *hdev) | 427 | static struct wiimote_data *wiimote_create(struct hid_device *hdev) |
325 | { | 428 | { |
326 | struct wiimote_data *wdata; | 429 | struct wiimote_data *wdata; |
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) | |||
341 | 444 | ||
342 | input_set_drvdata(wdata->input, wdata); | 445 | input_set_drvdata(wdata->input, wdata); |
343 | wdata->input->event = wiimote_input_event; | 446 | wdata->input->event = wiimote_input_event; |
447 | wdata->input->open = wiimote_input_open; | ||
448 | wdata->input->close = wiimote_input_close; | ||
344 | wdata->input->dev.parent = &wdata->hdev->dev; | 449 | wdata->input->dev.parent = &wdata->hdev->dev; |
345 | wdata->input->id.bustype = wdata->hdev->bus; | 450 | wdata->input->id.bustype = wdata->hdev->bus; |
346 | wdata->input->id.vendor = wdata->hdev->vendor; | 451 | wdata->input->id.vendor = wdata->hdev->vendor; |
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) | |||
362 | 467 | ||
363 | static void wiimote_destroy(struct wiimote_data *wdata) | 468 | static void wiimote_destroy(struct wiimote_data *wdata) |
364 | { | 469 | { |
470 | wiimote_leds_destroy(wdata); | ||
471 | |||
472 | input_unregister_device(wdata->input); | ||
473 | cancel_work_sync(&wdata->worker); | ||
474 | hid_hw_stop(wdata->hdev); | ||
475 | |||
365 | kfree(wdata); | 476 | kfree(wdata); |
366 | } | 477 | } |
367 | 478 | ||
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
377 | return -ENOMEM; | 488 | return -ENOMEM; |
378 | } | 489 | } |
379 | 490 | ||
380 | ret = device_create_file(&hdev->dev, &dev_attr_led1); | ||
381 | if (ret) | ||
382 | goto err; | ||
383 | ret = device_create_file(&hdev->dev, &dev_attr_led2); | ||
384 | if (ret) | ||
385 | goto err; | ||
386 | ret = device_create_file(&hdev->dev, &dev_attr_led3); | ||
387 | if (ret) | ||
388 | goto err; | ||
389 | ret = device_create_file(&hdev->dev, &dev_attr_led4); | ||
390 | if (ret) | ||
391 | goto err; | ||
392 | |||
393 | ret = hid_parse(hdev); | 491 | ret = hid_parse(hdev); |
394 | if (ret) { | 492 | if (ret) { |
395 | hid_err(hdev, "HID parse failed\n"); | 493 | hid_err(hdev, "HID parse failed\n"); |
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
408 | goto err_stop; | 506 | goto err_stop; |
409 | } | 507 | } |
410 | 508 | ||
411 | /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ | 509 | ret = wiimote_leds_create(wdata); |
412 | smp_wmb(); | 510 | if (ret) |
413 | atomic_set(&wdata->ready, 1); | 511 | goto err_free; |
512 | |||
414 | hid_info(hdev, "New device registered\n"); | 513 | hid_info(hdev, "New device registered\n"); |
415 | 514 | ||
416 | /* by default set led1 after device initialization */ | 515 | /* by default set led1 after device initialization */ |
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
420 | 519 | ||
421 | return 0; | 520 | return 0; |
422 | 521 | ||
522 | err_free: | ||
523 | wiimote_destroy(wdata); | ||
524 | return ret; | ||
525 | |||
423 | err_stop: | 526 | err_stop: |
424 | hid_hw_stop(hdev); | 527 | hid_hw_stop(hdev); |
425 | err: | 528 | err: |
426 | input_free_device(wdata->input); | 529 | input_free_device(wdata->input); |
427 | device_remove_file(&hdev->dev, &dev_attr_led1); | 530 | kfree(wdata); |
428 | device_remove_file(&hdev->dev, &dev_attr_led2); | ||
429 | device_remove_file(&hdev->dev, &dev_attr_led3); | ||
430 | device_remove_file(&hdev->dev, &dev_attr_led4); | ||
431 | wiimote_destroy(wdata); | ||
432 | return ret; | 531 | return ret; |
433 | } | 532 | } |
434 | 533 | ||
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev) | |||
437 | struct wiimote_data *wdata = hid_get_drvdata(hdev); | 536 | struct wiimote_data *wdata = hid_get_drvdata(hdev); |
438 | 537 | ||
439 | hid_info(hdev, "Device removed\n"); | 538 | hid_info(hdev, "Device removed\n"); |
440 | |||
441 | device_remove_file(&hdev->dev, &dev_attr_led1); | ||
442 | device_remove_file(&hdev->dev, &dev_attr_led2); | ||
443 | device_remove_file(&hdev->dev, &dev_attr_led3); | ||
444 | device_remove_file(&hdev->dev, &dev_attr_led4); | ||
445 | |||
446 | hid_hw_stop(hdev); | ||
447 | input_unregister_device(wdata->input); | ||
448 | |||
449 | cancel_work_sync(&wdata->worker); | ||
450 | wiimote_destroy(wdata); | 539 | wiimote_destroy(wdata); |
451 | } | 540 | } |
452 | 541 | ||
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 621959d5cc42..4bdb5d46c52c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -89,6 +89,7 @@ static const struct hid_blacklist { | |||
89 | 89 | ||
90 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, | 90 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, |
91 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, | 91 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, |
92 | { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, | ||
92 | { 0, 0 } | 93 | { 0, 0 } |
93 | }; | 94 | }; |
94 | 95 | ||
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index c4c40be0edbf..d22f241b6a67 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -114,7 +114,6 @@ struct i5k_amb_data { | |||
114 | void __iomem *amb_mmio; | 114 | void __iomem *amb_mmio; |
115 | struct i5k_device_attribute *attrs; | 115 | struct i5k_device_attribute *attrs; |
116 | unsigned int num_attrs; | 116 | unsigned int num_attrs; |
117 | unsigned long chipset_id; | ||
118 | }; | 117 | }; |
119 | 118 | ||
120 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, | 119 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, |
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data, | |||
444 | goto out; | 443 | goto out; |
445 | } | 444 | } |
446 | 445 | ||
447 | data->chipset_id = devid; | ||
448 | |||
449 | res = 0; | 446 | res = 0; |
450 | out: | 447 | out: |
451 | pci_dev_put(pcidev); | 448 | pci_dev_put(pcidev); |
@@ -478,23 +475,13 @@ out: | |||
478 | return res; | 475 | return res; |
479 | } | 476 | } |
480 | 477 | ||
481 | static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, | 478 | static struct { |
482 | unsigned long channel) | 479 | unsigned long err; |
483 | { | 480 | unsigned long fbd0; |
484 | switch (data->chipset_id) { | 481 | } chipset_ids[] __devinitdata = { |
485 | case PCI_DEVICE_ID_INTEL_5000_ERR: | 482 | { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, |
486 | return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; | 483 | { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, |
487 | case PCI_DEVICE_ID_INTEL_5400_ERR: | 484 | { 0, 0 } |
488 | return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; | ||
489 | default: | ||
490 | BUG(); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | static unsigned long chipset_ids[] = { | ||
495 | PCI_DEVICE_ID_INTEL_5000_ERR, | ||
496 | PCI_DEVICE_ID_INTEL_5400_ERR, | ||
497 | 0 | ||
498 | }; | 485 | }; |
499 | 486 | ||
500 | #ifdef MODULE | 487 | #ifdef MODULE |
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) | |||
510 | { | 497 | { |
511 | struct i5k_amb_data *data; | 498 | struct i5k_amb_data *data; |
512 | struct resource *reso; | 499 | struct resource *reso; |
513 | int i; | 500 | int i, res; |
514 | int res = -ENODEV; | ||
515 | 501 | ||
516 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 502 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
517 | if (!data) | 503 | if (!data) |
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) | |||
520 | /* Figure out where the AMB registers live */ | 506 | /* Figure out where the AMB registers live */ |
521 | i = 0; | 507 | i = 0; |
522 | do { | 508 | do { |
523 | res = i5k_find_amb_registers(data, chipset_ids[i]); | 509 | res = i5k_find_amb_registers(data, chipset_ids[i].err); |
510 | if (res == 0) | ||
511 | break; | ||
524 | i++; | 512 | i++; |
525 | } while (res && chipset_ids[i]); | 513 | } while (chipset_ids[i].err); |
526 | 514 | ||
527 | if (res) | 515 | if (res) |
528 | goto err; | 516 | goto err; |
529 | 517 | ||
530 | /* Copy the DIMM presence map for the first two channels */ | 518 | /* Copy the DIMM presence map for the first two channels */ |
531 | res = i5k_channel_probe(&data->amb_present[0], | 519 | res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0); |
532 | i5k_channel_pci_id(data, 0)); | ||
533 | if (res) | 520 | if (res) |
534 | goto err; | 521 | goto err; |
535 | 522 | ||
536 | /* Copy the DIMM presence map for the optional second two channels */ | 523 | /* Copy the DIMM presence map for the optional second two channels */ |
537 | i5k_channel_probe(&data->amb_present[2], | 524 | i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1); |
538 | i5k_channel_pci_id(data, 1)); | ||
539 | 525 | ||
540 | /* Set up resource regions */ | 526 | /* Set up resource regions */ |
541 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); | 527 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); |
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 1a409c5bc9bc..c316294c48b4 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c | |||
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | |||
432 | aem_send_message(ipmi); | 432 | aem_send_message(ipmi); |
433 | 433 | ||
434 | res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); | 434 | res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); |
435 | if (!res) | 435 | if (!res) { |
436 | return -ETIMEDOUT; | 436 | res = -ETIMEDOUT; |
437 | goto out; | ||
438 | } | ||
437 | 439 | ||
438 | if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || | 440 | if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || |
439 | memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { | 441 | memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { |
440 | kfree(rs_resp); | 442 | res = -ENOENT; |
441 | return -ENOENT; | 443 | goto out; |
442 | } | 444 | } |
443 | 445 | ||
444 | switch (size) { | 446 | switch (size) { |
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | |||
463 | break; | 465 | break; |
464 | } | 466 | } |
465 | } | 467 | } |
468 | res = 0; | ||
466 | 469 | ||
467 | return 0; | 470 | out: |
471 | kfree(rs_resp); | ||
472 | return res; | ||
468 | } | 473 | } |
469 | 474 | ||
470 | /* Update AEM energy registers */ | 475 | /* Update AEM energy registers */ |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index d7926f4336b5..eab11615dced 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -211,8 +211,7 @@ static int lookup_comp(struct ntc_data *data, | |||
211 | if (data->comp[mid].ohm <= ohm) { | 211 | if (data->comp[mid].ohm <= ohm) { |
212 | *i_low = mid; | 212 | *i_low = mid; |
213 | *i_high = mid - 1; | 213 | *i_high = mid - 1; |
214 | } | 214 | } else { |
215 | if (data->comp[mid].ohm > ohm) { | ||
216 | *i_low = mid + 1; | 215 | *i_low = mid + 1; |
217 | *i_high = mid; | 216 | *i_high = mid; |
218 | } | 217 | } |
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index d4bc114572de..ac254fba551b 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c | |||
@@ -161,6 +161,17 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int lm25066_write_byte(struct i2c_client *client, int page, u8 value) | ||
165 | { | ||
166 | if (page > 1) | ||
167 | return -EINVAL; | ||
168 | |||
169 | if (page == 0) | ||
170 | return pmbus_write_byte(client, 0, value); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
164 | static int lm25066_probe(struct i2c_client *client, | 175 | static int lm25066_probe(struct i2c_client *client, |
165 | const struct i2c_device_id *id) | 176 | const struct i2c_device_id *id) |
166 | { | 177 | { |
@@ -204,6 +215,7 @@ static int lm25066_probe(struct i2c_client *client, | |||
204 | 215 | ||
205 | info->read_word_data = lm25066_read_word_data; | 216 | info->read_word_data = lm25066_read_word_data; |
206 | info->write_word_data = lm25066_write_word_data; | 217 | info->write_word_data = lm25066_write_word_data; |
218 | info->write_byte = lm25066_write_byte; | ||
207 | 219 | ||
208 | switch (id->driver_data) { | 220 | switch (id->driver_data) { |
209 | case lm25066: | 221 | case lm25066: |
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 0808d986d75b..a6ae20ffef6b 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h | |||
@@ -325,6 +325,7 @@ struct pmbus_driver_info { | |||
325 | int (*read_word_data)(struct i2c_client *client, int page, int reg); | 325 | int (*read_word_data)(struct i2c_client *client, int page, int reg); |
326 | int (*write_word_data)(struct i2c_client *client, int page, int reg, | 326 | int (*write_word_data)(struct i2c_client *client, int page, int reg, |
327 | u16 word); | 327 | u16 word); |
328 | int (*write_byte)(struct i2c_client *client, int page, u8 value); | ||
328 | /* | 329 | /* |
329 | * The identify function determines supported PMBus functionality. | 330 | * The identify function determines supported PMBus functionality. |
330 | * This function is only necessary if a chip driver supports multiple | 331 | * This function is only necessary if a chip driver supports multiple |
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 5c1b6cf31701..a561c3a0e916 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c | |||
@@ -182,6 +182,24 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) | |||
182 | } | 182 | } |
183 | EXPORT_SYMBOL_GPL(pmbus_write_byte); | 183 | EXPORT_SYMBOL_GPL(pmbus_write_byte); |
184 | 184 | ||
185 | /* | ||
186 | * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if | ||
187 | * a device specific mapping function exists and calls it if necessary. | ||
188 | */ | ||
189 | static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) | ||
190 | { | ||
191 | struct pmbus_data *data = i2c_get_clientdata(client); | ||
192 | const struct pmbus_driver_info *info = data->info; | ||
193 | int status; | ||
194 | |||
195 | if (info->write_byte) { | ||
196 | status = info->write_byte(client, page, value); | ||
197 | if (status != -ENODATA) | ||
198 | return status; | ||
199 | } | ||
200 | return pmbus_write_byte(client, page, value); | ||
201 | } | ||
202 | |||
185 | int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) | 203 | int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) |
186 | { | 204 | { |
187 | int rv; | 205 | int rv; |
@@ -281,7 +299,7 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg) | |||
281 | 299 | ||
282 | static void pmbus_clear_fault_page(struct i2c_client *client, int page) | 300 | static void pmbus_clear_fault_page(struct i2c_client *client, int page) |
283 | { | 301 | { |
284 | pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); | 302 | _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); |
285 | } | 303 | } |
286 | 304 | ||
287 | void pmbus_clear_faults(struct i2c_client *client) | 305 | void pmbus_clear_faults(struct i2c_client *client) |
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0c731ca69f15..b228e09c5d05 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c | |||
@@ -146,6 +146,7 @@ struct i2c_nmk_client { | |||
146 | * @stop: stop condition | 146 | * @stop: stop condition |
147 | * @xfer_complete: acknowledge completion for a I2C message | 147 | * @xfer_complete: acknowledge completion for a I2C message |
148 | * @result: controller propagated result | 148 | * @result: controller propagated result |
149 | * @regulator: pointer to i2c regulator | ||
149 | * @busy: Busy doing transfer | 150 | * @busy: Busy doing transfer |
150 | */ | 151 | */ |
151 | struct nmk_i2c_dev { | 152 | struct nmk_i2c_dev { |
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev) | |||
417 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, | 418 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, |
418 | dev->virtbase + I2C_IMSCR); | 419 | dev->virtbase + I2C_IMSCR); |
419 | 420 | ||
420 | timeout = wait_for_completion_interruptible_timeout( | 421 | timeout = wait_for_completion_timeout( |
421 | &dev->xfer_complete, dev->adap.timeout); | 422 | &dev->xfer_complete, dev->adap.timeout); |
422 | 423 | ||
423 | if (timeout < 0) { | 424 | if (timeout < 0) { |
424 | dev_err(&dev->pdev->dev, | 425 | dev_err(&dev->pdev->dev, |
425 | "wait_for_completion_interruptible_timeout" | 426 | "wait_for_completion_timeout" |
426 | "returned %d waiting for event\n", timeout); | 427 | "returned %d waiting for event\n", timeout); |
427 | status = timeout; | 428 | status = timeout; |
428 | } | 429 | } |
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev) | |||
504 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, | 505 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, |
505 | dev->virtbase + I2C_IMSCR); | 506 | dev->virtbase + I2C_IMSCR); |
506 | 507 | ||
507 | timeout = wait_for_completion_interruptible_timeout( | 508 | timeout = wait_for_completion_timeout( |
508 | &dev->xfer_complete, dev->adap.timeout); | 509 | &dev->xfer_complete, dev->adap.timeout); |
509 | 510 | ||
510 | if (timeout < 0) { | 511 | if (timeout < 0) { |
511 | dev_err(&dev->pdev->dev, | 512 | dev_err(&dev->pdev->dev, |
512 | "wait_for_completion_interruptible_timeout" | 513 | "wait_for_completion_timeout " |
513 | "returned %d waiting for event\n", timeout); | 514 | "returned %d waiting for event\n", timeout); |
514 | status = timeout; | 515 | status = timeout; |
515 | } | 516 | } |
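The two read_i2c()/write_i2c() hunks above drop the interruptible variant, so a pending signal can no longer abort the wait for the transfer-complete event. As a reminder of the API being adopted, here is a minimal driver-neutral sketch with a hypothetical xfer_wait() helper: wait_for_completion_timeout() returns the jiffies remaining, or 0 on timeout, so the timeout case is detected by comparing against zero rather than looking for a negative value.

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical helper: block (non-interruptibly) until the transfer
 * completes or the adapter timeout expires.  wait_for_completion_timeout()
 * returns the number of jiffies left, or 0 if the wait timed out.
 */
static int xfer_wait(struct completion *done, unsigned long timeout_jiffies)
{
	unsigned long remaining;

	remaining = wait_for_completion_timeout(done, timeout_jiffies);

	return remaining ? 0 : -ETIMEDOUT;
}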
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1a766cf74f6b..2dfb63176856 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev) | |||
1139 | return 0; | 1139 | return 0; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | #ifdef CONFIG_SUSPEND | ||
1143 | static int omap_i2c_suspend(struct device *dev) | ||
1144 | { | ||
1145 | if (!pm_runtime_suspended(dev)) | ||
1146 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) | ||
1147 | dev->bus->pm->runtime_suspend(dev); | ||
1148 | |||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
1152 | static int omap_i2c_resume(struct device *dev) | ||
1153 | { | ||
1154 | if (!pm_runtime_suspended(dev)) | ||
1155 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) | ||
1156 | dev->bus->pm->runtime_resume(dev); | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1161 | static struct dev_pm_ops omap_i2c_pm_ops = { | ||
1162 | .suspend = omap_i2c_suspend, | ||
1163 | .resume = omap_i2c_resume, | ||
1164 | }; | ||
1165 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1166 | #else | ||
1167 | #define OMAP_I2C_PM_OPS NULL | ||
1168 | #endif | ||
1169 | |||
1170 | static struct platform_driver omap_i2c_driver = { | 1142 | static struct platform_driver omap_i2c_driver = { |
1171 | .probe = omap_i2c_probe, | 1143 | .probe = omap_i2c_probe, |
1172 | .remove = omap_i2c_remove, | 1144 | .remove = omap_i2c_remove, |
1173 | .driver = { | 1145 | .driver = { |
1174 | .name = "omap_i2c", | 1146 | .name = "omap_i2c", |
1175 | .owner = THIS_MODULE, | 1147 | .owner = THIS_MODULE, |
1176 | .pm = OMAP_I2C_PM_OPS, | ||
1177 | }, | 1148 | }, |
1178 | }; | 1149 | }; |
1179 | 1150 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 43f89ba0a908..fe89c4660d55 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
717 | { | 717 | { |
718 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 718 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
719 | struct ipoib_neigh *neigh; | 719 | struct ipoib_neigh *neigh; |
720 | struct neighbour *n; | 720 | struct neighbour *n = NULL; |
721 | unsigned long flags; | 721 | unsigned long flags; |
722 | 722 | ||
723 | n = dst_get_neighbour(skb_dst(skb)); | 723 | if (likely(skb_dst(skb))) |
724 | if (likely(skb_dst(skb) && n)) { | 724 | n = dst_get_neighbour(skb_dst(skb)); |
725 | |||
726 | if (likely(n)) { | ||
725 | if (unlikely(!*to_ipoib_neigh(n))) { | 727 | if (unlikely(!*to_ipoib_neigh(n))) { |
726 | ipoib_path_lookup(skb, dev); | 728 | ipoib_path_lookup(skb, dev); |
727 | return NETDEV_TX_OK; | 729 | return NETDEV_TX_OK; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 8db008de5392..9c61b9c2c597 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn, | |||
101 | 101 | ||
102 | /* verify PDU length */ | 102 | /* verify PDU length */ |
103 | datalen = ntoh24(hdr->dlength); | 103 | datalen = ntoh24(hdr->dlength); |
104 | if (datalen != rx_data_len) { | 104 | if (datalen > rx_data_len || (datalen + 4) < rx_data_len) { |
105 | printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", | 105 | iser_err("wrong datalen %d (hdr), %d (IB)\n", |
106 | datalen, rx_data_len); | 106 | datalen, rx_data_len); |
107 | rc = ISCSI_ERR_DATALEN; | 107 | rc = ISCSI_ERR_DATALEN; |
108 | goto error; | 108 | goto error; |
109 | } | 109 | } |
110 | 110 | ||
111 | if (datalen != rx_data_len) | ||
112 | iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", | ||
113 | datalen, rx_data_len); | ||
114 | |||
111 | /* read AHS */ | 115 | /* read AHS */ |
112 | ahslen = hdr->hlength * 4; | 116 | ahslen = hdr->hlength * 4; |
113 | 117 | ||
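The rewritten length check above no longer insists that the iSCSI header length equal the number of bytes received over IB; it accepts up to 4 bytes of padding and only logs the mismatch at debug level. Expressed as a standalone predicate (hypothetical name, plain C), the accepted window is:

#include <stdbool.h>

/* Accept rx_data_len in the window [datalen, datalen + 4]; this is the
 * negation of the error condition used in iscsi_iser_recv() above.
 */
static bool pdu_len_ok(int datalen, int rx_data_len)
{
	return datalen <= rx_data_len && rx_data_len <= datalen + 4;
}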
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 342cbc1bdaae..db6f3ce9f3bf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -89,7 +89,7 @@ | |||
89 | } while (0) | 89 | } while (0) |
90 | 90 | ||
91 | #define SHIFT_4K 12 | 91 | #define SHIFT_4K 12 |
92 | #define SIZE_4K (1UL << SHIFT_4K) | 92 | #define SIZE_4K (1ULL << SHIFT_4K) |
93 | #define MASK_4K (~(SIZE_4K-1)) | 93 | #define MASK_4K (~(SIZE_4K-1)) |
94 | 94 | ||
95 | /* support up to 512KB in one RDMA */ | 95 | /* support up to 512KB in one RDMA */ |
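Changing SIZE_4K from 1UL to 1ULL only matters on 32-bit builds, where the derived MASK_4K would otherwise be a 32-bit value and, once applied to a 64-bit DMA address, would clear the upper 32 bits. A small host-side illustration of the two mask widths (assumed address value, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x123456789abcULL;

	/* 32-bit arithmetic, i.e. what (1UL << 12) gives on a 32-bit kernel */
	uint32_t mask32 = ~((uint32_t)(1u << 12) - 1);
	/* 64-bit arithmetic, i.e. what (1ULL << 12) gives everywhere */
	uint64_t mask64 = ~((uint64_t)(1ULL << 12) - 1);

	printf("32-bit mask: %#llx\n", (unsigned long long)(addr & mask32));
	printf("64-bit mask: %#llx\n", (unsigned long long)(addr & mask64));
	return 0;
}

The first line prints 0x56789000 (the upper half of the address is lost), the second 0x123456789000.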
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 5745b7fe158c..f299de6b419b 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
412 | memcpy(iser_conn->ib_conn->login_buf, task->data, | 412 | memcpy(iser_conn->ib_conn->login_buf, task->data, |
413 | task->data_count); | 413 | task->data_count); |
414 | tx_dsg->addr = iser_conn->ib_conn->login_dma; | 414 | tx_dsg->addr = iser_conn->ib_conn->login_dma; |
415 | tx_dsg->length = data_seg_len; | 415 | tx_dsg->length = task->data_count; |
416 | tx_dsg->lkey = device->mr->lkey; | 416 | tx_dsg->lkey = device->mr->lkey; |
417 | mdesc->num_sge = 2; | 417 | mdesc->num_sge = 2; |
418 | } | 418 | } |
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 9882971827e6..358cd7ee905b 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -139,7 +139,7 @@ struct analog_port { | |||
139 | #include <linux/i8253.h> | 139 | #include <linux/i8253.h> |
140 | 140 | ||
141 | #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) | 141 | #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) |
142 | #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) | 142 | #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0))) |
143 | #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") | 143 | #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") |
144 | static unsigned int get_time_pit(void) | 144 | static unsigned int get_time_pit(void) |
145 | { | 145 | { |
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index c8242dd190d0..aa17e024d803 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * flag. | 20 | * flag. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index f270447ba951..a5a77915c650 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -702,7 +702,7 @@ err_iounmap: | |||
702 | err_free_mem_region: | 702 | err_free_mem_region: |
703 | release_mem_region(res->start, resource_size(res)); | 703 | release_mem_region(res->start, resource_size(res)); |
704 | err_free_mem: | 704 | err_free_mem: |
705 | input_free_device(kbc->idev); | 705 | input_free_device(input_dev); |
706 | kfree(kbc); | 706 | kfree(kbc); |
707 | 707 | ||
708 | return err; | 708 | return err; |
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index e21deb1baa8a..025417d74ca2 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (I2C bus) | 2 | * AD714X CapTouch Programmable Controller driver (I2C bus) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev) | |||
27 | 27 | ||
28 | static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); | 28 | static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); |
29 | 29 | ||
30 | static int ad714x_i2c_write(struct device *dev, unsigned short reg, | 30 | static int ad714x_i2c_write(struct ad714x_chip *chip, |
31 | unsigned short data) | 31 | unsigned short reg, unsigned short data) |
32 | { | 32 | { |
33 | struct i2c_client *client = to_i2c_client(dev); | 33 | struct i2c_client *client = to_i2c_client(chip->dev); |
34 | int ret = 0; | 34 | int error; |
35 | u8 *_reg = (u8 *)&reg; | 35 |
36 | u8 *_data = (u8 *)&data; | 36 | chip->xfer_buf[0] = cpu_to_be16(reg); |
37 | 37 | chip->xfer_buf[1] = cpu_to_be16(data); | |
38 | u8 tx[4] = { | 38 | |
39 | _reg[1], | 39 | error = i2c_master_send(client, (u8 *)chip->xfer_buf, |
40 | _reg[0], | 40 | 2 * sizeof(*chip->xfer_buf)); |
41 | _data[1], | 41 | if (unlikely(error < 0)) { |
42 | _data[0] | 42 | dev_err(&client->dev, "I2C write error: %d\n", error); |
43 | }; | 43 | return error; |
44 | 44 | } | |
45 | ret = i2c_master_send(client, tx, 4); | 45 | |
46 | if (ret < 0) | 46 | return 0; |
47 | dev_err(&client->dev, "I2C write error\n"); | ||
48 | |||
49 | return ret; | ||
50 | } | 47 | } |
51 | 48 | ||
52 | static int ad714x_i2c_read(struct device *dev, unsigned short reg, | 49 | static int ad714x_i2c_read(struct ad714x_chip *chip, |
53 | unsigned short *data) | 50 | unsigned short reg, unsigned short *data, size_t len) |
54 | { | 51 | { |
55 | struct i2c_client *client = to_i2c_client(dev); | 52 | struct i2c_client *client = to_i2c_client(chip->dev); |
56 | int ret = 0; | 53 | int i; |
57 | u8 *_reg = (u8 *)&reg; | 54 | int error; |
58 | u8 *_data = (u8 *)data; | 55 | |
59 | 56 | chip->xfer_buf[0] = cpu_to_be16(reg); | |
60 | u8 tx[2] = { | 57 | |
61 | _reg[1], | 58 | error = i2c_master_send(client, (u8 *)chip->xfer_buf, |
62 | _reg[0] | 59 | sizeof(*chip->xfer_buf)); |
63 | }; | 60 | if (error >= 0) |
64 | u8 rx[2]; | 61 | error = i2c_master_recv(client, (u8 *)chip->xfer_buf, |
65 | 62 | len * sizeof(*chip->xfer_buf)); | |
66 | ret = i2c_master_send(client, tx, 2); | 63 | |
67 | if (ret >= 0) | 64 | if (unlikely(error < 0)) { |
68 | ret = i2c_master_recv(client, rx, 2); | 65 | dev_err(&client->dev, "I2C read error: %d\n", error); |
69 | 66 | return error; | |
70 | if (unlikely(ret < 0)) { | ||
71 | dev_err(&client->dev, "I2C read error\n"); | ||
72 | } else { | ||
73 | _data[0] = rx[1]; | ||
74 | _data[1] = rx[0]; | ||
75 | } | 67 | } |
76 | 68 | ||
77 | return ret; | 69 | for (i = 0; i < len; i++) |
70 | data[i] = be16_to_cpu(chip->xfer_buf[i]); | ||
71 | |||
72 | return 0; | ||
78 | } | 73 | } |
79 | 74 | ||
80 | static int __devinit ad714x_i2c_probe(struct i2c_client *client, | 75 | static int __devinit ad714x_i2c_probe(struct i2c_client *client, |
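The I2C accessors above now marshal the 16-bit register address and payload through a shared __be16 xfer_buf with cpu_to_be16()/be16_to_cpu() instead of swapping bytes by hand, and they gain a length argument so several registers can be read in one transaction. A condensed sketch of the write side (hypothetical regmap16_write() name, scratch buffer passed in explicitly, error reporting trimmed); the read side mirrors it with i2c_master_recv() followed by be16_to_cpu() on each received word:

#include <linux/i2c.h>
#include <asm/byteorder.h>

/* Convert the register address and data to big-endian once, then send
 * both words as a single buffer.  "xfer_buf" stands in for the __be16
 * scratch array embedded in the chip structure.
 */
static int regmap16_write(struct i2c_client *client, __be16 *xfer_buf,
			  unsigned short reg, unsigned short data)
{
	int ret;

	xfer_buf[0] = cpu_to_be16(reg);
	xfer_buf[1] = cpu_to_be16(data);

	ret = i2c_master_send(client, (u8 *)xfer_buf, 2 * sizeof(*xfer_buf));

	return ret < 0 ? ret : 0;
}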
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 4120dd549305..875b50811361 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c | |||
@@ -1,12 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (SPI bus) | 2 | * AD714X CapTouch Programmable Controller driver (SPI bus) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/input.h> /* BUS_I2C */ | 9 | #include <linux/input.h> /* BUS_SPI */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/spi/spi.h> | 11 | #include <linux/spi/spi.h> |
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev) | |||
30 | 30 | ||
31 | static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); | 31 | static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); |
32 | 32 | ||
33 | static int ad714x_spi_read(struct device *dev, unsigned short reg, | 33 | static int ad714x_spi_read(struct ad714x_chip *chip, |
34 | unsigned short *data) | 34 | unsigned short reg, unsigned short *data, size_t len) |
35 | { | 35 | { |
36 | struct spi_device *spi = to_spi_device(dev); | 36 | struct spi_device *spi = to_spi_device(chip->dev); |
37 | unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; | 37 | struct spi_message message; |
38 | struct spi_transfer xfer[2]; | ||
39 | int i; | ||
40 | int error; | ||
41 | |||
42 | spi_message_init(&message); | ||
43 | memset(xfer, 0, sizeof(xfer)); | ||
44 | |||
45 | chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | | ||
46 | AD714x_SPI_READ | reg); | ||
47 | xfer[0].tx_buf = &chip->xfer_buf[0]; | ||
48 | xfer[0].len = sizeof(chip->xfer_buf[0]); | ||
49 | spi_message_add_tail(&xfer[0], &message); | ||
50 | |||
51 | xfer[1].rx_buf = &chip->xfer_buf[1]; | ||
52 | xfer[1].len = sizeof(chip->xfer_buf[1]) * len; | ||
53 | spi_message_add_tail(&xfer[1], &message); | ||
54 | |||
55 | error = spi_sync(spi, &message); | ||
56 | if (unlikely(error)) { | ||
57 | dev_err(chip->dev, "SPI read error: %d\n", error); | ||
58 | return error; | ||
59 | } | ||
60 | |||
61 | for (i = 0; i < len; i++) | ||
62 | data[i] = be16_to_cpu(chip->xfer_buf[i + 1]); | ||
38 | 63 | ||
39 | return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); | 64 | return 0; |
40 | } | 65 | } |
41 | 66 | ||
42 | static int ad714x_spi_write(struct device *dev, unsigned short reg, | 67 | static int ad714x_spi_write(struct ad714x_chip *chip, |
43 | unsigned short data) | 68 | unsigned short reg, unsigned short data) |
44 | { | 69 | { |
45 | struct spi_device *spi = to_spi_device(dev); | 70 | struct spi_device *spi = to_spi_device(chip->dev); |
46 | unsigned short tx[2] = { | 71 | int error; |
47 | AD714x_SPI_CMD_PREFIX | reg, | 72 | |
48 | data | 73 | chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg); |
49 | }; | 74 | chip->xfer_buf[1] = cpu_to_be16(data); |
75 | |||
76 | error = spi_write(spi, (u8 *)chip->xfer_buf, | ||
77 | 2 * sizeof(*chip->xfer_buf)); | ||
78 | if (unlikely(error)) { | ||
79 | dev_err(chip->dev, "SPI write error: %d\n", error); | ||
80 | return error; | ||
81 | } | ||
50 | 82 | ||
51 | return spi_write(spi, (u8 *)tx, 4); | 83 | return 0; |
52 | } | 84 | } |
53 | 85 | ||
54 | static int __devinit ad714x_spi_probe(struct spi_device *spi) | 86 | static int __devinit ad714x_spi_probe(struct spi_device *spi) |
55 | { | 87 | { |
56 | struct ad714x_chip *chip; | 88 | struct ad714x_chip *chip; |
89 | int err; | ||
90 | |||
91 | spi->bits_per_word = 8; | ||
92 | err = spi_setup(spi); | ||
93 | if (err < 0) | ||
94 | return err; | ||
57 | 95 | ||
58 | chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, | 96 | chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, |
59 | ad714x_spi_read, ad714x_spi_write); | 97 | ad714x_spi_read, ad714x_spi_write); |
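The SPI read path above is rebuilt around a two-transfer spi_message — one transmit transfer carrying the command word, one receive transfer clocking in len 16-bit results — which lifts the single-word limit of the old spi_write_then_read() call. A stripped-down sketch of that message construction (hypothetical spi_cmd_read16() name; the caller converts the received words with be16_to_cpu() afterwards):

#include <linux/spi/spi.h>
#include <linux/string.h>

/* Send one big-endian command word, then read back nwords big-endian
 * 16-bit words within the same chip-select assertion.
 */
static int spi_cmd_read16(struct spi_device *spi, const __be16 *cmd,
			  __be16 *rx, size_t nwords)
{
	struct spi_transfer xfer[2];
	struct spi_message msg;

	spi_message_init(&msg);
	memset(xfer, 0, sizeof(xfer));

	xfer[0].tx_buf = cmd;			/* command goes out first */
	xfer[0].len = sizeof(*cmd);
	spi_message_add_tail(&xfer[0], &msg);

	xfer[1].rx_buf = rx;			/* then the response words */
	xfer[1].len = nwords * sizeof(*rx);
	spi_message_add_tail(&xfer[1], &msg);

	return spi_sync(spi, &msg);
}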
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index c3a62c42cd28..ca42c7d2a3c7 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A | 2 | * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -59,7 +59,6 @@ | |||
59 | #define STAGE11_AMBIENT 0x27D | 59 | #define STAGE11_AMBIENT 0x27D |
60 | 60 | ||
61 | #define PER_STAGE_REG_NUM 36 | 61 | #define PER_STAGE_REG_NUM 36 |
62 | #define STAGE_NUM 12 | ||
63 | #define STAGE_CFGREG_NUM 8 | 62 | #define STAGE_CFGREG_NUM 8 |
64 | #define SYS_CFGREG_NUM 8 | 63 | #define SYS_CFGREG_NUM 8 |
65 | 64 | ||
@@ -124,27 +123,6 @@ struct ad714x_driver_data { | |||
124 | * information to integrate all things which will be private data | 123 | * information to integrate all things which will be private data |
125 | * of spi/i2c device | 124 | * of spi/i2c device |
126 | */ | 125 | */ |
127 | struct ad714x_chip { | ||
128 | unsigned short h_state; | ||
129 | unsigned short l_state; | ||
130 | unsigned short c_state; | ||
131 | unsigned short adc_reg[STAGE_NUM]; | ||
132 | unsigned short amb_reg[STAGE_NUM]; | ||
133 | unsigned short sensor_val[STAGE_NUM]; | ||
134 | |||
135 | struct ad714x_platform_data *hw; | ||
136 | struct ad714x_driver_data *sw; | ||
137 | |||
138 | int irq; | ||
139 | struct device *dev; | ||
140 | ad714x_read_t read; | ||
141 | ad714x_write_t write; | ||
142 | |||
143 | struct mutex mutex; | ||
144 | |||
145 | unsigned product; | ||
146 | unsigned version; | ||
147 | }; | ||
148 | 126 | ||
149 | static void ad714x_use_com_int(struct ad714x_chip *ad714x, | 127 | static void ad714x_use_com_int(struct ad714x_chip *ad714x, |
150 | int start_stage, int end_stage) | 128 | int start_stage, int end_stage) |
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x, | |||
154 | 132 | ||
155 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); | 133 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); |
156 | 134 | ||
157 | ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); | 135 | ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); |
158 | data |= 1 << end_stage; | 136 | data |= 1 << end_stage; |
159 | ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); | 137 | ad714x->write(ad714x, STG_COM_INT_EN_REG, data); |
160 | 138 | ||
161 | ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); | 139 | ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); |
162 | data &= ~mask; | 140 | data &= ~mask; |
163 | ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); | 141 | ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); |
164 | } | 142 | } |
165 | 143 | ||
166 | static void ad714x_use_thr_int(struct ad714x_chip *ad714x, | 144 | static void ad714x_use_thr_int(struct ad714x_chip *ad714x, |
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x, | |||
171 | 149 | ||
172 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); | 150 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); |
173 | 151 | ||
174 | ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); | 152 | ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); |
175 | data &= ~(1 << end_stage); | 153 | data &= ~(1 << end_stage); |
176 | ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); | 154 | ad714x->write(ad714x, STG_COM_INT_EN_REG, data); |
177 | 155 | ||
178 | ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); | 156 | ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); |
179 | data |= mask; | 157 | data |= mask; |
180 | ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); | 158 | ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); |
181 | } | 159 | } |
182 | 160 | ||
183 | static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, | 161 | static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, |
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
273 | struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; | 251 | struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; |
274 | int i; | 252 | int i; |
275 | 253 | ||
254 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, | ||
255 | &ad714x->adc_reg[hw->start_stage], | ||
256 | hw->end_stage - hw->start_stage + 1); | ||
257 | |||
276 | for (i = hw->start_stage; i <= hw->end_stage; i++) { | 258 | for (i = hw->start_stage; i <= hw->end_stage; i++) { |
277 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 259 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
278 | &ad714x->adc_reg[i]); | 260 | &ad714x->amb_reg[i], 1); |
279 | ad714x->read(ad714x->dev, | 261 | |
280 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | 262 | ad714x->sensor_val[i] = |
281 | &ad714x->amb_reg[i]); | 263 | abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]); |
282 | |||
283 | ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] - | ||
284 | ad714x->amb_reg[i]); | ||
285 | } | 264 | } |
286 | } | 265 | } |
287 | 266 | ||
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
444 | struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; | 423 | struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; |
445 | int i; | 424 | int i; |
446 | 425 | ||
426 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, | ||
427 | &ad714x->adc_reg[hw->start_stage], | ||
428 | hw->end_stage - hw->start_stage + 1); | ||
429 | |||
447 | for (i = hw->start_stage; i <= hw->end_stage; i++) { | 430 | for (i = hw->start_stage; i <= hw->end_stage; i++) { |
448 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 431 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
449 | &ad714x->adc_reg[i]); | 432 | &ad714x->amb_reg[i], 1); |
450 | ad714x->read(ad714x->dev, | ||
451 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | ||
452 | &ad714x->amb_reg[i]); | ||
453 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) | 433 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) |
454 | ad714x->sensor_val[i] = ad714x->adc_reg[i] - | 434 | ad714x->sensor_val[i] = |
455 | ad714x->amb_reg[i]; | 435 | ad714x->adc_reg[i] - ad714x->amb_reg[i]; |
456 | else | 436 | else |
457 | ad714x->sensor_val[i] = 0; | 437 | ad714x->sensor_val[i] = 0; |
458 | } | 438 | } |
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
597 | struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; | 577 | struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; |
598 | int i; | 578 | int i; |
599 | 579 | ||
580 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage, | ||
581 | &ad714x->adc_reg[hw->x_start_stage], | ||
582 | hw->x_end_stage - hw->x_start_stage + 1); | ||
583 | |||
600 | for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { | 584 | for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { |
601 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 585 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
602 | &ad714x->adc_reg[i]); | 586 | &ad714x->amb_reg[i], 1); |
603 | ad714x->read(ad714x->dev, | ||
604 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | ||
605 | &ad714x->amb_reg[i]); | ||
606 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) | 587 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) |
607 | ad714x->sensor_val[i] = ad714x->adc_reg[i] - | 588 | ad714x->sensor_val[i] = |
608 | ad714x->amb_reg[i]; | 589 | ad714x->adc_reg[i] - ad714x->amb_reg[i]; |
609 | else | 590 | else |
610 | ad714x->sensor_val[i] = 0; | 591 | ad714x->sensor_val[i] = 0; |
611 | } | 592 | } |
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x) | |||
891 | { | 872 | { |
892 | unsigned short data; | 873 | unsigned short data; |
893 | 874 | ||
894 | ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); | 875 | ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1); |
895 | switch (data & 0xFFF0) { | 876 | switch (data & 0xFFF0) { |
896 | case AD7142_PARTID: | 877 | case AD7142_PARTID: |
897 | ad714x->product = 0x7142; | 878 | ad714x->product = 0x7142; |
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x) | |||
940 | for (i = 0; i < STAGE_NUM; i++) { | 921 | for (i = 0; i < STAGE_NUM; i++) { |
941 | reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; | 922 | reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; |
942 | for (j = 0; j < STAGE_CFGREG_NUM; j++) | 923 | for (j = 0; j < STAGE_CFGREG_NUM; j++) |
943 | ad714x->write(ad714x->dev, reg_base + j, | 924 | ad714x->write(ad714x, reg_base + j, |
944 | ad714x->hw->stage_cfg_reg[i][j]); | 925 | ad714x->hw->stage_cfg_reg[i][j]); |
945 | } | 926 | } |
946 | 927 | ||
947 | for (i = 0; i < SYS_CFGREG_NUM; i++) | 928 | for (i = 0; i < SYS_CFGREG_NUM; i++) |
948 | ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, | 929 | ad714x->write(ad714x, AD714X_SYSCFG_REG + i, |
949 | ad714x->hw->sys_cfg_reg[i]); | 930 | ad714x->hw->sys_cfg_reg[i]); |
950 | for (i = 0; i < SYS_CFGREG_NUM; i++) | 931 | for (i = 0; i < SYS_CFGREG_NUM; i++) |
951 | ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, | 932 | ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1); |
952 | &data); | ||
953 | 933 | ||
954 | ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); | 934 | ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF); |
955 | 935 | ||
956 | /* clear all interrupts */ | 936 | /* clear all interrupts */ |
957 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); | 937 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
958 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); | ||
959 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); | ||
960 | } | 938 | } |
961 | 939 | ||
962 | static irqreturn_t ad714x_interrupt_thread(int irq, void *data) | 940 | static irqreturn_t ad714x_interrupt_thread(int irq, void *data) |
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) | |||
966 | 944 | ||
967 | mutex_lock(&ad714x->mutex); | 945 | mutex_lock(&ad714x->mutex); |
968 | 946 | ||
969 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); | 947 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
970 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state); | ||
971 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state); | ||
972 | 948 | ||
973 | for (i = 0; i < ad714x->hw->button_num; i++) | 949 | for (i = 0; i < ad714x->hw->button_num; i++) |
974 | ad714x_button_state_machine(ad714x, i); | 950 | ad714x_button_state_machine(ad714x, i); |
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x) | |||
1245 | mutex_lock(&ad714x->mutex); | 1221 | mutex_lock(&ad714x->mutex); |
1246 | 1222 | ||
1247 | data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; | 1223 | data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; |
1248 | ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); | 1224 | ad714x->write(ad714x, AD714X_PWR_CTRL, data); |
1249 | 1225 | ||
1250 | mutex_unlock(&ad714x->mutex); | 1226 | mutex_unlock(&ad714x->mutex); |
1251 | 1227 | ||
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable); | |||
1255 | 1231 | ||
1256 | int ad714x_enable(struct ad714x_chip *ad714x) | 1232 | int ad714x_enable(struct ad714x_chip *ad714x) |
1257 | { | 1233 | { |
1258 | unsigned short data; | ||
1259 | |||
1260 | dev_dbg(ad714x->dev, "%s enter\n", __func__); | 1234 | dev_dbg(ad714x->dev, "%s enter\n", __func__); |
1261 | 1235 | ||
1262 | mutex_lock(&ad714x->mutex); | 1236 | mutex_lock(&ad714x->mutex); |
1263 | 1237 | ||
1264 | /* resume to non-shutdown mode */ | 1238 | /* resume to non-shutdown mode */ |
1265 | 1239 | ||
1266 | ad714x->write(ad714x->dev, AD714X_PWR_CTRL, | 1240 | ad714x->write(ad714x, AD714X_PWR_CTRL, |
1267 | ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); | 1241 | ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); |
1268 | 1242 | ||
1269 | /* make sure the interrupt output line is not low level after resume, | 1243 | /* make sure the interrupt output line is not low level after resume, |
1270 | * otherwise we will get no chance to enter falling-edge irq again | 1244 | * otherwise we will get no chance to enter falling-edge irq again |
1271 | */ | 1245 | */ |
1272 | 1246 | ||
1273 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); | 1247 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
1274 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); | ||
1275 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); | ||
1276 | 1248 | ||
1277 | mutex_unlock(&ad714x->mutex); | 1249 | mutex_unlock(&ad714x->mutex); |
1278 | 1250 | ||
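Several hunks above collapse three back-to-back status reads into one burst read of three words starting at STG_LOW_INT_STA_REG, stored directly into l_state. That works because l_state, h_state and c_state are declared consecutively and with the same type in struct ad714x_chip, as the header change below shows, so the block read fills all three fields in order. A reduced illustration of the idea, with a hypothetical read_block() callback standing in for ad714x->read():

#include <stddef.h>

/* Three adjacent unsigned shorts receive one three-word burst read; the
 * layout assumption is that the fields are contiguous, exactly as
 * l_state/h_state/c_state are in struct ad714x_chip.
 */
struct irq_status {
	unsigned short low;	/* base register     */
	unsigned short high;	/* base register + 1 */
	unsigned short com;	/* base register + 2 */
};

static int fetch_irq_status(int (*read_block)(unsigned short reg,
					      unsigned short *buf, size_t len),
			    unsigned short base_reg, struct irq_status *st)
{
	/* One transaction updates all three fields at once. */
	return read_block(base_reg, &st->low, 3);
}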
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h index 45c54fb13f07..3c85455aa66d 100644 --- a/drivers/input/misc/ad714x.h +++ b/drivers/input/misc/ad714x.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (bus interfaces) | 2 | * AD714X CapTouch Programmable Controller driver (bus interfaces) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -11,11 +11,40 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #define STAGE_NUM 12 | ||
15 | |||
14 | struct device; | 16 | struct device; |
17 | struct ad714x_platform_data; | ||
18 | struct ad714x_driver_data; | ||
15 | struct ad714x_chip; | 19 | struct ad714x_chip; |
16 | 20 | ||
17 | typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); | 21 | typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t); |
18 | typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); | 22 | typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short); |
23 | |||
24 | struct ad714x_chip { | ||
25 | unsigned short l_state; | ||
26 | unsigned short h_state; | ||
27 | unsigned short c_state; | ||
28 | unsigned short adc_reg[STAGE_NUM]; | ||
29 | unsigned short amb_reg[STAGE_NUM]; | ||
30 | unsigned short sensor_val[STAGE_NUM]; | ||
31 | |||
32 | struct ad714x_platform_data *hw; | ||
33 | struct ad714x_driver_data *sw; | ||
34 | |||
35 | int irq; | ||
36 | struct device *dev; | ||
37 | ad714x_read_t read; | ||
38 | ad714x_write_t write; | ||
39 | |||
40 | struct mutex mutex; | ||
41 | |||
42 | unsigned product; | ||
43 | unsigned version; | ||
44 | |||
45 | __be16 xfer_buf[16] ____cacheline_aligned; | ||
46 | |||
47 | }; | ||
19 | 48 | ||
20 | int ad714x_disable(struct ad714x_chip *ad714x); | 49 | int ad714x_disable(struct ad714x_chip *ad714x); |
21 | int ad714x_enable(struct ad714x_chip *ad714x); | 50 | int ad714x_enable(struct ad714x_chip *ad714x); |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 6c76cf792991..0794778295fc 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = { | |||
234 | { .compatible = "fsl,mma8450", }, | 234 | { .compatible = "fsl,mma8450", }, |
235 | { /* sentinel */ } | 235 | { /* sentinel */ } |
236 | }; | 236 | }; |
237 | MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); | 237 | MODULE_DEVICE_TABLE(of, mma8450_dt_ids); |
238 | 238 | ||
239 | static struct i2c_driver mma8450_driver = { | 239 | static struct i2c_driver mma8450_driver = { |
240 | .driver = { | 240 | .driver = { |
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c index b95fac15b2ea..f71dc728da58 100644 --- a/drivers/input/misc/mpu3050.c +++ b/drivers/input/misc/mpu3050.c | |||
@@ -282,7 +282,7 @@ err_free_irq: | |||
282 | err_pm_set_suspended: | 282 | err_pm_set_suspended: |
283 | pm_runtime_set_suspended(&client->dev); | 283 | pm_runtime_set_suspended(&client->dev); |
284 | err_free_mem: | 284 | err_free_mem: |
285 | input_unregister_device(idev); | 285 | input_free_device(idev); |
286 | kfree(sensor); | 286 | kfree(sensor); |
287 | return error; | 287 | return error; |
288 | } | 288 | } |
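The mpu3050 error path above is fixed to call input_free_device() rather than input_unregister_device(): unregistering is only valid once input_register_device() has succeeded, while a device that was merely allocated must be freed directly. A sketch of the usual pattern (illustrative names only):

#include <linux/input.h>
#include <linux/errno.h>

/* Allocate, register, and unwind an input device correctly.  Before a
 * successful input_register_device() the unwind is input_free_device();
 * after it, teardown must use input_unregister_device() instead.
 */
static int example_register_input(struct device *parent)
{
	struct input_dev *idev;
	int error;

	idev = input_allocate_device();
	if (!idev)
		return -ENOMEM;

	idev->name = "example";
	idev->dev.parent = parent;

	error = input_register_device(idev);
	if (error) {
		input_free_device(idev);	/* not yet registered */
		return error;
	}

	return 0;
}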
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983c004a..da280189ef07 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -67,6 +67,14 @@ | |||
67 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 | 67 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 |
68 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 | 68 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 |
69 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 | 69 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 |
70 | /* MacbookAir4,2 (unibody, July 2011) */ | ||
71 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c | ||
72 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d | ||
73 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e | ||
74 | /* Macbook8,2 (unibody) */ | ||
75 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 | ||
76 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 | ||
77 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 | ||
70 | 78 | ||
71 | #define BCM5974_DEVICE(prod) { \ | 79 | #define BCM5974_DEVICE(prod) { \ |
72 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ | 80 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ |
@@ -104,6 +112,14 @@ static const struct usb_device_id bcm5974_table[] = { | |||
104 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), | 112 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), |
105 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), | 113 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), |
106 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), | 114 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), |
115 | /* MacbookAir4,2 */ | ||
116 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), | ||
117 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), | ||
118 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), | ||
119 | /* MacbookPro8,2 */ | ||
120 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), | ||
121 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), | ||
122 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), | ||
107 | /* Terminating entry */ | 123 | /* Terminating entry */ |
108 | {} | 124 | {} |
109 | }; | 125 | }; |
@@ -294,6 +310,30 @@ static const struct bcm5974_config bcm5974_config_table[] = { | |||
294 | { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, | 310 | { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, |
295 | { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } | 311 | { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } |
296 | }, | 312 | }, |
313 | { | ||
314 | USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, | ||
315 | USB_DEVICE_ID_APPLE_WELLSPRING6_ISO, | ||
316 | USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, | ||
317 | HAS_INTEGRATED_BUTTON, | ||
318 | 0x84, sizeof(struct bt_data), | ||
319 | 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, | ||
320 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, | ||
321 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
322 | { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, | ||
323 | { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } | ||
324 | }, | ||
325 | { | ||
326 | USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, | ||
327 | USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO, | ||
328 | USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, | ||
329 | HAS_INTEGRATED_BUTTON, | ||
330 | 0x84, sizeof(struct bt_data), | ||
331 | 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, | ||
332 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, | ||
333 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
334 | { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, | ||
335 | { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } | ||
336 | }, | ||
297 | {} | 337 | {} |
298 | }; | 338 | }; |
299 | 339 | ||
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 449c0a46dbac..d27c9d91630b 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -49,6 +49,7 @@ struct hid_descriptor { | |||
49 | #define USB_REQ_GET_REPORT 0x01 | 49 | #define USB_REQ_GET_REPORT 0x01 |
50 | #define USB_REQ_SET_REPORT 0x09 | 50 | #define USB_REQ_SET_REPORT 0x09 |
51 | #define WAC_HID_FEATURE_REPORT 0x03 | 51 | #define WAC_HID_FEATURE_REPORT 0x03 |
52 | #define WAC_MSG_RETRIES 5 | ||
52 | 53 | ||
53 | static int usb_get_report(struct usb_interface *intf, unsigned char type, | 54 | static int usb_get_report(struct usb_interface *intf, unsigned char type, |
54 | unsigned char id, void *buf, int size) | 55 | unsigned char id, void *buf, int size) |
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi | |||
165 | report, | 166 | report, |
166 | hid_desc->wDescriptorLength, | 167 | hid_desc->wDescriptorLength, |
167 | 5000); /* 5 secs */ | 168 | 5000); /* 5 secs */ |
168 | } while (result < 0 && limit++ < 5); | 169 | } while (result < 0 && limit++ < WAC_MSG_RETRIES); |
169 | 170 | ||
170 | /* No need to parse the Descriptor. It isn't an error though */ | 171 | /* No need to parse the Descriptor. It isn't an error though */ |
171 | if (result < 0) | 172 | if (result < 0) |
@@ -319,24 +320,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat | |||
319 | int limit = 0, report_id = 2; | 320 | int limit = 0, report_id = 2; |
320 | int error = -ENOMEM; | 321 | int error = -ENOMEM; |
321 | 322 | ||
322 | rep_data = kmalloc(2, GFP_KERNEL); | 323 | rep_data = kmalloc(4, GFP_KERNEL); |
323 | if (!rep_data) | 324 | if (!rep_data) |
324 | return error; | 325 | return error; |
325 | 326 | ||
326 | /* ask to report tablet data if it is 2FGT Tablet PC or | 327 | /* ask to report tablet data if it is MT Tablet PC or |
327 | * not a Tablet PC */ | 328 | * not a Tablet PC */ |
328 | if (features->type == TABLETPC2FG) { | 329 | if (features->type == TABLETPC2FG) { |
329 | do { | 330 | do { |
330 | rep_data[0] = 3; | 331 | rep_data[0] = 3; |
331 | rep_data[1] = 4; | 332 | rep_data[1] = 4; |
333 | rep_data[2] = 0; | ||
334 | rep_data[3] = 0; | ||
332 | report_id = 3; | 335 | report_id = 3; |
333 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, | 336 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, |
334 | report_id, rep_data, 2); | 337 | report_id, rep_data, 4); |
335 | if (error >= 0) | 338 | if (error >= 0) |
336 | error = usb_get_report(intf, | 339 | error = usb_get_report(intf, |
337 | WAC_HID_FEATURE_REPORT, report_id, | 340 | WAC_HID_FEATURE_REPORT, report_id, |
338 | rep_data, 3); | 341 | rep_data, 4); |
339 | } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); | 342 | } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES); |
340 | } else if (features->type != TABLETPC) { | 343 | } else if (features->type != TABLETPC) { |
341 | do { | 344 | do { |
342 | rep_data[0] = 2; | 345 | rep_data[0] = 2; |
@@ -347,7 +350,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat | |||
347 | error = usb_get_report(intf, | 350 | error = usb_get_report(intf, |
348 | WAC_HID_FEATURE_REPORT, report_id, | 351 | WAC_HID_FEATURE_REPORT, report_id, |
349 | rep_data, 2); | 352 | rep_data, 2); |
350 | } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); | 353 | } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES); |
351 | } | 354 | } |
352 | 355 | ||
353 | kfree(rep_data); | 356 | kfree(rep_data); |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 03ebcc8b24b5..c1c2f7b28d89 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -1460,6 +1460,9 @@ static const struct wacom_features wacom_features_0xD3 = | |||
1460 | static const struct wacom_features wacom_features_0xD4 = | 1460 | static const struct wacom_features wacom_features_0xD4 = |
1461 | { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 1461 | { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
1462 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 1462 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
1463 | static const struct wacom_features wacom_features_0xD5 = | ||
1464 | { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, | ||
1465 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
1463 | static const struct wacom_features wacom_features_0xD6 = | 1466 | static const struct wacom_features wacom_features_0xD6 = |
1464 | { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 1467 | { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
1465 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 1468 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
@@ -1564,6 +1567,7 @@ const struct usb_device_id wacom_ids[] = { | |||
1564 | { USB_DEVICE_WACOM(0xD2) }, | 1567 | { USB_DEVICE_WACOM(0xD2) }, |
1565 | { USB_DEVICE_WACOM(0xD3) }, | 1568 | { USB_DEVICE_WACOM(0xD3) }, |
1566 | { USB_DEVICE_WACOM(0xD4) }, | 1569 | { USB_DEVICE_WACOM(0xD4) }, |
1570 | { USB_DEVICE_WACOM(0xD5) }, | ||
1567 | { USB_DEVICE_WACOM(0xD6) }, | 1571 | { USB_DEVICE_WACOM(0xD6) }, |
1568 | { USB_DEVICE_WACOM(0xD7) }, | 1572 | { USB_DEVICE_WACOM(0xD7) }, |
1569 | { USB_DEVICE_WACOM(0xD8) }, | 1573 | { USB_DEVICE_WACOM(0xD8) }, |
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index ae00604a6a81..f5d66859f232 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -244,6 +244,7 @@ struct mxt_finger { | |||
244 | int x; | 244 | int x; |
245 | int y; | 245 | int y; |
246 | int area; | 246 | int area; |
247 | int pressure; | ||
247 | }; | 248 | }; |
248 | 249 | ||
249 | /* Each client has this additional data */ | 250 | /* Each client has this additional data */ |
@@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) | |||
536 | finger[id].x); | 537 | finger[id].x); |
537 | input_report_abs(input_dev, ABS_MT_POSITION_Y, | 538 | input_report_abs(input_dev, ABS_MT_POSITION_Y, |
538 | finger[id].y); | 539 | finger[id].y); |
540 | input_report_abs(input_dev, ABS_MT_PRESSURE, | ||
541 | finger[id].pressure); | ||
539 | } else { | 542 | } else { |
540 | finger[id].status = 0; | 543 | finger[id].status = 0; |
541 | } | 544 | } |
@@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) | |||
546 | if (status != MXT_RELEASE) { | 549 | if (status != MXT_RELEASE) { |
547 | input_report_abs(input_dev, ABS_X, finger[single_id].x); | 550 | input_report_abs(input_dev, ABS_X, finger[single_id].x); |
548 | input_report_abs(input_dev, ABS_Y, finger[single_id].y); | 551 | input_report_abs(input_dev, ABS_Y, finger[single_id].y); |
552 | input_report_abs(input_dev, | ||
553 | ABS_PRESSURE, finger[single_id].pressure); | ||
549 | } | 554 | } |
550 | 555 | ||
551 | input_sync(input_dev); | 556 | input_sync(input_dev); |
@@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
560 | int x; | 565 | int x; |
561 | int y; | 566 | int y; |
562 | int area; | 567 | int area; |
568 | int pressure; | ||
563 | 569 | ||
564 | /* Check the touch is present on the screen */ | 570 | /* Check the touch is present on the screen */ |
565 | if (!(status & MXT_DETECT)) { | 571 | if (!(status & MXT_DETECT)) { |
@@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
584 | y = y >> 2; | 590 | y = y >> 2; |
585 | 591 | ||
586 | area = message->message[4]; | 592 | area = message->message[4]; |
593 | pressure = message->message[5]; | ||
587 | 594 | ||
588 | dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, | 595 | dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, |
589 | status & MXT_MOVE ? "moved" : "pressed", | 596 | status & MXT_MOVE ? "moved" : "pressed", |
@@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
594 | finger[id].x = x; | 601 | finger[id].x = x; |
595 | finger[id].y = y; | 602 | finger[id].y = y; |
596 | finger[id].area = area; | 603 | finger[id].area = area; |
604 | finger[id].pressure = pressure; | ||
597 | 605 | ||
598 | mxt_input_report(data, id); | 606 | mxt_input_report(data, id); |
599 | } | 607 | } |
@@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client, | |||
1116 | 0, data->max_x, 0, 0); | 1124 | 0, data->max_x, 0, 0); |
1117 | input_set_abs_params(input_dev, ABS_Y, | 1125 | input_set_abs_params(input_dev, ABS_Y, |
1118 | 0, data->max_y, 0, 0); | 1126 | 0, data->max_y, 0, 0); |
1127 | input_set_abs_params(input_dev, ABS_PRESSURE, | ||
1128 | 0, 255, 0, 0); | ||
1119 | 1129 | ||
1120 | /* For multi touch */ | 1130 | /* For multi touch */ |
1121 | input_mt_init_slots(input_dev, MXT_MAX_FINGER); | 1131 | input_mt_init_slots(input_dev, MXT_MAX_FINGER); |
@@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client, | |||
1125 | 0, data->max_x, 0, 0); | 1135 | 0, data->max_x, 0, 0); |
1126 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, | 1136 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, |
1127 | 0, data->max_y, 0, 0); | 1137 | 0, data->max_y, 0, 0); |
1138 | input_set_abs_params(input_dev, ABS_MT_PRESSURE, | ||
1139 | 0, 255, 0, 0); | ||
1128 | 1140 | ||
1129 | input_set_drvdata(input_dev, data); | 1141 | input_set_drvdata(input_dev, data); |
1130 | i2c_set_clientdata(client, data); | 1142 | i2c_set_clientdata(client, data); |
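The atmel_mxt_ts hunks add per-contact pressure: byte 5 of the touch message is captured, the ABS_PRESSURE / ABS_MT_PRESSURE ranges are declared in probe, and the value is reported alongside position. The declare-then-report pairing, reduced to its essentials (hypothetical function names, 0..255 range as in the driver):

#include <linux/input.h>

/* Declare the pressure axes once when the input device is set up... */
static void example_setup_pressure(struct input_dev *input_dev)
{
	input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
}

/* ...and report a value per contact as touch messages arrive. */
static void example_report_pressure(struct input_dev *input_dev, int pressure)
{
	input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
	input_report_abs(input_dev, ABS_PRESSURE, pressure);
	input_sync(input_dev);
}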
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c index 4f2713d92791..4627fe55b401 100644 --- a/drivers/input/touchscreen/max11801_ts.c +++ b/drivers/input/touchscreen/max11801_ts.c | |||
@@ -9,7 +9,8 @@ | |||
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License. | 12 | * the Free Software Foundation; either version 2 of the License, or |
13 | * (at your option) any later version. | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | /* | 16 | /* |
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c index 089b0a0f3d8c..0e8f63e5b36f 100644 --- a/drivers/input/touchscreen/tnetv107x-ts.c +++ b/drivers/input/touchscreen/tnetv107x-ts.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c index b9826032450b..8c00937bf7e7 100644 --- a/drivers/leds/leds-ams-delta.c +++ b/drivers/leds/leds-ams-delta.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 3ebe3824662d..ea2185531f82 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c | |||
@@ -662,6 +662,11 @@ failed_unregister_led1_R: | |||
662 | static void bd2802_unregister_led_classdev(struct bd2802_led *led) | 662 | static void bd2802_unregister_led_classdev(struct bd2802_led *led) |
663 | { | 663 | { |
664 | cancel_work_sync(&led->work); | 664 | cancel_work_sync(&led->work); |
665 | led_classdev_unregister(&led->cdev_led2b); | ||
666 | led_classdev_unregister(&led->cdev_led2g); | ||
667 | led_classdev_unregister(&led->cdev_led2r); | ||
668 | led_classdev_unregister(&led->cdev_led1b); | ||
669 | led_classdev_unregister(&led->cdev_led1g); | ||
665 | led_classdev_unregister(&led->cdev_led1r); | 670 | led_classdev_unregister(&led->cdev_led1r); |
666 | } | 671 | } |
667 | 672 | ||
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c index e4ce1fd46338..bcfbd3a60eab 100644 --- a/drivers/leds/leds-hp6xx.c +++ b/drivers/leds/leds-hp6xx.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c index 54e3d05b63cc..35903154ca2e 100644 --- a/drivers/misc/ab8500-pwm.c +++ b/drivers/misc/ab8500-pwm.c | |||
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init); | |||
164 | module_exit(ab8500_pwm_exit); | 164 | module_exit(ab8500_pwm_exit); |
165 | MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); | 165 | MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); |
166 | MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); | 166 | MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); |
167 | MODULE_ALIAS("AB8500 PWM driver"); | 167 | MODULE_ALIAS("platform:ab8500-pwm"); |
168 | MODULE_LICENSE("GPL v2"); | 168 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index efec4139c3f6..68cd05b6d829 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c | |||
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg); | |||
33 | static int __devinit cb710_pci_configure(struct pci_dev *pdev) | 33 | static int __devinit cb710_pci_configure(struct pci_dev *pdev) |
34 | { | 34 | { |
35 | unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); | 35 | unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); |
36 | struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); | 36 | struct pci_dev *pdev0; |
37 | u32 val; | 37 | u32 val; |
38 | 38 | ||
39 | cb710_pci_update_config_reg(pdev, 0x48, | 39 | cb710_pci_update_config_reg(pdev, 0x48, |
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev) | |||
43 | if (val & 0x80000000) | 43 | if (val & 0x80000000) |
44 | return 0; | 44 | return 0; |
45 | 45 | ||
46 | pdev0 = pci_get_slot(pdev->bus, devfn); | ||
46 | if (!pdev0) | 47 | if (!pdev0) |
47 | return -ENODEV; | 48 | return -ENODEV; |
48 | 49 | ||
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index 5325a7e70dcf..27dc0d21aafa 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c | |||
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client, | |||
455 | 455 | ||
456 | fail2: | 456 | fail2: |
457 | if (client->irq) | 457 | if (client->irq) |
458 | free_irq(client->irq, NULL); | 458 | free_irq(client->irq, usbsw); |
459 | fail1: | 459 | fail1: |
460 | i2c_set_clientdata(client, NULL); | 460 | i2c_set_clientdata(client, NULL); |
461 | kfree(usbsw); | 461 | kfree(usbsw); |
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client) | |||
466 | { | 466 | { |
467 | struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); | 467 | struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); |
468 | if (client->irq) | 468 | if (client->irq) |
469 | free_irq(client->irq, NULL); | 469 | free_irq(client->irq, usbsw); |
470 | i2c_set_clientdata(client, NULL); | 470 | i2c_set_clientdata(client, NULL); |
471 | 471 | ||
472 | sysfs_remove_group(&client->dev.kobj, &fsa9480_group); | 472 | sysfs_remove_group(&client->dev.kobj, &fsa9480_group); |
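The fsa9480 fix passes the same dev_id cookie to free_irq() that was handed to the request; freeing with NULL when the handler was requested with a non-NULL cookie does not match any registered action, so the handler is never released. A minimal sketch of the pairing (generic request_irq() and illustrative names; the driver itself uses a threaded handler):

#include <linux/interrupt.h>

/* The cookie identifies which handler to remove on a (possibly shared)
 * line, so request and free must use the same pointer.  "priv" stands in
 * for the driver's private data, as "usbsw" does in the driver.
 */
static int example_setup_irq(int irq, irq_handler_t handler, void *priv)
{
	return request_irq(irq, handler, IRQF_TRIGGER_FALLING,
			   "example", priv);
}

static void example_teardown_irq(int irq, void *priv)
{
	free_irq(irq, priv);		/* same cookie, never NULL here */
}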
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 8653bd0b1a33..06df1877ad0f 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/miscdevice.h> | 34 | #include <linux/miscdevice.h> |
35 | #include <linux/pti.h> | 35 | #include <linux/pti.h> |
36 | #include <linux/slab.h> | ||
37 | #include <linux/uaccess.h> | ||
36 | 38 | ||
37 | #define DRIVERNAME "pti" | 39 | #define DRIVERNAME "pti" |
38 | #define PCINAME "pciPTI" | 40 | #define PCINAME "pciPTI" |
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 006a5e9f8ab8..2bf229acd3b8 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, | |||
224 | static int mmc_test_busy(struct mmc_command *cmd) | 224 | static int mmc_test_busy(struct mmc_command *cmd) |
225 | { | 225 | { |
226 | return !(cmd->resp[0] & R1_READY_FOR_DATA) || | 226 | return !(cmd->resp[0] & R1_READY_FOR_DATA) || |
227 | (R1_CURRENT_STATE(cmd->resp[0]) == 7); | 227 | (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); |
228 | } | 228 | } |
229 | 229 | ||
230 | /* | 230 | /* |
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = { | |||
2900 | .release = single_release, | 2900 | .release = single_release, |
2901 | }; | 2901 | }; |
2902 | 2902 | ||
2903 | static void mmc_test_free_file_test(struct mmc_card *card) | 2903 | static void mmc_test_free_dbgfs_file(struct mmc_card *card) |
2904 | { | 2904 | { |
2905 | struct mmc_test_dbgfs_file *df, *dfs; | 2905 | struct mmc_test_dbgfs_file *df, *dfs; |
2906 | 2906 | ||
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card) | |||
2917 | mutex_unlock(&mmc_test_lock); | 2917 | mutex_unlock(&mmc_test_lock); |
2918 | } | 2918 | } |
2919 | 2919 | ||
2920 | static int mmc_test_register_file_test(struct mmc_card *card) | 2920 | static int __mmc_test_register_dbgfs_file(struct mmc_card *card, |
2921 | const char *name, mode_t mode, const struct file_operations *fops) | ||
2921 | { | 2922 | { |
2922 | struct dentry *file = NULL; | 2923 | struct dentry *file = NULL; |
2923 | struct mmc_test_dbgfs_file *df; | 2924 | struct mmc_test_dbgfs_file *df; |
2924 | int ret = 0; | ||
2925 | |||
2926 | mutex_lock(&mmc_test_lock); | ||
2927 | |||
2928 | if (card->debugfs_root) | ||
2929 | file = debugfs_create_file("test", S_IWUSR | S_IRUGO, | ||
2930 | card->debugfs_root, card, &mmc_test_fops_test); | ||
2931 | |||
2932 | if (IS_ERR_OR_NULL(file)) { | ||
2933 | dev_err(&card->dev, | ||
2934 | "Can't create test. Perhaps debugfs is disabled.\n"); | ||
2935 | ret = -ENODEV; | ||
2936 | goto err; | ||
2937 | } | ||
2938 | 2925 | ||
2939 | if (card->debugfs_root) | 2926 | if (card->debugfs_root) |
2940 | file = debugfs_create_file("testlist", S_IRUGO, | 2927 | file = debugfs_create_file(name, mode, card->debugfs_root, |
2941 | card->debugfs_root, card, &mmc_test_fops_testlist); | 2928 | card, fops); |
2942 | 2929 | ||
2943 | if (IS_ERR_OR_NULL(file)) { | 2930 | if (IS_ERR_OR_NULL(file)) { |
2944 | dev_err(&card->dev, | 2931 | dev_err(&card->dev, |
2945 | "Can't create testlist. Perhaps debugfs is disabled.\n"); | 2932 | "Can't create %s. Perhaps debugfs is disabled.\n", |
2946 | ret = -ENODEV; | 2933 | name); |
2947 | goto err; | 2934 | return -ENODEV; |
2948 | } | 2935 | } |
2949 | 2936 | ||
2950 | df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); | 2937 | df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); |
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card) | |||
2952 | debugfs_remove(file); | 2939 | debugfs_remove(file); |
2953 | dev_err(&card->dev, | 2940 | dev_err(&card->dev, |
2954 | "Can't allocate memory for internal usage.\n"); | 2941 | "Can't allocate memory for internal usage.\n"); |
2955 | ret = -ENOMEM; | 2942 | return -ENOMEM; |
2956 | goto err; | ||
2957 | } | 2943 | } |
2958 | 2944 | ||
2959 | df->card = card; | 2945 | df->card = card; |
2960 | df->file = file; | 2946 | df->file = file; |
2961 | 2947 | ||
2962 | list_add(&df->link, &mmc_test_file_test); | 2948 | list_add(&df->link, &mmc_test_file_test); |
2949 | return 0; | ||
2950 | } | ||
2951 | |||
2952 | static int mmc_test_register_dbgfs_file(struct mmc_card *card) | ||
2953 | { | ||
2954 | int ret; | ||
2955 | |||
2956 | mutex_lock(&mmc_test_lock); | ||
2957 | |||
2958 | ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, | ||
2959 | &mmc_test_fops_test); | ||
2960 | if (ret) | ||
2961 | goto err; | ||
2962 | |||
2963 | ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, | ||
2964 | &mmc_test_fops_testlist); | ||
2965 | if (ret) | ||
2966 | goto err; | ||
2963 | 2967 | ||
2964 | err: | 2968 | err: |
2965 | mutex_unlock(&mmc_test_lock); | 2969 | mutex_unlock(&mmc_test_lock); |
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card) | |||
2974 | if (!mmc_card_mmc(card) && !mmc_card_sd(card)) | 2978 | if (!mmc_card_mmc(card) && !mmc_card_sd(card)) |
2975 | return -ENODEV; | 2979 | return -ENODEV; |
2976 | 2980 | ||
2977 | ret = mmc_test_register_file_test(card); | 2981 | ret = mmc_test_register_dbgfs_file(card); |
2978 | if (ret) | 2982 | if (ret) |
2979 | return ret; | 2983 | return ret; |
2980 | 2984 | ||
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card) | |||
2986 | static void mmc_test_remove(struct mmc_card *card) | 2990 | static void mmc_test_remove(struct mmc_card *card) |
2987 | { | 2991 | { |
2988 | mmc_test_free_result(card); | 2992 | mmc_test_free_result(card); |
2989 | mmc_test_free_file_test(card); | 2993 | mmc_test_free_dbgfs_file(card); |
2990 | } | 2994 | } |
2991 | 2995 | ||
2992 | static struct mmc_driver mmc_driver = { | 2996 | static struct mmc_driver mmc_driver = { |
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void) | |||
3006 | { | 3010 | { |
3007 | /* Clear stalled data if card is still plugged */ | 3011 | /* Clear stalled data if card is still plugged */ |
3008 | mmc_test_free_result(NULL); | 3012 | mmc_test_free_result(NULL); |
3009 | mmc_test_free_file_test(NULL); | 3013 | mmc_test_free_dbgfs_file(NULL); |
3010 | 3014 | ||
3011 | mmc_unregister_driver(&mmc_driver); | 3015 | mmc_unregister_driver(&mmc_driver); |
3012 | } | 3016 | } |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 89bdeaec7182..91a0a7460ebb 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -1502,7 +1502,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1502 | goto out; | 1502 | goto out; |
1503 | } | 1503 | } |
1504 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || | 1504 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || |
1505 | R1_CURRENT_STATE(cmd.resp[0]) == 7); | 1505 | R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); |
1506 | out: | 1506 | out: |
1507 | return err; | 1507 | return err; |
1508 | } | 1508 | } |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index aa7d1d79b8c5..5700b1cbdfec 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
259 | } | 259 | } |
260 | 260 | ||
261 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; | 261 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; |
262 | if (card->ext_csd.rev > 5) { | 262 | if (card->ext_csd.rev > 6) { |
263 | printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", | 263 | printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", |
264 | mmc_hostname(card->host), card->ext_csd.rev); | 264 | mmc_hostname(card->host), card->ext_csd.rev); |
265 | err = -EINVAL; | 265 | err = -EINVAL; |
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 845ce7c533b9..770c3d06f5dc 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
407 | break; | 407 | break; |
408 | if (mmc_host_is_spi(card->host)) | 408 | if (mmc_host_is_spi(card->host)) |
409 | break; | 409 | break; |
410 | } while (R1_CURRENT_STATE(status) == 7); | 410 | } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); |
411 | 411 | ||
412 | if (mmc_host_is_spi(card->host)) { | 412 | if (mmc_host_is_spi(card->host)) { |
413 | if (status & R1_SPI_ILLEGAL_COMMAND) | 413 | if (status & R1_SPI_ILLEGAL_COMMAND) |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 77f0b6b1681d..ff0f714b012c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -62,7 +62,7 @@ struct idmac_desc { | |||
62 | 62 | ||
63 | u32 des1; /* Buffer sizes */ | 63 | u32 des1; /* Buffer sizes */ |
64 | #define IDMAC_SET_BUFFER1_SIZE(d, s) \ | 64 | #define IDMAC_SET_BUFFER1_SIZE(d, s) \ |
65 | ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) | 65 | ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) |
66 | 66 | ||
67 | u32 des2; /* buffer 1 physical address */ | 67 | u32 des2; /* buffer 1 physical address */ |
68 | 68 | ||
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
699 | } | 699 | } |
700 | 700 | ||
701 | /* DDR mode set */ | 701 | /* DDR mode set */ |
702 | if (ios->ddr) { | 702 | if (ios->timing == MMC_TIMING_UHS_DDR50) { |
703 | regs = mci_readl(slot->host, UHS_REG); | 703 | regs = mci_readl(slot->host, UHS_REG); |
704 | regs |= (0x1 << slot->id) << 16; | 704 | regs |= (0x1 << slot->id) << 16; |
705 | mci_writel(slot->host, UHS_REG, regs); | 705 | mci_writel(slot->host, UHS_REG, regs); |
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1646 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 1646 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
1647 | 1647 | ||
1648 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) | 1648 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) |
1649 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | 1649 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
1650 | 1650 | ||
1651 | #ifdef CONFIG_MMC_DW_IDMAC | 1651 | #ifdef CONFIG_MMC_DW_IDMAC |
1652 | mmc->max_segs = host->ring_size; | 1652 | mmc->max_segs = host->ring_size; |
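The IDMAC_SET_BUFFER1_SIZE() change above narrows the buffer-1 size field from 14 bits (0x3fff) to 13 bits (0x1fff). Assuming the DesignWare IDMAC des1 layout that the new keep-mask 0x03ffe000 implies (BS1 in bits [12:0], BS2 in bits [25:13]), the old mask let a size value spill into bit 13 and clobber the adjacent buffer-2 size; with the fix the largest per-descriptor buffer is 8191 bytes. A minimal userspace sketch of the corrected bit arithmetic follows; the helper name and test values are made up:

#include <assert.h>
#include <stdint.h>

/* Mirrors the fixed macro: preserve BS2 in bits [25:13], set BS1 in [12:0]. */
static uint32_t set_buffer1_size(uint32_t des1, uint32_t size)
{
	return (des1 & 0x03ffe000u) | (size & 0x1fffu);
}

int main(void)
{
	uint32_t des1 = 1u << 13;              /* pretend BS2 already holds 1 */

	des1 = set_buffer1_size(des1, 0x1fff); /* largest 13-bit size, 8191 bytes */
	assert(((des1 >> 13) & 0x1fff) == 1);  /* BS2 is untouched */
	assert((des1 & 0x1fff) == 0x1fff);     /* BS1 holds the full size */
	return 0;
}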
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 9ebfb4b482f5..0e9780f5a4a9 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "sdhci-pltfm.h" | 27 | #include "sdhci-pltfm.h" |
28 | #include "sdhci-esdhc.h" | 28 | #include "sdhci-esdhc.h" |
29 | 29 | ||
30 | #define SDHCI_CTRL_D3CD 0x08 | ||
30 | /* VENDOR SPEC register */ | 31 | /* VENDOR SPEC register */ |
31 | #define SDHCI_VENDOR_SPEC 0xC0 | 32 | #define SDHCI_VENDOR_SPEC 0xC0 |
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 33 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 |
@@ -141,13 +142,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
141 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 142 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
142 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 143 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
143 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | 144 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; |
144 | 145 | u32 data; | |
145 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | 146 | |
146 | && (boarddata->cd_type == ESDHC_CD_GPIO))) | 147 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { |
147 | /* | 148 | if (boarddata->cd_type == ESDHC_CD_GPIO) |
148 | * these interrupts won't work with a custom card_detect gpio | 149 | /* |
149 | */ | 150 | * These interrupts won't work with a custom |
150 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | 151 | * card_detect gpio (only applied to mx25/35) |
152 | */ | ||
153 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | ||
154 | |||
155 | if (val & SDHCI_INT_CARD_INT) { | ||
156 | /* | ||
157 | * Clear and then set D3CD bit to avoid missing the | ||
158 | 			 * card interrupt. This is an eSDHC controller problem, | ||
159 | 			 * so we apply the following workaround: clearing and | ||
160 | 			 * then setting the D3CD bit makes the eSDHC re-sample | ||
161 | 			 * the card interrupt, so any card interrupt that was | ||
162 | 			 * lost is picked up again. | ||
163 | */ | ||
164 | data = readl(host->ioaddr + SDHCI_HOST_CONTROL); | ||
165 | data &= ~SDHCI_CTRL_D3CD; | ||
166 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | ||
167 | data |= SDHCI_CTRL_D3CD; | ||
168 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | ||
169 | } | ||
170 | } | ||
151 | 171 | ||
152 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | 172 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
153 | && (reg == SDHCI_INT_STATUS) | 173 | && (reg == SDHCI_INT_STATUS) |
@@ -217,8 +237,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
217 | */ | 237 | */ |
218 | return; | 238 | return; |
219 | case SDHCI_HOST_CONTROL: | 239 | case SDHCI_HOST_CONTROL: |
220 | /* FSL messed up here, so we can just keep those two */ | 240 | /* FSL messed up here, so we can just keep those three */ |
221 | new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); | 241 | new_val = val & (SDHCI_CTRL_LED | \ |
242 | SDHCI_CTRL_4BITBUS | \ | ||
243 | SDHCI_CTRL_D3CD); | ||
222 | /* ensure the endianess */ | 244 | /* ensure the endianess */ |
223 | new_val |= ESDHC_HOST_CONTROL_LE; | 245 | new_val |= ESDHC_HOST_CONTROL_LE; |
224 | /* DMA mode bits are shifted */ | 246 | /* DMA mode bits are shifted */ |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 4198dbbc5c20..fc7e4a515629 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | |||
195 | clk_enable(clk); | 195 | clk_enable(clk); |
196 | 196 | ||
197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; | 198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
199 | | SDHCI_QUIRK_32BIT_ADMA_SIZE; | ||
199 | 200 | ||
200 | /* enable 1/8V DDR capable */ | 201 | /* enable 1/8V DDR capable */ |
201 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | 202 | host->mmc->caps |= MMC_CAP_1_8V_DDR; |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 460ffaf0f6d7..2bd7bf4fece7 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/module.h> | ||
22 | 23 | ||
23 | #include <linux/mmc/host.h> | 24 | #include <linux/mmc/host.h> |
24 | 25 | ||
@@ -502,6 +503,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
502 | /* This host supports the Auto CMD12 */ | 503 | /* This host supports the Auto CMD12 */ |
503 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; | 504 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; |
504 | 505 | ||
506 | /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */ | ||
507 | host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; | ||
508 | |||
505 | if (pdata->cd_type == S3C_SDHCI_CD_NONE || | 509 | if (pdata->cd_type == S3C_SDHCI_CD_NONE || |
506 | pdata->cd_type == S3C_SDHCI_CD_PERMANENT) | 510 | pdata->cd_type == S3C_SDHCI_CD_PERMANENT) |
507 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 511 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c31a3343340d..0e02cc1df12e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
628 | /* timeout in us */ | 628 | /* timeout in us */ |
629 | if (!data) | 629 | if (!data) |
630 | target_timeout = cmd->cmd_timeout_ms * 1000; | 630 | target_timeout = cmd->cmd_timeout_ms * 1000; |
631 | else | 631 | else { |
632 | target_timeout = data->timeout_ns / 1000 + | 632 | target_timeout = data->timeout_ns / 1000; |
633 | data->timeout_clks / host->clock; | 633 | if (host->clock) |
634 | 634 | target_timeout += data->timeout_clks / host->clock; | |
635 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | 635 | } |
636 | host->timeout_clk = host->clock / 1000; | ||
637 | 636 | ||
638 | /* | 637 | /* |
639 | * Figure out needed cycles. | 638 | * Figure out needed cycles. |
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
645 | * => | 644 | * => |
646 | * (1) / (2) > 2^6 | 645 | * (1) / (2) > 2^6 |
647 | */ | 646 | */ |
648 | BUG_ON(!host->timeout_clk); | ||
649 | count = 0; | 647 | count = 0; |
650 | current_timeout = (1 << 13) * 1000 / host->timeout_clk; | 648 | current_timeout = (1 << 13) * 1000 / host->timeout_clk; |
651 | while (current_timeout < target_timeout) { | 649 | while (current_timeout < target_timeout) { |
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
1867 | 1865 | ||
1868 | del_timer(&host->timer); | 1866 | del_timer(&host->timer); |
1869 | 1867 | ||
1870 | if (host->version >= SDHCI_SPEC_300) | ||
1871 | del_timer(&host->tuning_timer); | ||
1872 | |||
1873 | mrq = host->mrq; | 1868 | mrq = host->mrq; |
1874 | 1869 | ||
1875 | /* | 1870 | /* |
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2461 | host->max_clk = host->ops->get_max_clock(host); | 2456 | host->max_clk = host->ops->get_max_clock(host); |
2462 | } | 2457 | } |
2463 | 2458 | ||
2464 | host->timeout_clk = | ||
2465 | (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; | ||
2466 | if (host->timeout_clk == 0) { | ||
2467 | if (host->ops->get_timeout_clock) { | ||
2468 | host->timeout_clk = host->ops->get_timeout_clock(host); | ||
2469 | } else if (!(host->quirks & | ||
2470 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | ||
2471 | printk(KERN_ERR | ||
2472 | "%s: Hardware doesn't specify timeout clock " | ||
2473 | "frequency.\n", mmc_hostname(mmc)); | ||
2474 | return -ENODEV; | ||
2475 | } | ||
2476 | } | ||
2477 | if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) | ||
2478 | host->timeout_clk *= 1000; | ||
2479 | |||
2480 | /* | 2459 | /* |
2481 | * In case of Host Controller v3.00, find out whether clock | 2460 | * In case of Host Controller v3.00, find out whether clock |
2482 | * multiplier is supported. | 2461 | * multiplier is supported. |
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2509 | } else | 2488 | } else |
2510 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; | 2489 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
2511 | 2490 | ||
2491 | host->timeout_clk = | ||
2492 | (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; | ||
2493 | if (host->timeout_clk == 0) { | ||
2494 | if (host->ops->get_timeout_clock) { | ||
2495 | host->timeout_clk = host->ops->get_timeout_clock(host); | ||
2496 | } else if (!(host->quirks & | ||
2497 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | ||
2498 | printk(KERN_ERR | ||
2499 | "%s: Hardware doesn't specify timeout clock " | ||
2500 | "frequency.\n", mmc_hostname(mmc)); | ||
2501 | return -ENODEV; | ||
2502 | } | ||
2503 | } | ||
2504 | if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) | ||
2505 | host->timeout_clk *= 1000; | ||
2506 | |||
2512 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | 2507 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) |
2513 | mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); | 2508 | host->timeout_clk = mmc->f_max / 1000; |
2514 | else | 2509 | |
2515 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; | 2510 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; |
2516 | 2511 | ||
2517 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; | 2512 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; |
2518 | 2513 | ||
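The sdhci.c hunks above move the timeout-clock setup to after mmc->f_max is known, derive timeout_clk from f_max / 1000 when the SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK quirk is set, and guard the data->timeout_clks term against a zero host->clock. For reference, here is a standalone sketch of the count search those values feed into, mirroring the loop in sdhci_calc_timeout(); the function name and example numbers are made up, and the 0xF cap is assumed from the driver:

#include <stdio.h>

/*
 * Find the smallest count such that (1 << (13 + count)) timeout-clock
 * cycles cover target_timeout.  As in the driver, timeout_clk is in kHz
 * and target_timeout in microseconds.
 */
static unsigned int calc_timeout_count(unsigned int target_timeout,
				       unsigned int timeout_clk)
{
	unsigned int count = 0;
	unsigned int current_timeout = (1 << 13) * 1000 / timeout_clk;

	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}
	return count;
}

int main(void)
{
	/* e.g. a 500 ms erase timeout against a 50 MHz (50000 kHz) timeout clock */
	printf("count = %u\n", calc_timeout_count(500 * 1000, 50000));
	return 0;
}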
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 8d185de90d20..44a9668c4b7a 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -27,7 +27,6 @@ | |||
27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
28 | { | 28 | { |
29 | const struct mfd_cell *cell = mfd_get_cell(dev); | 29 | const struct mfd_cell *cell = mfd_get_cell(dev); |
30 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
31 | int ret; | 30 | int ret; |
32 | 31 | ||
33 | ret = tmio_mmc_host_suspend(&dev->dev); | 32 | ret = tmio_mmc_host_suspend(&dev->dev); |
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | |||
42 | static int tmio_mmc_resume(struct platform_device *dev) | 41 | static int tmio_mmc_resume(struct platform_device *dev) |
43 | { | 42 | { |
44 | const struct mfd_cell *cell = mfd_get_cell(dev); | 43 | const struct mfd_cell *cell = mfd_get_cell(dev); |
45 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
46 | int ret = 0; | 44 | int ret = 0; |
47 | 45 | ||
48 | /* Tell the MFD core we are ready to be enabled */ | 46 | /* Tell the MFD core we are ready to be enabled */ |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index d724a18b5285..37e5790681ad 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) | |||
63 | fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); | 63 | fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); |
64 | 64 | ||
65 | #ifdef BCM_CNIC | 65 | #ifdef BCM_CNIC |
66 | /* We don't want TPA on FCoE, FWD and OOO L2 rings */ | 66 | /* We don't want TPA on an FCoE L2 ring */ |
67 | bnx2x_fcoe(bp, disable_tpa) = 1; | 67 | if (IS_FCOE_FP(fp)) |
68 | fp->disable_tpa = 1; | ||
68 | #endif | 69 | #endif |
69 | } | 70 | } |
70 | 71 | ||
@@ -1404,10 +1405,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1404 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1405 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) |
1405 | { | 1406 | { |
1406 | struct bnx2x *bp = netdev_priv(dev); | 1407 | struct bnx2x *bp = netdev_priv(dev); |
1408 | |||
1407 | #ifdef BCM_CNIC | 1409 | #ifdef BCM_CNIC |
1408 | if (NO_FCOE(bp)) | 1410 | if (!NO_FCOE(bp)) { |
1409 | return skb_tx_hash(dev, skb); | ||
1410 | else { | ||
1411 | struct ethhdr *hdr = (struct ethhdr *)skb->data; | 1411 | struct ethhdr *hdr = (struct ethhdr *)skb->data; |
1412 | u16 ether_type = ntohs(hdr->h_proto); | 1412 | u16 ether_type = ntohs(hdr->h_proto); |
1413 | 1413 | ||
@@ -1424,8 +1424,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
1424 | return bnx2x_fcoe_tx(bp, txq_index); | 1424 | return bnx2x_fcoe_tx(bp, txq_index); |
1425 | } | 1425 | } |
1426 | #endif | 1426 | #endif |
1427 | /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring | 1427 | /* select a non-FCoE queue */ |
1428 | */ | ||
1429 | return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); | 1428 | return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); |
1430 | } | 1429 | } |
1431 | 1430 | ||
@@ -1448,6 +1447,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1448 | bp->num_queues += NON_ETH_CONTEXT_USE; | 1447 | bp->num_queues += NON_ETH_CONTEXT_USE; |
1449 | } | 1448 | } |
1450 | 1449 | ||
1450 | /** | ||
1451 | * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues | ||
1452 | * | ||
1453 | * @bp: Driver handle | ||
1454 | * | ||
1455 |  * We currently support at most 16 Tx queues for each CoS, so we | ||
1456 |  * allocate a multiple of 16 Tx queues for the ETH L2 rings, according | ||
1457 |  * to the value of bp->max_cos. | ||
1458 | * | ||
1459 | * If there is an FCoE L2 queue the appropriate Tx queue will have the next | ||
1460 | * index after all ETH L2 indices. | ||
1461 | * | ||
1462 |  * If the actual number of Tx queues (for each CoS) is less than 16, there | ||
1463 |  * will be holes at the end of each group of 16 ETH L2 indices (0..15, | ||
1464 |  * 16..31, ...) with indices that are not coupled to any real Tx queue. | ||
1465 | * | ||
1466 | * The proper configuration of skb->queue_mapping is handled by | ||
1467 | * bnx2x_select_queue() and __skb_tx_hash(). | ||
1468 | * | ||
1469 | * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() | ||
1470 | * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). | ||
1471 | */ | ||
1451 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | 1472 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) |
1452 | { | 1473 | { |
1453 | int rc, tx, rx; | 1474 | int rc, tx, rx; |
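To make the queue layout described in the new bnx2x_set_real_num_queues() comment concrete, here is an illustration with made-up numbers (8 ETH L2 rings, max_cos = 2); it only shows the index arithmetic the comment implies, not driver code:

#include <stdio.h>

#define QUEUES_PER_COS 16	/* slots reserved per CoS, per the comment above */

static int tx_index(int cos, int ring)
{
	return cos * QUEUES_PER_COS + ring;
}

int main(void)
{
	int num_rings = 8, max_cos = 2;

	/* CoS 0 uses indices 0..7, CoS 1 uses 16..23; 8..15 and 24..31 are holes. */
	for (int cos = 0; cos < max_cos; cos++)
		printf("CoS %d: Tx indices %d..%d\n", cos,
		       tx_index(cos, 0), tx_index(cos, num_rings - 1));
	return 0;
}

An FCoE L2 queue, when present, then takes the first index after all the ETH L2 indices.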
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a4ea35f6a456..a1e004a82f7a 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, | |||
920 | 920 | ||
921 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) | 921 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) |
922 | { | 922 | { |
923 | if (!CHIP_IS_E1x(bp)) { | 923 | if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { |
924 | bp->dcb_state = dcb_on; | 924 | bp->dcb_state = dcb_on; |
925 | bp->dcbx_enabled = dcbx_enabled; | 925 | bp->dcbx_enabled = dcbx_enabled; |
926 | } else { | 926 | } else { |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 150709111548..f74582a22c68 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -5798,6 +5798,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5798 | 5798 | ||
5799 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); | 5799 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); |
5800 | 5800 | ||
5801 | /* | ||
5802 | * take the UNDI lock to protect undi_unload flow from accessing | ||
5803 | * registers while we're resetting the chip | ||
5804 | */ | ||
5805 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
5806 | |||
5801 | bnx2x_reset_common(bp); | 5807 | bnx2x_reset_common(bp); |
5802 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5808 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
5803 | 5809 | ||
@@ -5808,6 +5814,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5808 | } | 5814 | } |
5809 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | 5815 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); |
5810 | 5816 | ||
5817 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
5818 | |||
5811 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); | 5819 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
5812 | 5820 | ||
5813 | if (!CHIP_IS_E1x(bp)) { | 5821 | if (!CHIP_IS_E1x(bp)) { |
@@ -10251,10 +10259,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
10251 | /* clean indirect addresses */ | 10259 | /* clean indirect addresses */ |
10252 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 10260 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
10253 | PCICFG_VENDOR_ID_OFFSET); | 10261 | PCICFG_VENDOR_ID_OFFSET); |
10254 | 	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); | 10262 | 	/* Clean the following indirect addresses for all functions since they | 
10255 | 	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); | 10263 | 	 * are not used by the driver. | 
10256 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); | 10264 | */ |
10257 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); | 10265 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); |
10266 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); | ||
10267 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); | ||
10268 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); | ||
10269 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); | ||
10270 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); | ||
10271 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | ||
10272 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | ||
10258 | 10273 | ||
10259 | /* | 10274 | /* |
10260 | * Enable internal target-read (in case we are probed after PF FLR). | 10275 | * Enable internal target-read (in case we are probed after PF FLR). |
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 27b5ecb11830..40266c14e6dc 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -3007,11 +3007,27 @@ | |||
3007 | /* [R 6] Debug only: Number of used entries in the data FIFO */ | 3007 | /* [R 6] Debug only: Number of used entries in the data FIFO */ |
3008 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c | 3008 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c |
3009 | /* [R 7] Debug only: Number of used entries in the header FIFO */ | 3009 | /* [R 7] Debug only: Number of used entries in the header FIFO */ |
3010 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 | 3010 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 |
3011 | #define PXP2_REG_PGL_ADDR_88_F0 0x120534 | 3011 | #define PXP2_REG_PGL_ADDR_88_F0 0x120534 |
3012 | #define PXP2_REG_PGL_ADDR_8C_F0 0x120538 | 3012 | /* [R 32] GRC address for configuration access to PCIE config address 0x88. |
3013 | #define PXP2_REG_PGL_ADDR_90_F0 0x12053c | 3013 | * any write to this PCIE address will cause a GRC write access to the |
3014 | #define PXP2_REG_PGL_ADDR_94_F0 0x120540 | 3014 |  * address that's in this register */ | 
3015 | #define PXP2_REG_PGL_ADDR_88_F1 0x120544 | ||
3016 | #define PXP2_REG_PGL_ADDR_8C_F0 0x120538 | ||
3017 | /* [R 32] GRC address for configuration access to PCIE config address 0x8c. | ||
3018 | * any write to this PCIE address will cause a GRC write access to the | ||
3019 |  * address that's in this register */ | ||
3020 | #define PXP2_REG_PGL_ADDR_8C_F1 0x120548 | ||
3021 | #define PXP2_REG_PGL_ADDR_90_F0 0x12053c | ||
3022 | /* [R 32] GRC address for configuration access to PCIE config address 0x90. | ||
3023 | * any write to this PCIE address will cause a GRC write access to the | ||
3024 |  * address that's in this register */ | ||
3025 | #define PXP2_REG_PGL_ADDR_90_F1 0x12054c | ||
3026 | #define PXP2_REG_PGL_ADDR_94_F0 0x120540 | ||
3027 | /* [R 32] GRC address for configuration access to PCIE config address 0x94. | ||
3028 | * any write to this PCIE address will cause a GRC write access to the | ||
3029 |  * address that's in this register */ | ||
3030 | #define PXP2_REG_PGL_ADDR_94_F1 0x120550 | ||
3015 | #define PXP2_REG_PGL_CONTROL0 0x120490 | 3031 | #define PXP2_REG_PGL_CONTROL0 0x120490 |
3016 | #define PXP2_REG_PGL_CONTROL1 0x120514 | 3032 | #define PXP2_REG_PGL_CONTROL1 0x120514 |
3017 | #define PXP2_REG_PGL_DEBUG 0x120520 | 3033 | #define PXP2_REG_PGL_DEBUG 0x120520 |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38a83acd502e..43f2ea541088 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3419,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) | |||
3419 | static int bond_open(struct net_device *bond_dev) | 3419 | static int bond_open(struct net_device *bond_dev) |
3420 | { | 3420 | { |
3421 | struct bonding *bond = netdev_priv(bond_dev); | 3421 | struct bonding *bond = netdev_priv(bond_dev); |
3422 | struct slave *slave; | ||
3423 | int i; | ||
3422 | 3424 | ||
3423 | bond->kill_timers = 0; | 3425 | bond->kill_timers = 0; |
3424 | 3426 | ||
3427 | /* reset slave->backup and slave->inactive */ | ||
3428 | read_lock(&bond->lock); | ||
3429 | if (bond->slave_cnt > 0) { | ||
3430 | read_lock(&bond->curr_slave_lock); | ||
3431 | bond_for_each_slave(bond, slave, i) { | ||
3432 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | ||
3433 | && (slave != bond->curr_active_slave)) { | ||
3434 | bond_set_slave_inactive_flags(slave); | ||
3435 | } else { | ||
3436 | bond_set_slave_active_flags(slave); | ||
3437 | } | ||
3438 | } | ||
3439 | read_unlock(&bond->curr_slave_lock); | ||
3440 | } | ||
3441 | read_unlock(&bond->lock); | ||
3442 | |||
3425 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); | 3443 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); |
3426 | 3444 | ||
3427 | if (bond_is_lb(bond)) { | 3445 | if (bond_is_lb(bond)) { |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 231385b8e08f..c7f3d4ea1167 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev) | |||
408 | struct sja1000_priv *priv; | 408 | struct sja1000_priv *priv; |
409 | int i = 0; | 409 | int i = 0; |
410 | 410 | ||
411 | for (i = 0; i < card->channels; i++) { | 411 | for (i = 0; i < PLX_PCI_MAX_CHAN; i++) { |
412 | dev = card->net_dev[i]; | 412 | dev = card->net_dev[i]; |
413 | if (!dev) | 413 | if (!dev) |
414 | continue; | 414 | continue; |
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, | |||
536 | if (err) { | 536 | if (err) { |
537 | dev_err(&pdev->dev, "Registering device failed " | 537 | dev_err(&pdev->dev, "Registering device failed " |
538 | "(err=%d)\n", err); | 538 | "(err=%d)\n", err); |
539 | free_sja1000dev(dev); | ||
540 | goto failure_cleanup; | 539 | goto failure_cleanup; |
541 | } | 540 | } |
542 | 541 | ||
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, | |||
549 | dev_err(&pdev->dev, "Channel #%d not detected\n", | 548 | dev_err(&pdev->dev, "Channel #%d not detected\n", |
550 | i + 1); | 549 | i + 1); |
551 | free_sja1000dev(dev); | 550 | free_sja1000dev(dev); |
551 | card->net_dev[i] = NULL; | ||
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index f523f1cc5142..4b70b7e8bdeb 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl) | |||
197 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 197 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
198 | memcpy(skb_put(skb, sizeof(struct can_frame)), | 198 | memcpy(skb_put(skb, sizeof(struct can_frame)), |
199 | &cf, sizeof(struct can_frame)); | 199 | &cf, sizeof(struct can_frame)); |
200 | netif_rx(skb); | 200 | netif_rx_ni(skb); |
201 | 201 | ||
202 | sl->dev->stats.rx_packets++; | 202 | sl->dev->stats.rx_packets++; |
203 | sl->dev->stats.rx_bytes += cf.can_dlc; | 203 | sl->dev->stats.rx_bytes += cf.can_dlc; |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 480f2592f8a5..536b3a55c45f 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = { | |||
2085 | | FLAG_HAS_AMT | 2085 | | FLAG_HAS_AMT |
2086 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 2086 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
2087 | .flags2 = FLAG2_CHECK_PHY_HANG | 2087 | .flags2 = FLAG2_CHECK_PHY_HANG |
2088 | | FLAG2_DISABLE_ASPM_L0S, | 2088 | | FLAG2_DISABLE_ASPM_L0S |
2089 | | FLAG2_NO_DISABLE_RX, | ||
2089 | .pba = 32, | 2090 | .pba = 32, |
2090 | .max_hw_frame_size = DEFAULT_JUMBO, | 2091 | .max_hw_frame_size = DEFAULT_JUMBO, |
2091 | .get_variants = e1000_get_variants_82571, | 2092 | .get_variants = e1000_get_variants_82571, |
@@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = { | |||
2104 | | FLAG_HAS_AMT | 2105 | | FLAG_HAS_AMT |
2105 | | FLAG_HAS_JUMBO_FRAMES | 2106 | | FLAG_HAS_JUMBO_FRAMES |
2106 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 2107 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
2107 | .flags2 = FLAG2_DISABLE_ASPM_L0S, | 2108 | .flags2 = FLAG2_DISABLE_ASPM_L0S |
2109 | | FLAG2_NO_DISABLE_RX, | ||
2108 | .pba = 32, | 2110 | .pba = 32, |
2109 | .max_hw_frame_size = DEFAULT_JUMBO, | 2111 | .max_hw_frame_size = DEFAULT_JUMBO, |
2110 | .get_variants = e1000_get_variants_82571, | 2112 | .get_variants = e1000_get_variants_82571, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 638d175792cf..8533ad7f3559 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -155,6 +155,9 @@ struct e1000_info; | |||
155 | #define HV_M_STATUS_SPEED_1000 0x0200 | 155 | #define HV_M_STATUS_SPEED_1000 0x0200 |
156 | #define HV_M_STATUS_LINK_UP 0x0040 | 156 | #define HV_M_STATUS_LINK_UP 0x0040 |
157 | 157 | ||
158 | #define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ | ||
159 | #define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 | ||
160 | |||
158 | /* Time to wait before putting the device into D3 if there's no link (in ms). */ | 161 | /* Time to wait before putting the device into D3 if there's no link (in ms). */ |
159 | #define LINK_TIMEOUT 100 | 162 | #define LINK_TIMEOUT 100 |
160 | 163 | ||
@@ -453,6 +456,8 @@ struct e1000_info { | |||
453 | #define FLAG2_DISABLE_ASPM_L0S (1 << 7) | 456 | #define FLAG2_DISABLE_ASPM_L0S (1 << 7) |
454 | #define FLAG2_DISABLE_AIM (1 << 8) | 457 | #define FLAG2_DISABLE_AIM (1 << 8) |
455 | #define FLAG2_CHECK_PHY_HANG (1 << 9) | 458 | #define FLAG2_CHECK_PHY_HANG (1 << 9) |
459 | #define FLAG2_NO_DISABLE_RX (1 << 10) | ||
460 | #define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) | ||
456 | 461 | ||
457 | #define E1000_RX_DESC_PS(R, i) \ | 462 | #define E1000_RX_DESC_PS(R, i) \ |
458 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 463 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 06d88f316dce..6a0526a59a8a 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1206,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1206 | rx_ring->next_to_clean = 0; | 1206 | rx_ring->next_to_clean = 0; |
1207 | 1207 | ||
1208 | rctl = er32(RCTL); | 1208 | rctl = er32(RCTL); |
1209 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 1209 | if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) |
1210 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
1210 | ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); | 1211 | ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); |
1211 | ew32(RDBAH, ((u64) rx_ring->dma >> 32)); | 1212 | ew32(RDBAH, ((u64) rx_ring->dma >> 32)); |
1212 | ew32(RDLEN, rx_ring->size); | 1213 | ew32(RDLEN, rx_ring->size); |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 4e36978b8fd8..54add27c8f76 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -137,8 +137,9 @@ | |||
137 | #define HV_PM_CTRL PHY_REG(770, 17) | 137 | #define HV_PM_CTRL PHY_REG(770, 17) |
138 | 138 | ||
139 | /* PHY Low Power Idle Control */ | 139 | /* PHY Low Power Idle Control */ |
140 | #define I82579_LPI_CTRL PHY_REG(772, 20) | 140 | #define I82579_LPI_CTRL PHY_REG(772, 20) |
141 | #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 | 141 | #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 |
142 | #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 | ||
142 | 143 | ||
143 | /* EMI Registers */ | 144 | /* EMI Registers */ |
144 | #define I82579_EMI_ADDR 0x10 | 145 | #define I82579_EMI_ADDR 0x10 |
@@ -163,6 +164,11 @@ | |||
163 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) | 164 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) |
164 | #define HV_KMRN_MDIO_SLOW 0x0400 | 165 | #define HV_KMRN_MDIO_SLOW 0x0400 |
165 | 166 | ||
167 | /* KMRN FIFO Control and Status */ | ||
168 | #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) | ||
169 | #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 | ||
170 | #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 | ||
171 | |||
166 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | 172 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ |
167 | /* Offset 04h HSFSTS */ | 173 | /* Offset 04h HSFSTS */ |
168 | union ich8_hws_flash_status { | 174 | union ich8_hws_flash_status { |
@@ -657,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
657 | struct e1000_mac_info *mac = &hw->mac; | 663 | struct e1000_mac_info *mac = &hw->mac; |
658 | s32 ret_val; | 664 | s32 ret_val; |
659 | bool link; | 665 | bool link; |
666 | u16 phy_reg; | ||
660 | 667 | ||
661 | /* | 668 | /* |
662 | * We only want to go out to the PHY registers to see if Auto-Neg | 669 | * We only want to go out to the PHY registers to see if Auto-Neg |
@@ -689,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
689 | 696 | ||
690 | mac->get_link_status = false; | 697 | mac->get_link_status = false; |
691 | 698 | ||
692 | if (hw->phy.type == e1000_phy_82578) { | 699 | switch (hw->mac.type) { |
693 | ret_val = e1000_link_stall_workaround_hv(hw); | 700 | case e1000_pch2lan: |
694 | if (ret_val) | ||
695 | goto out; | ||
696 | } | ||
697 | |||
698 | if (hw->mac.type == e1000_pch2lan) { | ||
699 | ret_val = e1000_k1_workaround_lv(hw); | 701 | ret_val = e1000_k1_workaround_lv(hw); |
700 | if (ret_val) | 702 | if (ret_val) |
701 | goto out; | 703 | goto out; |
704 | /* fall-thru */ | ||
705 | case e1000_pchlan: | ||
706 | if (hw->phy.type == e1000_phy_82578) { | ||
707 | ret_val = e1000_link_stall_workaround_hv(hw); | ||
708 | if (ret_val) | ||
709 | goto out; | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Workaround for PCHx parts in half-duplex: | ||
714 | * Set the number of preambles removed from the packet | ||
715 | * when it is passed from the PHY to the MAC to prevent | ||
716 | * the MAC from misinterpreting the packet type. | ||
717 | */ | ||
718 | e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); | ||
719 | phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; | ||
720 | |||
721 | if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) | ||
722 | phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); | ||
723 | |||
724 | e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); | ||
725 | break; | ||
726 | default: | ||
727 | break; | ||
702 | } | 728 | } |
703 | 729 | ||
704 | /* | 730 | /* |
@@ -788,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | |||
788 | (adapter->hw.phy.type == e1000_phy_igp_3)) | 814 | (adapter->hw.phy.type == e1000_phy_igp_3)) |
789 | adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; | 815 | adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; |
790 | 816 | ||
817 | /* Enable workaround for 82579 w/ ME enabled */ | ||
818 | if ((adapter->hw.mac.type == e1000_pch2lan) && | ||
819 | (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
820 | adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; | ||
821 | |||
791 | /* Disable EEE by default until IEEE802.3az spec is finalized */ | 822 | /* Disable EEE by default until IEEE802.3az spec is finalized */ |
792 | if (adapter->flags2 & FLAG2_HAS_EEE) | 823 | if (adapter->flags2 & FLAG2_HAS_EEE) |
793 | adapter->hw.dev_spec.ich8lan.eee_disable = true; | 824 | adapter->hw.dev_spec.ich8lan.eee_disable = true; |
@@ -1355,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1355 | return ret_val; | 1386 | return ret_val; |
1356 | 1387 | ||
1357 | /* Preamble tuning for SSC */ | 1388 | /* Preamble tuning for SSC */ |
1358 | ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); | 1389 | ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); |
1359 | if (ret_val) | 1390 | if (ret_val) |
1360 | return ret_val; | 1391 | return ret_val; |
1361 | } | 1392 | } |
@@ -1645,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | |||
1645 | s32 ret_val = 0; | 1676 | s32 ret_val = 0; |
1646 | u16 status_reg = 0; | 1677 | u16 status_reg = 0; |
1647 | u32 mac_reg; | 1678 | u32 mac_reg; |
1679 | u16 phy_reg; | ||
1648 | 1680 | ||
1649 | if (hw->mac.type != e1000_pch2lan) | 1681 | if (hw->mac.type != e1000_pch2lan) |
1650 | goto out; | 1682 | goto out; |
@@ -1659,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | |||
1659 | mac_reg = er32(FEXTNVM4); | 1691 | mac_reg = er32(FEXTNVM4); |
1660 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; | 1692 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; |
1661 | 1693 | ||
1662 | if (status_reg & HV_M_STATUS_SPEED_1000) | 1694 | ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); |
1695 | if (ret_val) | ||
1696 | goto out; | ||
1697 | |||
1698 | if (status_reg & HV_M_STATUS_SPEED_1000) { | ||
1663 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; | 1699 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; |
1664 | else | 1700 | phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; |
1701 | } else { | ||
1665 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; | 1702 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; |
1666 | 1703 | phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; | |
1704 | } | ||
1667 | ew32(FEXTNVM4, mac_reg); | 1705 | ew32(FEXTNVM4, mac_reg); |
1706 | ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); | ||
1668 | } | 1707 | } |
1669 | 1708 | ||
1670 | out: | 1709 | out: |
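The half-duplex preamble workaround added to e1000_check_for_copper_link_ich8lan() above is a read-modify-write of the 3-bit preamble-count field at bits [14:12] of HV_KMRN_FIFO_CTRLSTA. Below is a standalone sketch of just that bit manipulation; the starting register value is made up, and a plain variable stands in for the e1e_rphy()/e1e_wphy() accesses:

#include <assert.h>
#include <stdint.h>

#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK  0x7000u
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12

int main(void)
{
	uint16_t phy_reg = 0x5abc;	/* pretend value read via e1e_rphy() */
	int full_duplex = 0;		/* half-duplex is the case the workaround targets */

	phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
	if (!full_duplex)
		phy_reg |= 1u << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT;

	/* In half-duplex the field ends up holding 1 removed preamble. */
	assert(((phy_reg & HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK) >> 12) == 1);
	return 0;
}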
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 7898a67d6505..0893ab107adf 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) | |||
190 | /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ | 190 | /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ |
191 | if (!((nvm_data & NVM_COMPAT_LOM) || | 191 | if (!((nvm_data & NVM_COMPAT_LOM) || |
192 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || | 192 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || |
193 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) | 193 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) || |
194 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES))) | ||
194 | goto out; | 195 | goto out; |
195 | 196 | ||
196 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 197 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) | |||
200 | goto out; | 201 | goto out; |
201 | } | 202 | } |
202 | 203 | ||
203 | if (nvm_alt_mac_addr_offset == 0xFFFF) { | 204 | if ((nvm_alt_mac_addr_offset == 0xFFFF) || |
205 | (nvm_alt_mac_addr_offset == 0x0000)) | ||
204 | /* There is no Alternate MAC Address */ | 206 | /* There is no Alternate MAC Address */ |
205 | goto out; | 207 | goto out; |
206 | } | ||
207 | 208 | ||
208 | if (hw->bus.func == E1000_FUNC_1) | 209 | if (hw->bus.func == E1000_FUNC_1) |
209 | nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; | 210 | nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index ab4be80f7ab5..2198e615f241 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -56,7 +56,7 @@ | |||
56 | 56 | ||
57 | #define DRV_EXTRAVERSION "-k" | 57 | #define DRV_EXTRAVERSION "-k" |
58 | 58 | ||
59 | #define DRV_VERSION "1.3.16" DRV_EXTRAVERSION | 59 | #define DRV_VERSION "1.4.4" DRV_EXTRAVERSION |
60 | char e1000e_driver_name[] = "e1000e"; | 60 | char e1000e_driver_name[] = "e1000e"; |
61 | const char e1000e_driver_version[] = DRV_VERSION; | 61 | const char e1000e_driver_version[] = DRV_VERSION; |
62 | 62 | ||
@@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
519 | } | 519 | } |
520 | 520 | ||
521 | /** | 521 | /** |
522 | * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() | ||
523 | * @hw: pointer to the HW structure | ||
524 | * @tail: address of tail descriptor register | ||
525 | * @i: value to write to tail descriptor register | ||
526 | * | ||
527 | * When updating the tail register, the ME could be accessing Host CSR | ||
528 | * registers at the same time. Normally, this is handled in h/w by an | ||
529 | * arbiter but on some parts there is a bug that acknowledges Host accesses | ||
530 | * later than it should which could result in the descriptor register to | ||
531 |  * later than it should, which can leave the descriptor register with | ||
532 |  * an incorrect value. Work around this by checking the FWSM register, | ||
533 |  * which has bit 24 set while the ME is accessing Host CSR registers; | ||
534 |  * if it is set, wait and try again a number of times. | ||
535 | static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, | ||
536 | unsigned int i) | ||
537 | { | ||
538 | unsigned int j = 0; | ||
539 | |||
540 | while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && | ||
541 | (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) | ||
542 | udelay(50); | ||
543 | |||
544 | writel(i, tail); | ||
545 | |||
546 | if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) | ||
547 | return E1000_ERR_SWFW_SYNC; | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) | ||
553 | { | ||
554 | u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); | ||
555 | struct e1000_hw *hw = &adapter->hw; | ||
556 | |||
557 | if (e1000e_update_tail_wa(hw, tail, i)) { | ||
558 | u32 rctl = er32(RCTL); | ||
559 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
560 | e_err("ME firmware caused invalid RDT - resetting\n"); | ||
561 | schedule_work(&adapter->reset_task); | ||
562 | } | ||
563 | } | ||
564 | |||
565 | static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) | ||
566 | { | ||
567 | u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); | ||
568 | struct e1000_hw *hw = &adapter->hw; | ||
569 | |||
570 | if (e1000e_update_tail_wa(hw, tail, i)) { | ||
571 | u32 tctl = er32(TCTL); | ||
572 | ew32(TCTL, tctl & ~E1000_TCTL_EN); | ||
573 | e_err("ME firmware caused invalid TDT - resetting\n"); | ||
574 | schedule_work(&adapter->reset_task); | ||
575 | } | ||
576 | } | ||
577 | |||
578 | /** | ||
522 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended | 579 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended |
523 | * @adapter: address of board private structure | 580 | * @adapter: address of board private structure |
524 | **/ | 581 | **/ |
@@ -573,7 +630,10 @@ map_skb: | |||
573 | * such as IA-64). | 630 | * such as IA-64). |
574 | */ | 631 | */ |
575 | wmb(); | 632 | wmb(); |
576 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 633 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
634 | e1000e_update_rdt_wa(adapter, i); | ||
635 | else | ||
636 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
577 | } | 637 | } |
578 | i++; | 638 | i++; |
579 | if (i == rx_ring->count) | 639 | if (i == rx_ring->count) |
@@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
673 | * such as IA-64). | 733 | * such as IA-64). |
674 | */ | 734 | */ |
675 | wmb(); | 735 | wmb(); |
676 | writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); | 736 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
737 | e1000e_update_rdt_wa(adapter, i << 1); | ||
738 | else | ||
739 | writel(i << 1, | ||
740 | adapter->hw.hw_addr + rx_ring->tail); | ||
677 | } | 741 | } |
678 | 742 | ||
679 | i++; | 743 | i++; |
@@ -756,7 +820,10 @@ check_page: | |||
756 | * applicable for weak-ordered memory model archs, | 820 | * applicable for weak-ordered memory model archs, |
757 | * such as IA-64). */ | 821 | * such as IA-64). */ |
758 | wmb(); | 822 | wmb(); |
759 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 823 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
824 | e1000e_update_rdt_wa(adapter, i); | ||
825 | else | ||
826 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
760 | } | 827 | } |
761 | } | 828 | } |
762 | 829 | ||
@@ -2915,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2915 | 2982 | ||
2916 | /* disable receives while setting up the descriptors */ | 2983 | /* disable receives while setting up the descriptors */ |
2917 | rctl = er32(RCTL); | 2984 | rctl = er32(RCTL); |
2918 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 2985 | if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) |
2986 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
2919 | e1e_flush(); | 2987 | e1e_flush(); |
2920 | usleep_range(10000, 20000); | 2988 | usleep_range(10000, 20000); |
2921 | 2989 | ||
@@ -3394,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3394 | 3462 | ||
3395 | /* disable receives in the hardware */ | 3463 | /* disable receives in the hardware */ |
3396 | rctl = er32(RCTL); | 3464 | rctl = er32(RCTL); |
3397 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 3465 | if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) |
3466 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
3398 | /* flush and sleep below */ | 3467 | /* flush and sleep below */ |
3399 | 3468 | ||
3400 | netif_stop_queue(netdev); | 3469 | netif_stop_queue(netdev); |
@@ -3403,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3403 | tctl = er32(TCTL); | 3472 | tctl = er32(TCTL); |
3404 | tctl &= ~E1000_TCTL_EN; | 3473 | tctl &= ~E1000_TCTL_EN; |
3405 | ew32(TCTL, tctl); | 3474 | ew32(TCTL, tctl); |
3475 | |||
3406 | /* flush both disables and wait for them to finish */ | 3476 | /* flush both disables and wait for them to finish */ |
3407 | e1e_flush(); | 3477 | e1e_flush(); |
3408 | usleep_range(10000, 20000); | 3478 | usleep_range(10000, 20000); |
@@ -4686,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
4686 | wmb(); | 4756 | wmb(); |
4687 | 4757 | ||
4688 | tx_ring->next_to_use = i; | 4758 | tx_ring->next_to_use = i; |
4689 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 4759 | |
4760 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | ||
4761 | e1000e_update_tdt_wa(adapter, i); | ||
4762 | else | ||
4763 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | ||
4764 | |||
4690 | /* | 4765 | /* |
4691 | * we need this if more than one processor can write to our tail | 4766 | * we need this if more than one processor can write to our tail |
4692 | * at a time, it synchronizes IO on IA64/Altix systems | 4767 | * at a time, it synchronizes IO on IA64/Altix systems |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index e55df308a3af..6d5fbd4d4256 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5615 | goto out_error; | 5615 | goto out_error; |
5616 | } | 5616 | } |
5617 | 5617 | ||
5618 | nv_vlan_mode(dev, dev->features); | 5618 | if (id->driver_data & DEV_HAS_VLAN) |
5619 | nv_vlan_mode(dev, dev->features); | ||
5619 | 5620 | ||
5620 | netif_carrier_off(dev); | 5621 | netif_carrier_off(dev); |
5621 | 5622 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 2659daad783d..31d5c574e5a9 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
2710 | /* Tell the skb what kind of packet this is */ | 2710 | /* Tell the skb what kind of packet this is */ |
2711 | skb->protocol = eth_type_trans(skb, dev); | 2711 | skb->protocol = eth_type_trans(skb, dev); |
2712 | 2712 | ||
2713 | /* Set vlan tag */ | 2713 | /* |
2714 | if (fcb->flags & RXFCB_VLN) | 2714 | * There's need to check for NETIF_F_HW_VLAN_RX here. |
2715 | * Even if vlan rx accel is disabled, on some chips | ||
2716 | * RXFCB_VLN is pseudo randomly set. | ||
2717 | */ | ||
2718 | if (dev->features & NETIF_F_HW_VLAN_RX && | ||
2719 | fcb->flags & RXFCB_VLN) | ||
2715 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); | 2720 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); |
2716 | 2721 | ||
2717 | /* Send the packet up the stack */ | 2722 | /* Send the packet up the stack */ |
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 6e350692d118..25a8c2adb001 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
686 | { | 686 | { |
687 | unsigned int last_rule_idx = priv->cur_filer_idx; | 687 | unsigned int last_rule_idx = priv->cur_filer_idx; |
688 | unsigned int cmp_rqfpr; | 688 | unsigned int cmp_rqfpr; |
689 | unsigned int local_rqfpr[MAX_FILER_IDX + 1]; | 689 | unsigned int *local_rqfpr; |
690 | unsigned int local_rqfcr[MAX_FILER_IDX + 1]; | 690 | unsigned int *local_rqfcr; |
691 | int i = 0x0, k = 0x0; | 691 | int i = 0x0, k = 0x0; |
692 | int j = MAX_FILER_IDX, l = 0x0; | 692 | int j = MAX_FILER_IDX, l = 0x0; |
693 | int ret = 1; | ||
694 | |||
695 | local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), | ||
696 | GFP_KERNEL); | ||
697 | local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), | ||
698 | GFP_KERNEL); | ||
699 | if (!local_rqfpr || !local_rqfcr) { | ||
700 | pr_err("Out of memory\n"); | ||
701 | ret = 0; | ||
702 | goto err; | ||
703 | } | ||
693 | 704 | ||
694 | switch (class) { | 705 | switch (class) { |
695 | case TCP_V4_FLOW: | 706 | case TCP_V4_FLOW: |
@@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
706 | break; | 717 | break; |
707 | default: | 718 | default: |
708 | pr_err("Right now this class is not supported\n"); | 719 | pr_err("Right now this class is not supported\n"); |
709 | return 0; | 720 | ret = 0; |
721 | goto err; | ||
710 | } | 722 | } |
711 | 723 | ||
712 | for (i = 0; i < MAX_FILER_IDX + 1; i++) { | 724 | for (i = 0; i < MAX_FILER_IDX + 1; i++) { |
@@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
721 | 733 | ||
722 | if (i == MAX_FILER_IDX + 1) { | 734 | if (i == MAX_FILER_IDX + 1) { |
723 | pr_err("No parse rule found, can't create hash rules\n"); | 735 | pr_err("No parse rule found, can't create hash rules\n"); |
724 | return 0; | 736 | ret = 0; |
737 | goto err; | ||
725 | } | 738 | } |
726 | 739 | ||
727 | /* If a match was found, then it begins the starting of a cluster rule | 740 | /* If a match was found, then it begins the starting of a cluster rule |
@@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
765 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | 778 | priv->cur_filer_idx = priv->cur_filer_idx - 1; |
766 | } | 779 | } |
767 | 780 | ||
768 | return 1; | 781 | err: |
782 | kfree(local_rqfcr); | ||
783 | kfree(local_rqfpr); | ||
784 | return ret; | ||
769 | } | 785 | } |
770 | 786 | ||
771 | static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) | 787 | static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) |
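The gfar_ethflow_to_filer_table() change above replaces two roughly 1 KiB on-stack scratch tables with kmalloc()'d buffers and routes every exit through a single cleanup label. Here is a userspace model of that allocate-then-single-exit pattern; MAX_FILER_IDX is assumed to be 0xFF, and malloc()/free() stand in for kmalloc()/kfree():

#include <stdio.h>
#include <stdlib.h>

#define MAX_FILER_IDX 0xFF	/* assumed; see gianfar.h for the real value */

static int build_tables(void)
{
	unsigned int *local_rqfpr, *local_rqfcr;
	int ret = 1;		/* the driver returns 1 on success, 0 on failure */

	/* ~1 KiB each: too large for a kernel stack, so allocate from the heap. */
	local_rqfpr = malloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1));
	local_rqfcr = malloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1));
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	/* ... filer table construction would go here ... */

err:
	free(local_rqfcr);	/* free(NULL) is a no-op, like kfree(NULL) */
	free(local_rqfpr);
	return ret;
}

int main(void)
{
	printf("build_tables() = %d\n", build_tables());
	return 0;
}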
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c index 1c97861596f0..f67b8aebc89c 100644 --- a/drivers/net/gianfar_ptp.c +++ b/drivers/net/gianfar_ptp.c | |||
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects) | |||
193 | /* Caller must hold etsects->lock. */ | 193 | /* Caller must hold etsects->lock. */ |
194 | static void set_fipers(struct etsects *etsects) | 194 | static void set_fipers(struct etsects *etsects) |
195 | { | 195 | { |
196 | u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); | 196 | set_alarm(etsects); |
197 | |||
198 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE)); | ||
199 | gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); | ||
200 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | 197 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); |
201 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | 198 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); |
202 | set_alarm(etsects); | ||
203 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE); | ||
204 | } | 199 | } |
205 | 200 | ||
206 | /* | 201 | /* |
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
511 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | 506 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); |
512 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | 507 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); |
513 | set_alarm(etsects); | 508 | set_alarm(etsects); |
514 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); | 509 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); |
515 | 510 | ||
516 | spin_unlock_irqrestore(&etsects->lock, flags); | 511 | spin_unlock_irqrestore(&etsects->lock, flags); |
517 | 512 | ||
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 4488bd581eca..82660672dcd9 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c | |||
@@ -22,6 +22,8 @@ | |||
22 | * - DMA transfer support | 22 | * - DMA transfer support |
23 | * - FIFO mode support | 23 | * - FIFO mode support |
24 | */ | 24 | */ |
25 | #include <linux/io.h> | ||
26 | #include <linux/interrupt.h> | ||
25 | #include <linux/module.h> | 27 | #include <linux/module.h> |
26 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
27 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 52a7c86af663..ed7d7d62bf68 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/interrupt.h> | ||
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase) | |||
511 | 513 | ||
512 | static int sh_sir_read_data(struct sh_sir_self *self) | 514 | static int sh_sir_read_data(struct sh_sir_self *self) |
513 | { | 515 | { |
514 | u16 val; | 516 | u16 val = 0; |
515 | int timeout = 1024; | 517 | int timeout = 1024; |
516 | 518 | ||
517 | while (timeout--) { | 519 | while (timeout--) { |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e86297b32733..22790394318a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -1459,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1459 | if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { | 1459 | if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { |
1460 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, | 1460 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, |
1461 | staterr); | 1461 | staterr); |
1462 | if (!ddp_bytes) | 1462 | if (!ddp_bytes) { |
1463 | dev_kfree_skb_any(skb); | ||
1463 | goto next_desc; | 1464 | goto next_desc; |
1465 | } | ||
1464 | } | 1466 | } |
1465 | #endif /* IXGBE_FCOE */ | 1467 | #endif /* IXGBE_FCOE */ |
1466 | ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); | 1468 | ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 8b3090dc4bcd..80b6f36a8074 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -82,7 +82,7 @@ static int cards_found; | |||
82 | /* | 82 | /* |
83 | * VLB I/O addresses | 83 | * VLB I/O addresses |
84 | */ | 84 | */ |
85 | static unsigned int pcnet32_portlist[] __initdata = | 85 | static unsigned int pcnet32_portlist[] = |
86 | { 0x300, 0x320, 0x340, 0x360, 0 }; | 86 | { 0x300, 0x320, 0x340, 0x360, 0 }; |
87 | 87 | ||
88 | static int pcnet32_debug; | 88 | static int pcnet32_debug; |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2cd8dc5847b4..cb6e0b486b1e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -34,8 +34,7 @@ | |||
34 | #define PAGESEL 0x13 | 34 | #define PAGESEL 0x13 |
35 | #define LAYER4 0x02 | 35 | #define LAYER4 0x02 |
36 | #define LAYER2 0x01 | 36 | #define LAYER2 0x01 |
37 | #define MAX_RXTS 4 | 37 | #define MAX_RXTS 64 |
38 | #define MAX_TXTS 4 | ||
39 | #define N_EXT_TS 1 | 38 | #define N_EXT_TS 1 |
40 | #define PSF_PTPVER 2 | 39 | #define PSF_PTPVER 2 |
41 | #define PSF_EVNT 0x4000 | 40 | #define PSF_EVNT 0x4000 |
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts) | |||
218 | rxts->seqid = p->seqid; | 217 | rxts->seqid = p->seqid; |
219 | rxts->msgtype = (p->msgtype >> 12) & 0xf; | 218 | rxts->msgtype = (p->msgtype >> 12) & 0xf; |
220 | rxts->hash = p->msgtype & 0x0fff; | 219 | rxts->hash = p->msgtype & 0x0fff; |
221 | rxts->tmo = jiffies + HZ; | 220 | rxts->tmo = jiffies + 2; |
222 | } | 221 | } |
223 | 222 | ||
224 | static u64 phy2txts(struct phy_txts *p) | 223 | static u64 phy2txts(struct phy_txts *p) |
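The dp83640 change above shrinks the RX-timestamp lifetime from HZ jiffies (one second) to 2 jiffies and enlarges the pool from 4 to 64 entries. A hedged sketch of the kind of jiffies-based expiry check such a timeout implies; the struct and helper names are illustrative, not the driver's.

#include <linux/jiffies.h>
#include <linux/types.h>

/* An entry stamped with "tmo = jiffies + 2" is stale once time_after()
 * reports the deadline has passed; time_after() handles jiffies
 * wrap-around correctly, which a plain ">" comparison would not. */
struct ts_entry {
	unsigned long tmo;
	bool valid;
};

static void expire_if_stale(struct ts_entry *e)
{
	if (e->valid && time_after(jiffies, e->tmo))
		e->valid = false;
}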
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 86ac38c96bcf..3bb131137033 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -80,13 +80,13 @@ static int rionet_capable = 1; | |||
80 | */ | 80 | */ |
81 | static struct rio_dev **rionet_active; | 81 | static struct rio_dev **rionet_active; |
82 | 82 | ||
83 | #define is_rionet_capable(pef, src_ops, dst_ops) \ | 83 | #define is_rionet_capable(src_ops, dst_ops) \ |
84 | ((pef & RIO_PEF_INB_MBOX) && \ | 84 | ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ |
85 | (pef & RIO_PEF_INB_DOORBELL) && \ | 85 | (dst_ops & RIO_DST_OPS_DATA_MSG) && \ |
86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ | 86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ |
87 | (dst_ops & RIO_DST_OPS_DOORBELL)) | 87 | (dst_ops & RIO_DST_OPS_DOORBELL)) |
88 | #define dev_rionet_capable(dev) \ | 88 | #define dev_rionet_capable(dev) \ |
89 | is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) | 89 | is_rionet_capable(dev->src_ops, dev->dst_ops) |
90 | 90 | ||
91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) | 91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) |
92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) | 92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) |
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev) | |||
282 | { | 282 | { |
283 | int i, rc = 0; | 283 | int i, rc = 0; |
284 | struct rionet_peer *peer, *tmp; | 284 | struct rionet_peer *peer, *tmp; |
285 | u32 pwdcsr; | ||
286 | struct rionet_private *rnet = netdev_priv(ndev); | 285 | struct rionet_private *rnet = netdev_priv(ndev); |
287 | 286 | ||
288 | if (netif_msg_ifup(rnet)) | 287 | if (netif_msg_ifup(rnet)) |
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev) | |||
332 | continue; | 331 | continue; |
333 | } | 332 | } |
334 | 333 | ||
335 | /* | 334 | /* Send a join message */ |
336 | * If device has initialized inbound doorbells, | 335 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); |
337 | * send a join message | ||
338 | */ | ||
339 | rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); | ||
340 | if (pwdcsr & RIO_DOORBELL_AVAIL) | ||
341 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); | ||
342 | } | 336 | } |
343 | 337 | ||
344 | out: | 338 | out: |
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) | |||
492 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | 486 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) |
493 | { | 487 | { |
494 | int rc = -ENODEV; | 488 | int rc = -ENODEV; |
495 | u32 lpef, lsrc_ops, ldst_ops; | 489 | u32 lsrc_ops, ldst_ops; |
496 | struct rionet_peer *peer; | 490 | struct rionet_peer *peer; |
497 | struct net_device *ndev = NULL; | 491 | struct net_device *ndev = NULL; |
498 | 492 | ||
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
515 | * on later probes | 509 | * on later probes |
516 | */ | 510 | */ |
517 | if (!rionet_check) { | 511 | if (!rionet_check) { |
518 | rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); | ||
519 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, | 512 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, |
520 | &lsrc_ops); | 513 | &lsrc_ops); |
521 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, | 514 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, |
522 | &ldst_ops); | 515 | &ldst_ops); |
523 | if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { | 516 | if (!is_rionet_capable(lsrc_ops, ldst_ops)) { |
524 | printk(KERN_ERR | 517 | printk(KERN_ERR |
525 | "%s: local device is not network capable\n", | 518 | "%s: local device is not network capable\n", |
526 | DRV_NAME); | 519 | DRV_NAME); |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index ad35c210b839..190f619e4215 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -21,6 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/interrupt.h> | ||
24 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
25 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
26 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index f11b3f3df24f..4c617534f937 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl) | |||
367 | memcpy(skb_put(skb, count), sl->rbuff, count); | 367 | memcpy(skb_put(skb, count), sl->rbuff, count); |
368 | skb_reset_mac_header(skb); | 368 | skb_reset_mac_header(skb); |
369 | skb->protocol = htons(ETH_P_IP); | 369 | skb->protocol = htons(ETH_P_IP); |
370 | netif_rx(skb); | 370 | netif_rx_ni(skb); |
371 | dev->stats.rx_packets++; | 371 | dev->stats.rx_packets++; |
372 | } | 372 | } |
373 | 373 | ||
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a03336e086d5..f06fb78383a1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -228,23 +228,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
228 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | 228 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
229 | 229 | ||
230 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | 230 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { |
231 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | 231 | struct usb_cdc_ncm_ndp_input_size *ndp_in_sz; |
232 | |||
233 | ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL); | ||
234 | if (!ndp_in_sz) { | ||
235 | err = -ENOMEM; | ||
236 | goto size_err; | ||
237 | } | ||
238 | |||
232 | err = usb_control_msg(ctx->udev, | 239 | err = usb_control_msg(ctx->udev, |
233 | usb_sndctrlpipe(ctx->udev, 0), | 240 | usb_sndctrlpipe(ctx->udev, 0), |
234 | USB_CDC_SET_NTB_INPUT_SIZE, | 241 | USB_CDC_SET_NTB_INPUT_SIZE, |
235 | USB_TYPE_CLASS | USB_DIR_OUT | 242 | USB_TYPE_CLASS | USB_DIR_OUT |
236 | | USB_RECIP_INTERFACE, | 243 | | USB_RECIP_INTERFACE, |
237 | 0, iface_no, &ndp_in_sz, 8, 1000); | 244 | 0, iface_no, ndp_in_sz, 8, 1000); |
245 | kfree(ndp_in_sz); | ||
238 | } else { | 246 | } else { |
239 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 247 | __le32 *dwNtbInMaxSize; |
248 | dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize), | ||
249 | GFP_KERNEL); | ||
250 | if (!dwNtbInMaxSize) { | ||
251 | err = -ENOMEM; | ||
252 | goto size_err; | ||
253 | } | ||
254 | *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
255 | |||
240 | err = usb_control_msg(ctx->udev, | 256 | err = usb_control_msg(ctx->udev, |
241 | usb_sndctrlpipe(ctx->udev, 0), | 257 | usb_sndctrlpipe(ctx->udev, 0), |
242 | USB_CDC_SET_NTB_INPUT_SIZE, | 258 | USB_CDC_SET_NTB_INPUT_SIZE, |
243 | USB_TYPE_CLASS | USB_DIR_OUT | 259 | USB_TYPE_CLASS | USB_DIR_OUT |
244 | | USB_RECIP_INTERFACE, | 260 | | USB_RECIP_INTERFACE, |
245 | 0, iface_no, &dwNtbInMaxSize, 4, 1000); | 261 | 0, iface_no, dwNtbInMaxSize, 4, 1000); |
262 | kfree(dwNtbInMaxSize); | ||
246 | } | 263 | } |
247 | 264 | size_err: | |
248 | if (err < 0) | 265 | if (err < 0) |
249 | pr_debug("Setting NTB Input Size failed\n"); | 266 | pr_debug("Setting NTB Input Size failed\n"); |
250 | } | 267 | } |
@@ -325,19 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
325 | 342 | ||
326 | /* set Max Datagram Size (MTU) */ | 343 | /* set Max Datagram Size (MTU) */ |
327 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { | 344 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
328 | __le16 max_datagram_size; | 345 | __le16 *max_datagram_size; |
329 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 346 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
347 | |||
348 | max_datagram_size = kzalloc(sizeof(*max_datagram_size), | ||
349 | GFP_KERNEL); | ||
350 | if (!max_datagram_size) { | ||
351 | err = -ENOMEM; | ||
352 | goto max_dgram_err; | ||
353 | } | ||
354 | |||
330 | err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), | 355 | err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), |
331 | USB_CDC_GET_MAX_DATAGRAM_SIZE, | 356 | USB_CDC_GET_MAX_DATAGRAM_SIZE, |
332 | USB_TYPE_CLASS | USB_DIR_IN | 357 | USB_TYPE_CLASS | USB_DIR_IN |
333 | | USB_RECIP_INTERFACE, | 358 | | USB_RECIP_INTERFACE, |
334 | 0, iface_no, &max_datagram_size, | 359 | 0, iface_no, max_datagram_size, |
335 | 2, 1000); | 360 | 2, 1000); |
336 | if (err < 0) { | 361 | if (err < 0) { |
337 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | 362 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", |
338 | CDC_NCM_MIN_DATAGRAM_SIZE); | 363 | CDC_NCM_MIN_DATAGRAM_SIZE); |
364 | kfree(max_datagram_size); | ||
339 | } else { | 365 | } else { |
340 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | 366 | ctx->max_datagram_size = |
367 | le16_to_cpu(*max_datagram_size); | ||
341 | /* Check Eth descriptor value */ | 368 | /* Check Eth descriptor value */ |
342 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { | 369 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { |
343 | if (ctx->max_datagram_size > eth_max_sz) | 370 | if (ctx->max_datagram_size > eth_max_sz) |
@@ -360,8 +387,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
360 | USB_TYPE_CLASS | USB_DIR_OUT | 387 | USB_TYPE_CLASS | USB_DIR_OUT |
361 | | USB_RECIP_INTERFACE, | 388 | | USB_RECIP_INTERFACE, |
362 | 0, | 389 | 0, |
363 | iface_no, &max_datagram_size, | 390 | iface_no, max_datagram_size, |
364 | 2, 1000); | 391 | 2, 1000); |
392 | kfree(max_datagram_size); | ||
393 | max_dgram_err: | ||
365 | if (err < 0) | 394 | if (err < 0) |
366 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | 395 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); |
367 | } | 396 | } |
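The cdc_ncm hunks above move every buffer passed to usb_control_msg() off the stack and onto the heap, because the USB core needs DMA-able memory for the data stage. A hedged sketch of the same idea as a small helper; the request value, recipient bits, and function name are illustrative, not part of the cdc_ncm code.

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Read a 2-byte class-specific value through a kmalloc()ed bounce
 * buffer, then copy it out and free the buffer on every exit path. */
static int get_le16_value(struct usb_device *udev, u8 request, u16 iface,
			  u16 *result)
{
	__le16 *buf;
	int ret;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
			      0, iface, buf, sizeof(*buf), 1000);
	if (ret == sizeof(*buf))
		*result = le16_to_cpu(*buf);
	else if (ret >= 0)
		ret = -EIO;		/* short transfer */

	kfree(buf);
	return ret < 0 ? ret : 0;
}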
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 041fb7d43c4f..ef3b236b5145 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf) | |||
977 | usb_set_intfdata(intf, NULL); | 977 | usb_set_intfdata(intf, NULL); |
978 | if (dev) { | 978 | if (dev) { |
979 | set_bit(RTL8150_UNPLUG, &dev->flags); | 979 | set_bit(RTL8150_UNPLUG, &dev->flags); |
980 | tasklet_disable(&dev->tl); | ||
981 | tasklet_kill(&dev->tl); | 980 | tasklet_kill(&dev->tl); |
982 | unregister_netdev(dev->netdev); | 981 | unregister_netdev(dev->netdev); |
983 | unlink_all_urbs(dev); | 982 | unlink_all_urbs(dev); |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index deb1eca13c9f..7c5336c5c37f 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) | |||
515 | mac_set_cam_mask(regs, vptr->mCAMmask); | 515 | mac_set_cam_mask(regs, vptr->mCAMmask); |
516 | 516 | ||
517 | /* Enable VCAMs */ | 517 | /* Enable VCAMs */ |
518 | |||
519 | if (test_bit(0, vptr->active_vlans)) | ||
520 | WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); | ||
521 | |||
522 | for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { | 518 | for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { |
523 | mac_set_vlan_cam(regs, i, (u8 *) &vid); | 519 | mac_set_vlan_cam(regs, i, (u8 *) &vid); |
524 | vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); | 520 | vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1cbacb389652..0959583feb27 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -1929,14 +1929,17 @@ static void | |||
1929 | vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 1929 | vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
1930 | { | 1930 | { |
1931 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1931 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1932 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1933 | unsigned long flags; | ||
1934 | 1932 | ||
1935 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1933 | if (!(netdev->flags & IFF_PROMISC)) { |
1936 | spin_lock_irqsave(&adapter->cmd_lock, flags); | 1934 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1937 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1935 | unsigned long flags; |
1938 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1936 | |
1939 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 1937 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1938 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1939 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1940 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1941 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1942 | } | ||
1940 | 1943 | ||
1941 | set_bit(vid, adapter->active_vlans); | 1944 | set_bit(vid, adapter->active_vlans); |
1942 | } | 1945 | } |
@@ -1946,14 +1949,17 @@ static void | |||
1946 | vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 1949 | vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
1947 | { | 1950 | { |
1948 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1951 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1949 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1950 | unsigned long flags; | ||
1951 | 1952 | ||
1952 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | 1953 | if (!(netdev->flags & IFF_PROMISC)) { |
1953 | spin_lock_irqsave(&adapter->cmd_lock, flags); | 1954 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1954 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1955 | unsigned long flags; |
1955 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1956 | |
1956 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 1957 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); |
1958 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1959 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1960 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1961 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1962 | } | ||
1957 | 1963 | ||
1958 | clear_bit(vid, adapter->active_vlans); | 1964 | clear_bit(vid, adapter->active_vlans); |
1959 | } | 1965 | } |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index f54dff44ed50..c3119a6caace 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf) | |||
1735 | 1735 | ||
1736 | if (dma_mapping_error(ah->dev, bf->skbaddr)) { | 1736 | if (dma_mapping_error(ah->dev, bf->skbaddr)) { |
1737 | ATH5K_ERR(ah, "beacon DMA mapping failed\n"); | 1737 | ATH5K_ERR(ah, "beacon DMA mapping failed\n"); |
1738 | dev_kfree_skb_any(skb); | ||
1739 | bf->skb = NULL; | ||
1738 | return -EIO; | 1740 | return -EIO; |
1739 | } | 1741 | } |
1740 | 1742 | ||
@@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
1819 | ath5k_txbuf_free_skb(ah, avf->bbuf); | 1821 | ath5k_txbuf_free_skb(ah, avf->bbuf); |
1820 | avf->bbuf->skb = skb; | 1822 | avf->bbuf->skb = skb; |
1821 | ret = ath5k_beacon_setup(ah, avf->bbuf); | 1823 | ret = ath5k_beacon_setup(ah, avf->bbuf); |
1822 | if (ret) | ||
1823 | avf->bbuf->skb = NULL; | ||
1824 | out: | 1824 | out: |
1825 | return ret; | 1825 | return ret; |
1826 | } | 1826 | } |
@@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah) | |||
1840 | struct ath5k_vif *avf; | 1840 | struct ath5k_vif *avf; |
1841 | struct ath5k_buf *bf; | 1841 | struct ath5k_buf *bf; |
1842 | struct sk_buff *skb; | 1842 | struct sk_buff *skb; |
1843 | int err; | ||
1843 | 1844 | ||
1844 | ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); | 1845 | ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); |
1845 | 1846 | ||
@@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah) | |||
1888 | 1889 | ||
1889 | avf = (void *)vif->drv_priv; | 1890 | avf = (void *)vif->drv_priv; |
1890 | bf = avf->bbuf; | 1891 | bf = avf->bbuf; |
1891 | if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION || | ||
1892 | ah->opmode == NL80211_IFTYPE_MONITOR)) { | ||
1893 | ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); | ||
1894 | return; | ||
1895 | } | ||
1896 | 1892 | ||
1897 | /* | 1893 | /* |
1898 | * Stop any current dma and put the new frame on the queue. | 1894 | * Stop any current dma and put the new frame on the queue. |
@@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah) | |||
1906 | 1902 | ||
1907 | /* refresh the beacon for AP or MESH mode */ | 1903 | /* refresh the beacon for AP or MESH mode */ |
1908 | if (ah->opmode == NL80211_IFTYPE_AP || | 1904 | if (ah->opmode == NL80211_IFTYPE_AP || |
1909 | ah->opmode == NL80211_IFTYPE_MESH_POINT) | 1905 | ah->opmode == NL80211_IFTYPE_MESH_POINT) { |
1910 | ath5k_beacon_update(ah->hw, vif); | 1906 | err = ath5k_beacon_update(ah->hw, vif); |
1907 | if (err) | ||
1908 | return; | ||
1909 | } | ||
1910 | |||
1911 | if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION || | ||
1912 | ah->opmode == NL80211_IFTYPE_MONITOR)) { | ||
1913 | ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb); | ||
1914 | return; | ||
1915 | } | ||
1911 | 1916 | ||
1912 | trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); | 1917 | trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); |
1913 | 1918 | ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index d109c25417f4..c34bef1bf2b0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = { | |||
307 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 307 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
308 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, | 308 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, |
309 | 309 | ||
310 | { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, | 310 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, |
311 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 311 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
312 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 312 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
313 | 313 | ||
@@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = { | |||
884 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 884 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
885 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, | 885 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, |
886 | 886 | ||
887 | { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, | 887 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, |
888 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 888 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
889 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 889 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
890 | 890 | ||
@@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = { | |||
2040 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 2040 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
2041 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, | 2041 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, |
2042 | 2042 | ||
2043 | { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, | 2043 | { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, |
2044 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 2044 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
2045 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, | 2045 | { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, |
2046 | 2046 | ||
@@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) | |||
3734 | } | 3734 | } |
3735 | } else { | 3735 | } else { |
3736 | reg_pmu_set = (5 << 1) | (7 << 4) | | 3736 | reg_pmu_set = (5 << 1) | (7 << 4) | |
3737 | (1 << 8) | (2 << 14) | | 3737 | (2 << 8) | (2 << 14) | |
3738 | (6 << 17) | (1 << 20) | | 3738 | (6 << 17) | (1 << 20) | |
3739 | (3 << 24) | (1 << 28); | 3739 | (3 << 24) | (1 << 28); |
3740 | } | 3740 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 6de3f0bc18e6..5c590429f120 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h | |||
@@ -850,7 +850,7 @@ | |||
850 | #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) | 850 | #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) |
851 | #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) | 851 | #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) |
852 | #define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) | 852 | #define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) |
853 | #define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2)) | 853 | #define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2)) |
854 | 854 | ||
855 | /* | 855 | /* |
856 | * Channel 2 Register Map | 856 | * Channel 2 Register Map |
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 83cba22ac6e8..481e534534eb 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev) | |||
795 | u32 tmp; | 795 | u32 tmp; |
796 | u16 mmio_base; | 796 | u16 mmio_base; |
797 | 797 | ||
798 | tmp = b43_read32(dev, SSB_TMSHIGH); | 798 | switch (dev->dev->bus_type) { |
799 | if (tmp & SSB_TMSHIGH_DMA64) | 799 | #ifdef CONFIG_B43_BCMA |
800 | return DMA_BIT_MASK(64); | 800 | case B43_BUS_BCMA: |
801 | tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST); | ||
802 | if (tmp & BCMA_IOST_DMA64) | ||
803 | return DMA_BIT_MASK(64); | ||
804 | break; | ||
805 | #endif | ||
806 | #ifdef CONFIG_B43_SSB | ||
807 | case B43_BUS_SSB: | ||
808 | tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH); | ||
809 | if (tmp & SSB_TMSHIGH_DMA64) | ||
810 | return DMA_BIT_MASK(64); | ||
811 | break; | ||
812 | #endif | ||
813 | } | ||
814 | |||
801 | mmio_base = b43_dmacontroller_base(0, 0); | 815 | mmio_base = b43_dmacontroller_base(0, 0); |
802 | b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); | 816 | b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); |
803 | tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); | 817 | tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); |
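The b43 hunk above decides between a 64-bit and 32-bit DMA mask by reading the bus-specific status register (BCMA_IOST or SSB_TMSHIGH). Once a driver knows the widest mask the hardware supports, the usual next step is the fallback idiom sketched below; this is a generic, hedged example, not the b43 code, which feeds the chosen mask into its own DMA setup.

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Try the wide mask first, then fall back to 32-bit if the platform
 * or the device refuses it. */
static int setup_dma_mask(struct device *dev, u64 wanted)
{
	int err;

	err = dma_set_mask(dev, wanted);
	if (!err)
		err = dma_set_coherent_mask(dev, wanted);
	if (err && wanted > DMA_BIT_MASK(32)) {
		dev_warn(dev, "64-bit DMA unavailable, using 32-bit\n");
		err = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}
	return err;
}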
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index 69d4ec467dca..2fdbffa079c1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c | |||
@@ -478,27 +478,22 @@ out_no_pci: | |||
478 | return err; | 478 | return err; |
479 | } | 479 | } |
480 | 480 | ||
481 | static void iwl_pci_down(struct iwl_bus *bus) | ||
482 | { | ||
483 | struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific; | ||
484 | |||
485 | pci_disable_msi(pci_bus->pci_dev); | ||
486 | pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base); | ||
487 | pci_release_regions(pci_bus->pci_dev); | ||
488 | pci_disable_device(pci_bus->pci_dev); | ||
489 | pci_set_drvdata(pci_bus->pci_dev, NULL); | ||
490 | |||
491 | kfree(bus); | ||
492 | } | ||
493 | |||
494 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) | 481 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) |
495 | { | 482 | { |
496 | struct iwl_priv *priv = pci_get_drvdata(pdev); | 483 | struct iwl_priv *priv = pci_get_drvdata(pdev); |
497 | void *bus_specific = priv->bus->bus_specific; | 484 | struct iwl_bus *bus = priv->bus; |
485 | struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus); | ||
486 | struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); | ||
498 | 487 | ||
499 | iwl_remove(priv); | 488 | iwl_remove(priv); |
500 | 489 | ||
501 | iwl_pci_down(bus_specific); | 490 | pci_disable_msi(pci_dev); |
491 | pci_iounmap(pci_dev, pci_bus->hw_base); | ||
492 | pci_release_regions(pci_dev); | ||
493 | pci_disable_device(pci_dev); | ||
494 | pci_set_drvdata(pci_dev, NULL); | ||
495 | |||
496 | kfree(bus); | ||
502 | } | 497 | } |
503 | 498 | ||
504 | #ifdef CONFIG_PM | 499 | #ifdef CONFIG_PM |
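The iwl-pci hunk above folds the former iwl_pci_down() helper into the remove path, tearing PCI resources down inline in roughly the reverse order they were acquired during probe. A hedged, generic sketch of that ordering in a PCI .remove callback; the private struct and its hw_base field are placeholders for whatever a driver stores in drvdata.

#include <linux/pci.h>
#include <linux/slab.h>

struct my_priv {
	void __iomem *hw_base;
};

/* Undo probe in reverse: higher layers first, then MSI, mapping,
 * regions, the device itself, and finally the private data. */
static void my_pci_remove(struct pci_dev *pdev)
{
	struct my_priv *priv = pci_get_drvdata(pdev);

	/* stop the device / unregister upper layers here ... */

	pci_disable_msi(pdev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(priv);
}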
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 507559361d87..dbf501ca317f 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) | |||
464 | int wcid, ack, pid; | 464 | int wcid, ack, pid; |
465 | int tx_wcid, tx_ack, tx_pid; | 465 | int tx_wcid, tx_ack, tx_pid; |
466 | 466 | ||
467 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || | ||
468 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) { | ||
469 | WARNING(entry->queue->rt2x00dev, | ||
470 | "Data pending for entry %u in queue %u\n", | ||
471 | entry->entry_idx, entry->queue->qid); | ||
472 | cond_resched(); | ||
473 | return false; | ||
474 | } | ||
475 | |||
467 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); | 476 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); |
468 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); | 477 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); |
469 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); | 478 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); |
@@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) | |||
529 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 538 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
530 | if (rt2800usb_txdone_entry_check(entry, reg)) | 539 | if (rt2800usb_txdone_entry_check(entry, reg)) |
531 | break; | 540 | break; |
541 | entry = NULL; | ||
532 | } | 542 | } |
533 | 543 | ||
534 | if (!entry || rt2x00queue_empty(queue)) | 544 | if (entry) |
535 | break; | 545 | rt2800_txdone_entry(entry, reg); |
536 | |||
537 | rt2800_txdone_entry(entry, reg); | ||
538 | } | 546 | } |
539 | } | 547 | } |
540 | 548 | ||
@@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work) | |||
558 | while (!rt2x00queue_empty(queue)) { | 566 | while (!rt2x00queue_empty(queue)) { |
559 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 567 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
560 | 568 | ||
561 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 569 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || |
570 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) | ||
562 | break; | 571 | break; |
572 | |||
563 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) | 573 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) |
564 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); | 574 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); |
565 | else if (rt2x00queue_status_timeout(entry)) | 575 | else if (rt2x00queue_status_timeout(entry)) |
@@ -921,6 +931,8 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
921 | { USB_DEVICE(0x07d1, 0x3c16) }, | 931 | { USB_DEVICE(0x07d1, 0x3c16) }, |
922 | /* Draytek */ | 932 | /* Draytek */ |
923 | { USB_DEVICE(0x07fa, 0x7712) }, | 933 | { USB_DEVICE(0x07fa, 0x7712) }, |
934 | /* DVICO */ | ||
935 | { USB_DEVICE(0x0fe9, 0xb307) }, | ||
924 | /* Edimax */ | 936 | /* Edimax */ |
925 | { USB_DEVICE(0x7392, 0x7711) }, | 937 | { USB_DEVICE(0x7392, 0x7711) }, |
926 | { USB_DEVICE(0x7392, 0x7717) }, | 938 | { USB_DEVICE(0x7392, 0x7717) }, |
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index b6b4542c2460..7fbb55c9da82 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c | |||
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) | |||
262 | struct queue_entry *entry = (struct queue_entry *)urb->context; | 262 | struct queue_entry *entry = (struct queue_entry *)urb->context; |
263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | 263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
264 | 264 | ||
265 | if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 265 | if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) |
266 | return; | 266 | return; |
267 | |||
268 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
269 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
270 | |||
271 | /* | ||
272 | * Report the frame as DMA done | ||
273 | */ | ||
274 | rt2x00lib_dmadone(entry); | ||
275 | |||
276 | /* | 267 | /* |
277 | * Check if the frame was correctly uploaded | 268 | * Check if the frame was correctly uploaded |
278 | */ | 269 | */ |
279 | if (urb->status) | 270 | if (urb->status) |
280 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); | 271 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); |
272 | /* | ||
273 | * Report the frame as DMA done | ||
274 | */ | ||
275 | rt2x00lib_dmadone(entry); | ||
281 | 276 | ||
277 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
278 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
282 | /* | 279 | /* |
283 | * Schedule the delayed work for reading the TX status | 280 | * Schedule the delayed work for reading the TX status |
284 | * from the device. | 281 | * from the device. |
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 6a93939f44e8..0baeb894f093 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c | |||
@@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = { | |||
2420 | /* Buffalo */ | 2420 | /* Buffalo */ |
2421 | { USB_DEVICE(0x0411, 0x00d8) }, | 2421 | { USB_DEVICE(0x0411, 0x00d8) }, |
2422 | { USB_DEVICE(0x0411, 0x00d9) }, | 2422 | { USB_DEVICE(0x0411, 0x00d9) }, |
2423 | { USB_DEVICE(0x0411, 0x00e6) }, | ||
2423 | { USB_DEVICE(0x0411, 0x00f4) }, | 2424 | { USB_DEVICE(0x0411, 0x00f4) }, |
2424 | { USB_DEVICE(0x0411, 0x0116) }, | 2425 | { USB_DEVICE(0x0411, 0x0116) }, |
2425 | { USB_DEVICE(0x0411, 0x0119) }, | 2426 | { USB_DEVICE(0x0411, 0x0119) }, |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 942f7a3969a7..ef63c0df006a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
281 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, | 281 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, |
282 | /* 8188CE-VAU USB minCard (b/g mode only) */ | 282 | /* 8188CE-VAU USB minCard (b/g mode only) */ |
283 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, | 283 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, |
284 | /* 8188RU in Alfa AWUS036NHR */ | ||
285 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, | ||
284 | /* 8188 Combo for BC4 */ | 286 | /* 8188 Combo for BC4 */ |
285 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, | 287 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, |
286 | 288 | ||
@@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
303 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ | 305 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ |
304 | /* HP - Lite-On ,8188CUS Slim Combo */ | 306 | /* HP - Lite-On ,8188CUS Slim Combo */ |
305 | {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, | 307 | {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, |
308 | {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */ | ||
306 | {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ | 309 | {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ |
307 | {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ | 310 | {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ |
308 | {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ | 311 | {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ |
309 | {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ | 312 | {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ |
310 | {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ | 313 | {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ |
311 | {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ | 314 | {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ |
312 | /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ | 315 | /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ |
313 | {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)}, | 316 | {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)}, |
317 | {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */ | ||
318 | {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */ | ||
319 | {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */ | ||
314 | 320 | ||
315 | /****** 8192CU ********/ | 321 | /****** 8192CU ********/ |
316 | {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ | 322 | {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ |
317 | {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ | 323 | {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ |
318 | {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ | 324 | {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ |
319 | {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/ | ||
320 | {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ | 325 | {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ |
321 | {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ | 326 | {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
322 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ | 327 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c index ef8370edace7..ad87a1ac6462 100644 --- a/drivers/net/wireless/wl1251/acx.c +++ b/drivers/net/wireless/wl1251/acx.c | |||
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth) | |||
140 | auth->sleep_auth = sleep_auth; | 140 | auth->sleep_auth = sleep_auth; |
141 | 141 | ||
142 | ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); | 142 | ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); |
143 | if (ret < 0) | ||
144 | return ret; | ||
145 | 143 | ||
146 | out: | 144 | out: |
147 | kfree(auth); | 145 | kfree(auth); |
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl) | |||
681 | 679 | ||
682 | ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, | 680 | ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, |
683 | detection, sizeof(*detection)); | 681 | detection, sizeof(*detection)); |
684 | if (ret < 0) { | 682 | if (ret < 0) |
685 | wl1251_warning("failed to set cca threshold: %d", ret); | 683 | wl1251_warning("failed to set cca threshold: %d", ret); |
686 | return ret; | ||
687 | } | ||
688 | 684 | ||
689 | out: | 685 | out: |
690 | kfree(detection); | 686 | kfree(detection); |
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c index 81f164bc4888..d14d69d733a0 100644 --- a/drivers/net/wireless/wl1251/cmd.c +++ b/drivers/net/wireless/wl1251/cmd.c | |||
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) | |||
241 | if (ret < 0) { | 241 | if (ret < 0) { |
242 | wl1251_error("tx %s cmd for channel %d failed", | 242 | wl1251_error("tx %s cmd for channel %d failed", |
243 | enable ? "start" : "stop", channel); | 243 | enable ? "start" : "stop", channel); |
244 | return ret; | 244 | goto out; |
245 | } | 245 | } |
246 | 246 | ||
247 | wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", | 247 | wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", |
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index 7e33f1f4f3d4..34f6ab53e519 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c | |||
@@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) | |||
77 | auth->sleep_auth = sleep_auth; | 77 | auth->sleep_auth = sleep_auth; |
78 | 78 | ||
79 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); | 79 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); |
80 | if (ret < 0) | ||
81 | return ret; | ||
82 | 80 | ||
83 | out: | 81 | out: |
84 | kfree(auth); | 82 | kfree(auth); |
@@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl) | |||
624 | 622 | ||
625 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, | 623 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, |
626 | detection, sizeof(*detection)); | 624 | detection, sizeof(*detection)); |
627 | if (ret < 0) { | 625 | if (ret < 0) |
628 | wl1271_warning("failed to set cca threshold: %d", ret); | 626 | wl1271_warning("failed to set cca threshold: %d", ret); |
629 | return ret; | ||
630 | } | ||
631 | 627 | ||
632 | out: | 628 | out: |
633 | kfree(detection); | 629 | kfree(detection); |
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 5d5e1ef87206..88add68bd9ac 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c | |||
@@ -139,12 +139,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) | |||
139 | 139 | ||
140 | if (ret < 0) { | 140 | if (ret < 0) { |
141 | wl1271_warning("testmode cmd interrogate failed: %d", ret); | 141 | wl1271_warning("testmode cmd interrogate failed: %d", ret); |
142 | kfree(cmd); | ||
142 | return ret; | 143 | return ret; |
143 | } | 144 | } |
144 | 145 | ||
145 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); | 146 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); |
146 | if (!skb) | 147 | if (!skb) { |
148 | kfree(cmd); | ||
147 | return -ENOMEM; | 149 | return -ENOMEM; |
150 | } | ||
148 | 151 | ||
149 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); | 152 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); |
150 | 153 | ||
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 749fdf070319..753b21aaea61 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c | |||
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
158 | */ | 158 | */ |
159 | } | 159 | } |
160 | 160 | ||
161 | /* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */ | ||
162 | static int pci_set_payload(struct pci_dev *dev) | ||
163 | { | ||
164 | int pos, ppos; | ||
165 | u16 pctl, psz; | ||
166 | u16 dctl, dsz, dcap, dmax; | ||
167 | struct pci_dev *parent; | ||
168 | |||
169 | parent = dev->bus->self; | ||
170 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
171 | if (!pos) | ||
172 | return 0; | ||
173 | |||
174 | /* Read Device MaxPayload capability and setting */ | ||
175 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl); | ||
176 | pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap); | ||
177 | dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
178 | dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD); | ||
179 | |||
180 | /* Read Parent MaxPayload setting */ | ||
181 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | ||
182 | if (!ppos) | ||
183 | return 0; | ||
184 | pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); | ||
185 | psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
186 | |||
187 | /* If parent payload > device max payload -> error | ||
188 | * If parent payload > device payload -> set speed | ||
189 | * If parent payload <= device payload -> do nothing | ||
190 | */ | ||
191 | if (psz > dmax) | ||
192 | return -1; | ||
193 | else if (psz > dsz) { | ||
194 | dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz); | ||
195 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | ||
196 | (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) + | ||
197 | (psz << 5)); | ||
198 | } | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | void pci_configure_slot(struct pci_dev *dev) | 161 | void pci_configure_slot(struct pci_dev *dev) |
203 | { | 162 | { |
204 | struct pci_dev *cdev; | 163 | struct pci_dev *cdev; |
@@ -210,9 +169,7 @@ void pci_configure_slot(struct pci_dev *dev) | |||
210 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | 169 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) |
211 | return; | 170 | return; |
212 | 171 | ||
213 | ret = pci_set_payload(dev); | 172 | pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); |
214 | if (ret) | ||
215 | dev_warn(&dev->dev, "could not set device max payload\n"); | ||
216 | 173 | ||
217 | memset(&hpp, 0, sizeof(hpp)); | 174 | memset(&hpp, 0, sizeof(hpp)); |
218 | ret = pci_get_hp_params(dev, &hpp); | 175 | ret = pci_get_hp_params(dev, &hpp); |
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index c94d37ec55c8..f0929934bb7a 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) | |||
55 | */ | 55 | */ |
56 | if (bus->bridge->of_node) | 56 | if (bus->bridge->of_node) |
57 | return of_node_get(bus->bridge->of_node); | 57 | return of_node_get(bus->bridge->of_node); |
58 | if (bus->bridge->parent->of_node) | 58 | if (bus->bridge->parent && bus->bridge->parent->of_node) |
59 | return of_node_get(bus->bridge->parent->of_node); | 59 | return of_node_get(bus->bridge->parent->of_node); |
60 | return NULL; | 60 | return NULL; |
61 | } | 61 | } |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 08a95b369d85..0ce67423a0a3 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; | ||
81 | |||
80 | /* | 82 | /* |
81 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
82 | * all pci devices agree on the same value. Arch can override either | 84 | * all pci devices agree on the same value. Arch can override either |
@@ -3223,6 +3225,67 @@ out: | |||
3223 | EXPORT_SYMBOL(pcie_set_readrq); | 3225 | EXPORT_SYMBOL(pcie_set_readrq); |
3224 | 3226 | ||
3225 | /** | 3227 | /** |
3228 | * pcie_get_mps - get PCI Express maximum payload size | ||
3229 | * @dev: PCI device to query | ||
3230 | * | ||
3231 | * Returns maximum payload size in bytes | ||
3232 | * or appropriate error value. | ||
3233 | */ | ||
3234 | int pcie_get_mps(struct pci_dev *dev) | ||
3235 | { | ||
3236 | int ret, cap; | ||
3237 | u16 ctl; | ||
3238 | |||
3239 | cap = pci_pcie_cap(dev); | ||
3240 | if (!cap) | ||
3241 | return -EINVAL; | ||
3242 | |||
3243 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3244 | if (!ret) | ||
3245 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | ||
3246 | |||
3247 | return ret; | ||
3248 | } | ||
3249 | |||
3250 | /** | ||
3251 | * pcie_set_mps - set PCI Express maximum payload size | ||
3252 | * @dev: PCI device to configure | ||
3253 | * @mps: maximum payload size in bytes | ||
3254 | * valid values are 128, 256, 512, 1024, 2048, 4096 | ||
3255 | * | ||
3256 | * If possible sets maximum payload size | ||
3257 | */ | ||
3258 | int pcie_set_mps(struct pci_dev *dev, int mps) | ||
3259 | { | ||
3260 | int cap, err = -EINVAL; | ||
3261 | u16 ctl, v; | ||
3262 | |||
3263 | if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) | ||
3264 | goto out; | ||
3265 | |||
3266 | v = ffs(mps) - 8; | ||
3267 | if (v > dev->pcie_mpss) | ||
3268 | goto out; | ||
3269 | v <<= 5; | ||
3270 | |||
3271 | cap = pci_pcie_cap(dev); | ||
3272 | if (!cap) | ||
3273 | goto out; | ||
3274 | |||
3275 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3276 | if (err) | ||
3277 | goto out; | ||
3278 | |||
3279 | if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { | ||
3280 | ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; | ||
3281 | ctl |= v; | ||
3282 | err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); | ||
3283 | } | ||
3284 | out: | ||
3285 | return err; | ||
3286 | } | ||
3287 | |||
3288 | /** | ||
3226 | * pci_select_bars - Make BAR mask from the type of resource | 3289 | * pci_select_bars - Make BAR mask from the type of resource |
3227 | * @dev: the PCI device for which BAR mask is made | 3290 | * @dev: the PCI device for which BAR mask is made |
3228 | * @flags: resource type mask to be selected | 3291 | * @flags: resource type mask to be selected |
@@ -3505,6 +3568,10 @@ static int __init pci_setup(char *str) | |||
3505 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3506 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3507 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | ||
3572 | pcie_bus_config = PCIE_BUS_SAFE; | ||
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | ||
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | ||
3508 | } else { | 3575 | } else { |
3509 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3510 | str); | 3577 | str); |
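The new pcie_get_mps()/pcie_set_mps() helpers above convert between a payload size in bytes and the 3-bit encoding kept in bits 7:5 of the PCIe Device Control register: bytes = 128 << code and code = ffs(bytes) - 8. A standalone check of that round trip for the six legal sizes, runnable in userspace:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int bytes;

	for (bytes = 128; bytes <= 4096; bytes *= 2) {
		int code = ffs(bytes) - 8;	/* 0..5 */
		int field = code << 5;		/* as stored in DEVCTL bits 7:5 */

		printf("%4d bytes -> code %d (field 0x%02x) -> %4d bytes\n",
		       bytes, code, field, 128 << (field >> 5));
	}
	return 0;
}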
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c8cee764b0de..b74084e9ca12 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) | |||
283 | 283 | ||
284 | #endif /* CONFIG_PCI_IOV */ | 284 | #endif /* CONFIG_PCI_IOV */ |
285 | 285 | ||
286 | extern unsigned long pci_cardbus_resource_alignment(struct resource *); | ||
287 | |||
286 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | 288 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
287 | struct resource *res) | 289 | struct resource *res) |
288 | { | 290 | { |
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | |||
292 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | 294 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) |
293 | return pci_sriov_resource_alignment(dev, resno); | 295 | return pci_sriov_resource_alignment(dev, resno); |
294 | #endif | 296 | #endif |
297 | if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) | ||
298 | return pci_cardbus_resource_alignment(res); | ||
295 | return resource_alignment(res); | 299 | return resource_alignment(res); |
296 | } | 300 | } |
297 | 301 | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 795c9026d55f..8473727b29fa 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev) | |||
856 | pdev->pcie_cap = pos; | 856 | pdev->pcie_cap = pos; |
857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
859 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); | ||
860 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; | ||
859 | } | 861 | } |
860 | 862 | ||
861 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 863 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
@@ -1326,6 +1328,150 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1326 | return nr; | 1328 | return nr; |
1327 | } | 1329 | } |
1328 | 1330 | ||
1331 | static int pcie_find_smpss(struct pci_dev *dev, void *data) | ||
1332 | { | ||
1333 | u8 *smpss = data; | ||
1334 | |||
1335 | if (!pci_is_pcie(dev)) | ||
1336 | return 0; | ||
1337 | |||
1338 | /* For PCIE hotplug enabled slots not connected directly to a | ||
1339 | * PCI-E root port, there can be problems when hotplugging | ||
1340 | * devices. This is due to the possibility of hotplugging a | ||
1341 | * device into the fabric with a smaller MPS than the devices | ||
1342 | * currently running have configured. Modifying the MPS on the | ||
1343 | * running devices could cause a fatal bus error due to an | ||
1344 | * incoming frame being larger than the newly configured MPS. | ||
1345 | * To work around this, the MPS for the entire fabric must be | ||
1346 | * set to the minimum size. Any devices hotplugged into this | ||
1347 | * fabric will have the minimum MPS set. If the PCI hotplug | ||
1348 | * slot is directly connected to the root port and there are no | ||
1349 | * other devices on the fabric (which seems to be the most | ||
1350 | * common case), then this is not an issue and MPS discovery | ||
1351 | * will occur as normal. | ||
1352 | */ | ||
1353 | if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || | ||
1354 | dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) | ||
1355 | *smpss = 0; | ||
1356 | |||
1357 | if (*smpss > dev->pcie_mpss) | ||
1358 | *smpss = dev->pcie_mpss; | ||
1359 | |||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1363 | static void pcie_write_mps(struct pci_dev *dev, int mps) | ||
1364 | { | ||
1365 | int rc, dev_mpss; | ||
1366 | |||
1367 | dev_mpss = 128 << dev->pcie_mpss; | ||
1368 | |||
1369 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
1370 | if (dev->bus->self) { | ||
1371 | dev_dbg(&dev->bus->dev, "Bus MPSS %d\n", | ||
1372 | 128 << dev->bus->self->pcie_mpss); | ||
1373 | |||
1374 | /* For "MPS Force Max", the assumption is made that | ||
1375 | * downstream communication will never be larger than | ||
1376 | * the MRRS. So, the MPS only needs to be configured | ||
1377 | * for the upstream communication. This being the case, | ||
1378 | * walk from the top down and set the MPS of the child | ||
1379 | * to that of the parent bus. | ||
1380 | */ | ||
1381 | mps = 128 << dev->bus->self->pcie_mpss; | ||
1382 | if (mps > dev_mpss) | ||
1383 | dev_warn(&dev->dev, "MPS configured higher than" | ||
1384 | " maximum supported by the device. If" | ||
1385 | " a bus issue occurs, try running with" | ||
1386 | " pci=pcie_bus_safe.\n"); | ||
1387 | } | ||
1388 | |||
1389 | dev->pcie_mpss = ffs(mps) - 8; | ||
1390 | } | ||
1391 | |||
1392 | rc = pcie_set_mps(dev, mps); | ||
1393 | if (rc) | ||
1394 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); | ||
1395 | } | ||
1396 | |||
1397 | static void pcie_write_mrrs(struct pci_dev *dev, int mps) | ||
1398 | { | ||
1399 | int rc, mrrs; | ||
1400 | |||
1401 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
1402 | int dev_mpss = 128 << dev->pcie_mpss; | ||
1403 | |||
1404 | /* For Max performance, the MRRS must be set to the largest | ||
1405 | * supported value. However, it cannot be configured larger | ||
1406 | * than the MPS the device or the bus can support. This assumes | ||
1407 | * that the largest MRRS available on the device cannot be | ||
1408 | * smaller than the device MPSS. | ||
1409 | */ | ||
1410 | mrrs = mps < dev_mpss ? mps : dev_mpss; | ||
1411 | } else | ||
1412 | /* In the "safe" case, configure the MRRS for fairness on the | ||
1413 | * bus by making all devices have the same size | ||
1414 | */ | ||
1415 | mrrs = mps; | ||
1416 | |||
1417 | |||
1418 | /* MRRS is a R/W register. Invalid values can be written, but a | ||
1419 | * subsequent read will verify if the value is acceptable or not. | ||
1420 | * If the MRRS value provided is not acceptable (e.g., too large), | ||
1421 | * shrink the value until it is acceptable to the HW. | ||
1422 | */ | ||
1423 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { | ||
1424 | rc = pcie_set_readrq(dev, mrrs); | ||
1425 | if (rc) | ||
1426 | dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); | ||
1427 | |||
1428 | mrrs /= 2; | ||
1429 | } | ||
1430 | } | ||
1431 | |||
1432 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | ||
1433 | { | ||
1434 | int mps = 128 << *(u8 *)data; | ||
1435 | |||
1436 | if (!pci_is_pcie(dev)) | ||
1437 | return 0; | ||
1438 | |||
1439 | dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | ||
1440 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | ||
1441 | |||
1442 | pcie_write_mps(dev, mps); | ||
1443 | pcie_write_mrrs(dev, mps); | ||
1444 | |||
1445 | dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | ||
1446 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | ||
1447 | |||
1448 | return 0; | ||
1449 | } | ||
1450 | |||
1451 | /* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down, | ||
1452 | * parents then children fashion. If this changes, then this code will not | ||
1453 | * work as designed. | ||
1454 | */ | ||
1455 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | ||
1456 | { | ||
1457 | u8 smpss = mpss; | ||
1458 | |||
1459 | if (!bus->self) | ||
1460 | return; | ||
1461 | |||
1462 | if (!pci_is_pcie(bus->self)) | ||
1463 | return; | ||
1464 | |||
1465 | if (pcie_bus_config == PCIE_BUS_SAFE) { | ||
1466 | pcie_find_smpss(bus->self, &smpss); | ||
1467 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | ||
1468 | } | ||
1469 | |||
1470 | pcie_bus_configure_set(bus->self, &smpss); | ||
1471 | pci_walk_bus(bus, pcie_bus_configure_set, &smpss); | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); | ||
1474 | |||
1329 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | 1475 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1330 | { | 1476 | { |
1331 | unsigned int devfn, pass, max = bus->secondary; | 1477 | unsigned int devfn, pass, max = bus->secondary; |
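The pcie_write_mps()/pcie_write_mrrs() hunks above rest on two small facts: the pcie_mpss field encodes the maximum payload size as 128 << n bytes, and pcie_write_mrrs() keeps shrinking the requested read-request size until a read-back shows the hardware accepted it. The standalone sketch below mirrors that intent only; mock_get_readrq() and mock_set_readrq() are invented stand-ins for pcie_get_readrq()/pcie_set_readrq(), and the 512-byte limit is an arbitrary example value.

    #include <stdio.h>

    /* Mock register: this example hardware ignores requests above 512 bytes. */
    static int hw_mrrs = 512;

    static int mock_get_readrq(void) { return hw_mrrs; }

    static void mock_set_readrq(int rq)
    {
            if (rq <= 512)
                    hw_mrrs = rq;
    }

    int main(void)
    {
            int pcie_mpss = 2;              /* encoded field: 2 means 128 << 2 = 512 bytes */
            int mps = 128 << pcie_mpss;     /* decoded exactly as in the patch */
            int mrrs = 4096;                /* start from an oversized read request */

            /* Shrink until the read-back matches, mirroring pcie_write_mrrs(). */
            while (mrrs != mock_get_readrq() && mrrs >= 128) {
                    mock_set_readrq(mrrs);
                    if (mrrs != mock_get_readrq())
                            mrrs /= 2;
            }
            printf("MPS %d bytes, final MRRS %d bytes\n", mps, mrrs);
            return 0;
    }

The real pcie_bus_configure_set() brackets the two writes with dev_info() dumps of MPS, MPSS and MRRS, which makes a misbehaving device easy to spot in the log.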
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8a1d3c7863a8..784da9d36029 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -34,6 +34,7 @@ struct resource_list_x { | |||
34 | resource_size_t start; | 34 | resource_size_t start; |
35 | resource_size_t end; | 35 | resource_size_t end; |
36 | resource_size_t add_size; | 36 | resource_size_t add_size; |
37 | resource_size_t min_align; | ||
37 | unsigned long flags; | 38 | unsigned long flags; |
38 | }; | 39 | }; |
39 | 40 | ||
@@ -65,7 +66,7 @@ void pci_realloc(void) | |||
65 | */ | 66 | */ |
66 | static void add_to_list(struct resource_list_x *head, | 67 | static void add_to_list(struct resource_list_x *head, |
67 | struct pci_dev *dev, struct resource *res, | 68 | struct pci_dev *dev, struct resource *res, |
68 | resource_size_t add_size) | 69 | resource_size_t add_size, resource_size_t min_align) |
69 | { | 70 | { |
70 | struct resource_list_x *list = head; | 71 | struct resource_list_x *list = head; |
71 | struct resource_list_x *ln = list->next; | 72 | struct resource_list_x *ln = list->next; |
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head, | |||
84 | tmp->end = res->end; | 85 | tmp->end = res->end; |
85 | tmp->flags = res->flags; | 86 | tmp->flags = res->flags; |
86 | tmp->add_size = add_size; | 87 | tmp->add_size = add_size; |
88 | tmp->min_align = min_align; | ||
87 | list->next = tmp; | 89 | list->next = tmp; |
88 | } | 90 | } |
89 | 91 | ||
90 | static void add_to_failed_list(struct resource_list_x *head, | 92 | static void add_to_failed_list(struct resource_list_x *head, |
91 | struct pci_dev *dev, struct resource *res) | 93 | struct pci_dev *dev, struct resource *res) |
92 | { | 94 | { |
93 | add_to_list(head, dev, res, 0); | 95 | add_to_list(head, dev, res, |
96 | 0 /* don't care */, | ||
97 | 0 /* don't care */); | ||
94 | } | 98 | } |
95 | 99 | ||
96 | static void __dev_sort_resources(struct pci_dev *dev, | 100 | static void __dev_sort_resources(struct pci_dev *dev, |
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res) | |||
121 | } | 125 | } |
122 | 126 | ||
123 | /** | 127 | /** |
124 | * adjust_resources_sorted() - satisfy any additional resource requests | 128 | * reassign_resources_sorted() - satisfy any additional resource requests |
125 | * | 129 | * |
126 | * @add_head : head of the list tracking requests requiring additional | 130 | * @realloc_head : head of the list tracking requests requiring additional |
127 | * resources | 131 | * resources |
128 | * @head : head of the list tracking requests with allocated | 132 | * @head : head of the list tracking requests with allocated |
129 | * resources | 133 | * resources |
130 | * | 134 | * |
131 | * Walk through each element of the add_head and try to procure | 135 | * Walk through each element of the realloc_head and try to procure |
132 | * additional resources for the element, provided the element | 136 | * additional resources for the element, provided the element |
133 | * is in the head list. | 137 | * is in the head list. |
134 | */ | 138 | */ |
135 | static void adjust_resources_sorted(struct resource_list_x *add_head, | 139 | static void reassign_resources_sorted(struct resource_list_x *realloc_head, |
136 | struct resource_list *head) | 140 | struct resource_list *head) |
137 | { | 141 | { |
138 | struct resource *res; | 142 | struct resource *res; |
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
141 | resource_size_t add_size; | 145 | resource_size_t add_size; |
142 | int idx; | 146 | int idx; |
143 | 147 | ||
144 | prev = add_head; | 148 | prev = realloc_head; |
145 | for (list = add_head->next; list;) { | 149 | for (list = realloc_head->next; list;) { |
146 | res = list->res; | 150 | res = list->res; |
147 | /* skip resource that has been reset */ | 151 | /* skip resource that has been reset */ |
148 | if (!res->flags) | 152 | if (!res->flags) |
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
159 | 163 | ||
160 | idx = res - &list->dev->resource[0]; | 164 | idx = res - &list->dev->resource[0]; |
161 | add_size=list->add_size; | 165 | add_size=list->add_size; |
162 | if (!resource_size(res) && add_size) { | 166 | if (!resource_size(res)) { |
163 | res->end = res->start + add_size - 1; | 167 | res->start = list->start; |
164 | if(pci_assign_resource(list->dev, idx)) | 168 | res->end = res->start + add_size - 1; |
169 | if(pci_assign_resource(list->dev, idx)) | ||
165 | reset_resource(res); | 170 | reset_resource(res); |
166 | } else if (add_size) { | 171 | } else { |
167 | adjust_resource(res, res->start, | 172 | resource_size_t align = list->min_align; |
168 | resource_size(res) + add_size); | 173 | res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); |
174 | if (pci_reassign_resource(list->dev, idx, add_size, align)) | ||
175 | dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n", | ||
176 | res); | ||
169 | } | 177 | } |
170 | out: | 178 | out: |
171 | tmp = list; | 179 | tmp = list; |
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head, | |||
210 | } | 218 | } |
211 | 219 | ||
212 | static void __assign_resources_sorted(struct resource_list *head, | 220 | static void __assign_resources_sorted(struct resource_list *head, |
213 | struct resource_list_x *add_head, | 221 | struct resource_list_x *realloc_head, |
214 | struct resource_list_x *fail_head) | 222 | struct resource_list_x *fail_head) |
215 | { | 223 | { |
216 | /* Satisfy the must-have resource requests */ | 224 | /* Satisfy the must-have resource requests */ |
217 | assign_requested_resources_sorted(head, fail_head); | 225 | assign_requested_resources_sorted(head, fail_head); |
218 | 226 | ||
219 | /* Try to satisfy any additional nice-to-have resource | 227 | /* Try to satisfy any additional optional resource |
220 | requests */ | 228 | requests */ |
221 | if (add_head) | 229 | if (realloc_head) |
222 | adjust_resources_sorted(add_head, head); | 230 | reassign_resources_sorted(realloc_head, head); |
223 | free_list(resource_list, head); | 231 | free_list(resource_list, head); |
224 | } | 232 | } |
225 | 233 | ||
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev, | |||
235 | } | 243 | } |
236 | 244 | ||
237 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | 245 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, |
238 | struct resource_list_x *add_head, | 246 | struct resource_list_x *realloc_head, |
239 | struct resource_list_x *fail_head) | 247 | struct resource_list_x *fail_head) |
240 | { | 248 | { |
241 | struct pci_dev *dev; | 249 | struct pci_dev *dev; |
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus, | |||
245 | list_for_each_entry(dev, &bus->devices, bus_list) | 253 | list_for_each_entry(dev, &bus->devices, bus_list) |
246 | __dev_sort_resources(dev, &head); | 254 | __dev_sort_resources(dev, &head); |
247 | 255 | ||
248 | __assign_resources_sorted(&head, add_head, fail_head); | 256 | __assign_resources_sorted(&head, realloc_head, fail_head); |
249 | } | 257 | } |
250 | 258 | ||
251 | void pci_setup_cardbus(struct pci_bus *bus) | 259 | void pci_setup_cardbus(struct pci_bus *bus) |
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
540 | return size; | 548 | return size; |
541 | } | 549 | } |
542 | 550 | ||
551 | static resource_size_t get_res_add_size(struct resource_list_x *realloc_head, | ||
552 | struct resource *res) | ||
553 | { | ||
554 | struct resource_list_x *list; | ||
555 | |||
556 | /* check if it is in realloc_head list */ | ||
557 | for (list = realloc_head->next; list && list->res != res; | ||
558 | list = list->next); | ||
559 | if (list) | ||
560 | return list->add_size; | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
543 | /** | 565 | /** |
544 | * pbus_size_io() - size the io window of a given bus | 566 | * pbus_size_io() - size the io window of a given bus |
545 | * | 567 | * |
546 | * @bus : the bus | 568 | * @bus : the bus |
547 | * @min_size : the minimum io window that must be allocated | 569 | * @min_size : the minimum io window that must be allocated |
548 | * @add_size : additional optional io window | 570 | * @add_size : additional optional io window |
549 | * @add_head : track the additional io window on this list | 571 | * @realloc_head : track the additional io window on this list |
550 | * | 572 | * |
551 | * Sizing the IO windows of the PCI-PCI bridge is trivial, | 573 | * Sizing the IO windows of the PCI-PCI bridge is trivial, |
552 | * since these windows have 4K granularity and the IO ranges | 574 | * since these windows have 4K granularity and the IO ranges |
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
554 | * We must be careful with the ISA aliasing though. | 576 | * We must be careful with the ISA aliasing though. |
555 | */ | 577 | */ |
556 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | 578 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
557 | resource_size_t add_size, struct resource_list_x *add_head) | 579 | resource_size_t add_size, struct resource_list_x *realloc_head) |
558 | { | 580 | { |
559 | struct pci_dev *dev; | 581 | struct pci_dev *dev; |
560 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 582 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
561 | unsigned long size = 0, size0 = 0, size1 = 0; | 583 | unsigned long size = 0, size0 = 0, size1 = 0; |
584 | resource_size_t children_add_size = 0; | ||
562 | 585 | ||
563 | if (!b_res) | 586 | if (!b_res) |
564 | return; | 587 | return; |
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
579 | size += r_size; | 602 | size += r_size; |
580 | else | 603 | else |
581 | size1 += r_size; | 604 | size1 += r_size; |
605 | |||
606 | if (realloc_head) | ||
607 | children_add_size += get_res_add_size(realloc_head, r); | ||
582 | } | 608 | } |
583 | } | 609 | } |
584 | size0 = calculate_iosize(size, min_size, size1, | 610 | size0 = calculate_iosize(size, min_size, size1, |
585 | resource_size(b_res), 4096); | 611 | resource_size(b_res), 4096); |
586 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 612 | if (children_add_size > add_size) |
613 | add_size = children_add_size; | ||
614 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
587 | calculate_iosize(size, min_size+add_size, size1, | 615 | calculate_iosize(size, min_size+add_size, size1, |
588 | resource_size(b_res), 4096); | 616 | resource_size(b_res), 4096); |
589 | if (!size0 && !size1) { | 617 | if (!size0 && !size1) { |
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
598 | b_res->start = 4096; | 626 | b_res->start = 4096; |
599 | b_res->end = b_res->start + size0 - 1; | 627 | b_res->end = b_res->start + size0 - 1; |
600 | b_res->flags |= IORESOURCE_STARTALIGN; | 628 | b_res->flags |= IORESOURCE_STARTALIGN; |
601 | if (size1 > size0 && add_head) | 629 | if (size1 > size0 && realloc_head) |
602 | add_to_list(add_head, bus->self, b_res, size1-size0); | 630 | add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096); |
603 | } | 631 | } |
604 | 632 | ||
605 | /** | 633 | /** |
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
608 | * @bus : the bus | 636 | * @bus : the bus |
610 | * @min_size : the minimum memory window that must be allocated | 637 | * @min_size : the minimum memory window that must be allocated |
610 | * @add_size : additional optional memory window | 638 | * @add_size : additional optional memory window |
611 | * @add_head : track the additional memory window on this list | 639 | * @realloc_head : track the additional memory window on this list |
612 | * | 640 | * |
613 | * Calculate the size of the bus and minimal alignment which | 641 | * Calculate the size of the bus and minimal alignment which |
614 | * guarantees that all child resources fit in this size. | 642 | * guarantees that all child resources fit in this size. |
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
616 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | 644 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
617 | unsigned long type, resource_size_t min_size, | 645 | unsigned long type, resource_size_t min_size, |
618 | resource_size_t add_size, | 646 | resource_size_t add_size, |
619 | struct resource_list_x *add_head) | 647 | struct resource_list_x *realloc_head) |
620 | { | 648 | { |
621 | struct pci_dev *dev; | 649 | struct pci_dev *dev; |
622 | resource_size_t min_align, align, size, size0, size1; | 650 | resource_size_t min_align, align, size, size0, size1; |
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
624 | int order, max_order; | 652 | int order, max_order; |
625 | struct resource *b_res = find_free_bus_resource(bus, type); | 653 | struct resource *b_res = find_free_bus_resource(bus, type); |
626 | unsigned int mem64_mask = 0; | 654 | unsigned int mem64_mask = 0; |
655 | resource_size_t children_add_size = 0; | ||
627 | 656 | ||
628 | if (!b_res) | 657 | if (!b_res) |
629 | return 0; | 658 | return 0; |
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
645 | if (r->parent || (r->flags & mask) != type) | 674 | if (r->parent || (r->flags & mask) != type) |
646 | continue; | 675 | continue; |
647 | r_size = resource_size(r); | 676 | r_size = resource_size(r); |
677 | #ifdef CONFIG_PCI_IOV | ||
678 | /* put SRIOV requested res to the optional list */ | ||
679 | if (realloc_head && i >= PCI_IOV_RESOURCES && | ||
680 | i <= PCI_IOV_RESOURCE_END) { | ||
681 | r->end = r->start - 1; | ||
682 | add_to_list(realloc_head, dev, r, r_size, 0 /* don't care */); | ||
683 | children_add_size += r_size; | ||
684 | continue; | ||
685 | } | ||
686 | #endif | ||
648 | /* For bridges size != alignment */ | 687 | /* For bridges size != alignment */ |
649 | align = pci_resource_alignment(dev, r); | 688 | align = pci_resource_alignment(dev, r); |
650 | order = __ffs(align) - 20; | 689 | order = __ffs(align) - 20; |
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
665 | if (order > max_order) | 704 | if (order > max_order) |
666 | max_order = order; | 705 | max_order = order; |
667 | mem64_mask &= r->flags & IORESOURCE_MEM_64; | 706 | mem64_mask &= r->flags & IORESOURCE_MEM_64; |
707 | |||
708 | if (realloc_head) | ||
709 | children_add_size += get_res_add_size(realloc_head, r); | ||
668 | } | 710 | } |
669 | } | 711 | } |
670 | align = 0; | 712 | align = 0; |
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
681 | align += aligns[order]; | 723 | align += aligns[order]; |
682 | } | 724 | } |
683 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 725 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
684 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 726 | if (children_add_size > add_size) |
727 | add_size = children_add_size; | ||
728 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
685 | calculate_memsize(size, min_size+add_size, 0, | 729 | calculate_memsize(size, min_size+add_size, 0, |
686 | resource_size(b_res), min_align); | 730 | resource_size(b_res), min_align); |
687 | if (!size0 && !size1) { | 731 | if (!size0 && !size1) { |
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
695 | b_res->start = min_align; | 739 | b_res->start = min_align; |
696 | b_res->end = size0 + min_align - 1; | 740 | b_res->end = size0 + min_align - 1; |
697 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; | 741 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; |
698 | if (size1 > size0 && add_head) | 742 | if (size1 > size0 && realloc_head) |
699 | add_to_list(add_head, bus->self, b_res, size1-size0); | 743 | add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); |
700 | return 1; | 744 | return 1; |
701 | } | 745 | } |
702 | 746 | ||
703 | static void pci_bus_size_cardbus(struct pci_bus *bus) | 747 | unsigned long pci_cardbus_resource_alignment(struct resource *res) |
748 | { | ||
749 | if (res->flags & IORESOURCE_IO) | ||
750 | return pci_cardbus_io_size; | ||
751 | if (res->flags & IORESOURCE_MEM) | ||
752 | return pci_cardbus_mem_size; | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void pci_bus_size_cardbus(struct pci_bus *bus, | ||
757 | struct resource_list_x *realloc_head) | ||
704 | { | 758 | { |
705 | struct pci_dev *bridge = bus->self; | 759 | struct pci_dev *bridge = bus->self; |
706 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; | 760 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; |
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
711 | * a fixed amount of bus space for CardBus bridges. | 765 | * a fixed amount of bus space for CardBus bridges. |
712 | */ | 766 | */ |
713 | b_res[0].start = 0; | 767 | b_res[0].start = 0; |
714 | b_res[0].end = pci_cardbus_io_size - 1; | ||
715 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 768 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
769 | if (realloc_head) | ||
770 | add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* don't care */); | ||
716 | 771 | ||
717 | b_res[1].start = 0; | 772 | b_res[1].start = 0; |
718 | b_res[1].end = pci_cardbus_io_size - 1; | ||
719 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 773 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
774 | if (realloc_head) | ||
775 | add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* don't care */); | ||
720 | 776 | ||
721 | /* | 777 | /* |
722 | * Check whether prefetchable memory is supported | 778 | * Check whether prefetchable memory is supported |
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
736 | */ | 792 | */ |
737 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { | 793 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { |
738 | b_res[2].start = 0; | 794 | b_res[2].start = 0; |
739 | b_res[2].end = pci_cardbus_mem_size - 1; | ||
740 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; | 795 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; |
796 | if (realloc_head) | ||
797 | add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* don't care */); | ||
741 | 798 | ||
742 | b_res[3].start = 0; | 799 | b_res[3].start = 0; |
743 | b_res[3].end = pci_cardbus_mem_size - 1; | ||
744 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 800 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
801 | if (realloc_head) | ||
802 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* don't care */); | ||
745 | } else { | 803 | } else { |
746 | b_res[3].start = 0; | 804 | b_res[3].start = 0; |
747 | b_res[3].end = pci_cardbus_mem_size * 2 - 1; | ||
748 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 805 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
806 | if (realloc_head) | ||
807 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* don't care */); | ||
749 | } | 808 | } |
809 | |||
810 | /* set the size of the resource to zero, so that the resource does not | ||
811 | * get assigned during the required-resource allocation cycle but gets assigned | ||
812 | * during the optional-resource allocation cycle. | ||
813 | */ | ||
814 | b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1; | ||
815 | b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0; | ||
750 | } | 816 | } |
751 | 817 | ||
752 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, | 818 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, |
753 | struct resource_list_x *add_head) | 819 | struct resource_list_x *realloc_head) |
754 | { | 820 | { |
755 | struct pci_dev *dev; | 821 | struct pci_dev *dev; |
756 | unsigned long mask, prefmask; | 822 | unsigned long mask, prefmask; |
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
763 | 829 | ||
764 | switch (dev->class >> 8) { | 830 | switch (dev->class >> 8) { |
765 | case PCI_CLASS_BRIDGE_CARDBUS: | 831 | case PCI_CLASS_BRIDGE_CARDBUS: |
766 | pci_bus_size_cardbus(b); | 832 | pci_bus_size_cardbus(b, realloc_head); |
767 | break; | 833 | break; |
768 | 834 | ||
769 | case PCI_CLASS_BRIDGE_PCI: | 835 | case PCI_CLASS_BRIDGE_PCI: |
770 | default: | 836 | default: |
771 | __pci_bus_size_bridges(b, add_head); | 837 | __pci_bus_size_bridges(b, realloc_head); |
772 | break; | 838 | break; |
773 | } | 839 | } |
774 | } | 840 | } |
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
792 | * Follow thru | 858 | * Follow thru |
793 | */ | 859 | */ |
794 | default: | 860 | default: |
795 | pbus_size_io(bus, 0, additional_io_size, add_head); | 861 | pbus_size_io(bus, 0, additional_io_size, realloc_head); |
796 | /* If the bridge supports prefetchable range, size it | 862 | /* If the bridge supports prefetchable range, size it |
797 | separately. If it doesn't, or its prefetchable window | 863 | separately. If it doesn't, or its prefetchable window |
798 | has already been allocated by arch code, try | 864 | has already been allocated by arch code, try |
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
800 | resources. */ | 866 | resources. */ |
801 | mask = IORESOURCE_MEM; | 867 | mask = IORESOURCE_MEM; |
802 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; | 868 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; |
803 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) | 869 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head)) |
804 | mask = prefmask; /* Success, size non-prefetch only. */ | 870 | mask = prefmask; /* Success, size non-prefetch only. */ |
805 | else | 871 | else |
806 | additional_mem_size += additional_mem_size; | 872 | additional_mem_size += additional_mem_size; |
807 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); | 873 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head); |
808 | break; | 874 | break; |
809 | } | 875 | } |
810 | } | 876 | } |
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
816 | EXPORT_SYMBOL(pci_bus_size_bridges); | 882 | EXPORT_SYMBOL(pci_bus_size_bridges); |
817 | 883 | ||
818 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, | 884 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
819 | struct resource_list_x *add_head, | 885 | struct resource_list_x *realloc_head, |
820 | struct resource_list_x *fail_head) | 886 | struct resource_list_x *fail_head) |
821 | { | 887 | { |
822 | struct pci_bus *b; | 888 | struct pci_bus *b; |
823 | struct pci_dev *dev; | 889 | struct pci_dev *dev; |
824 | 890 | ||
825 | pbus_assign_resources_sorted(bus, add_head, fail_head); | 891 | pbus_assign_resources_sorted(bus, realloc_head, fail_head); |
826 | 892 | ||
827 | list_for_each_entry(dev, &bus->devices, bus_list) { | 893 | list_for_each_entry(dev, &bus->devices, bus_list) { |
828 | b = dev->subordinate; | 894 | b = dev->subordinate; |
829 | if (!b) | 895 | if (!b) |
830 | continue; | 896 | continue; |
831 | 897 | ||
832 | __pci_bus_assign_resources(b, add_head, fail_head); | 898 | __pci_bus_assign_resources(b, realloc_head, fail_head); |
833 | 899 | ||
834 | switch (dev->class >> 8) { | 900 | switch (dev->class >> 8) { |
835 | case PCI_CLASS_BRIDGE_PCI: | 901 | case PCI_CLASS_BRIDGE_PCI: |
@@ -1039,7 +1105,7 @@ void __init | |||
1039 | pci_assign_unassigned_resources(void) | 1105 | pci_assign_unassigned_resources(void) |
1040 | { | 1106 | { |
1041 | struct pci_bus *bus; | 1107 | struct pci_bus *bus; |
1042 | struct resource_list_x add_list; /* list of resources that | 1108 | struct resource_list_x realloc_list; /* list of resources that |
1043 | want additional resources */ | 1109 | want additional resources */ |
1044 | int tried_times = 0; | 1110 | int tried_times = 0; |
1045 | enum release_type rel_type = leaf_only; | 1111 | enum release_type rel_type = leaf_only; |
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void) | |||
1052 | 1118 | ||
1053 | 1119 | ||
1054 | head.next = NULL; | 1120 | head.next = NULL; |
1055 | add_list.next = NULL; | 1121 | realloc_list.next = NULL; |
1056 | 1122 | ||
1057 | pci_try_num = max_depth + 1; | 1123 | pci_try_num = max_depth + 1; |
1058 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | 1124 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", |
@@ -1062,12 +1128,12 @@ again: | |||
1062 | /* Depth first, calculate sizes and alignments of all | 1128 | /* Depth first, calculate sizes and alignments of all |
1063 | subordinate buses. */ | 1129 | subordinate buses. */ |
1064 | list_for_each_entry(bus, &pci_root_buses, node) | 1130 | list_for_each_entry(bus, &pci_root_buses, node) |
1065 | __pci_bus_size_bridges(bus, &add_list); | 1131 | __pci_bus_size_bridges(bus, &realloc_list); |
1066 | 1132 | ||
1067 | /* Depth last, allocate resources and update the hardware. */ | 1133 | /* Depth last, allocate resources and update the hardware. */ |
1068 | list_for_each_entry(bus, &pci_root_buses, node) | 1134 | list_for_each_entry(bus, &pci_root_buses, node) |
1069 | __pci_bus_assign_resources(bus, &add_list, &head); | 1135 | __pci_bus_assign_resources(bus, &realloc_list, &head); |
1070 | BUG_ON(add_list.next); | 1136 | BUG_ON(realloc_list.next); |
1071 | tried_times++; | 1137 | tried_times++; |
1072 | 1138 | ||
1073 | /* any device complain? */ | 1139 | /* any device complain? */ |
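A side note on the setup-bus.c sizing changes above: the required window (size0) and the optional window (size1) are computed separately, only their difference plus a minimum alignment is recorded via add_to_list(), and get_res_add_size() is how a bridge later folds its children's optional requests into children_add_size. The toy program below walks a simplified version of that list; struct res and struct realloc_entry are invented stand-ins for the kernel's struct resource and struct resource_list_x.

    #include <stdio.h>
    #include <stddef.h>

    struct res { unsigned long long size; };

    struct realloc_entry {
            struct realloc_entry *next;
            struct res *res;
            unsigned long long add_size;
    };

    /* Simplified get_res_add_size(): find the optional size recorded for @r. */
    static unsigned long long get_res_add_size(struct realloc_entry *head,
                                               struct res *r)
    {
            struct realloc_entry *e;

            for (e = head; e && e->res != r; e = e->next)
                    ;
            return e ? e->add_size : 0;
    }

    int main(void)
    {
            struct res bar0 = { 0x1000 }, bar1 = { 0x4000 };
            struct realloc_entry e1 = { NULL, &bar1, 0x8000 }; /* bar1 wants 32K more */
            struct realloc_entry *head = &e1;
            struct res *child[] = { &bar0, &bar1 };
            unsigned long long size = 0, children_add_size = 0;
            size_t i;

            /* The sizing loop sums the required sizes and, when a realloc list
             * is passed in, also accumulates the children's optional requests. */
            for (i = 0; i < 2; i++) {
                    size += child[i]->size;
                    children_add_size += get_res_add_size(head, child[i]);
            }
            printf("required %#llx, optional extra %#llx\n", size, children_add_size);
            return 0;
    }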
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 319f359906e8..51a9095c7da4 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev) | |||
128 | } | 128 | } |
129 | #endif /* CONFIG_PCI_QUIRKS */ | 129 | #endif /* CONFIG_PCI_QUIRKS */ |
130 | 130 | ||
131 | |||
132 | |||
131 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | 133 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, |
132 | int resno) | 134 | int resno, resource_size_t size, resource_size_t align) |
133 | { | 135 | { |
134 | struct resource *res = dev->resource + resno; | 136 | struct resource *res = dev->resource + resno; |
135 | resource_size_t size, min, align; | 137 | resource_size_t min; |
136 | int ret; | 138 | int ret; |
137 | 139 | ||
138 | size = resource_size(res); | ||
139 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 140 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
140 | align = pci_resource_alignment(dev, res); | ||
141 | 141 | ||
142 | /* First, try exact prefetching match.. */ | 142 | /* First, try exact prefetching match.. */ |
143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, | 143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, |
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, | 154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, |
155 | pcibios_align_resource, dev); | 155 | pcibios_align_resource, dev); |
156 | } | 156 | } |
157 | return ret; | ||
158 | } | ||
157 | 159 | ||
158 | if (ret < 0 && dev->fw_addr[resno]) { | 160 | static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, |
159 | struct resource *root, *conflict; | 161 | int resno, resource_size_t size) |
160 | resource_size_t start, end; | 162 | { |
163 | struct resource *root, *conflict; | ||
164 | resource_size_t start, end; | ||
165 | int ret = 0; | ||
161 | 166 | ||
162 | /* | 167 | if (res->flags & IORESOURCE_IO) |
163 | * If we failed to assign anything, let's try the address | 168 | root = &ioport_resource; |
164 | * where firmware left it. That at least has a chance of | 169 | else |
165 | * working, which is better than just leaving it disabled. | 170 | root = &iomem_resource; |
166 | */ | 171 | |
172 | start = res->start; | ||
173 | end = res->end; | ||
174 | res->start = dev->fw_addr[resno]; | ||
175 | res->end = res->start + size - 1; | ||
176 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
177 | resno, res); | ||
178 | conflict = request_resource_conflict(root, res); | ||
179 | if (conflict) { | ||
180 | dev_info(&dev->dev, | ||
181 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
182 | res, conflict->name, conflict); | ||
183 | res->start = start; | ||
184 | res->end = end; | ||
185 | ret = 1; | ||
186 | } | ||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align) | ||
191 | { | ||
192 | struct resource *res = dev->resource + resno; | ||
193 | struct pci_bus *bus; | ||
194 | int ret; | ||
195 | char *type; | ||
167 | 196 | ||
168 | if (res->flags & IORESOURCE_IO) | 197 | bus = dev->bus; |
169 | root = &ioport_resource; | 198 | while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { |
199 | if (!bus->parent || !bus->self->transparent) | ||
200 | break; | ||
201 | bus = bus->parent; | ||
202 | } | ||
203 | |||
204 | if (ret) { | ||
205 | if (res->flags & IORESOURCE_MEM) | ||
206 | if (res->flags & IORESOURCE_PREFETCH) | ||
207 | type = "mem pref"; | ||
208 | else | ||
209 | type = "mem"; | ||
210 | else if (res->flags & IORESOURCE_IO) | ||
211 | type = "io"; | ||
170 | else | 212 | else |
171 | root = &iomem_resource; | 213 | type = "unknown"; |
172 | 214 | dev_info(&dev->dev, | |
173 | start = res->start; | 215 | "BAR %d: can't assign %s (size %#llx)\n", |
174 | end = res->end; | 216 | resno, type, (unsigned long long) resource_size(res)); |
175 | res->start = dev->fw_addr[resno]; | ||
176 | res->end = res->start + size - 1; | ||
177 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
178 | resno, res); | ||
179 | conflict = request_resource_conflict(root, res); | ||
180 | if (conflict) { | ||
181 | dev_info(&dev->dev, | ||
182 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
183 | res, conflict->name, conflict); | ||
184 | res->start = start; | ||
185 | res->end = end; | ||
186 | } else | ||
187 | ret = 0; | ||
188 | } | 217 | } |
189 | 218 | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, | ||
223 | resource_size_t min_align) | ||
224 | { | ||
225 | struct resource *res = dev->resource + resno; | ||
226 | resource_size_t new_size; | ||
227 | int ret; | ||
228 | |||
229 | if (!res->parent) { | ||
230 | dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR " | ||
231 | "\n", resno, res); | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | new_size = resource_size(res) + addsize + min_align; | ||
236 | ret = _pci_assign_resource(dev, resno, new_size, min_align); | ||
190 | if (!ret) { | 237 | if (!ret) { |
191 | res->flags &= ~IORESOURCE_STARTALIGN; | 238 | res->flags &= ~IORESOURCE_STARTALIGN; |
192 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | 239 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); |
193 | if (resno < PCI_BRIDGE_RESOURCES) | 240 | if (resno < PCI_BRIDGE_RESOURCES) |
194 | pci_update_resource(dev, resno); | 241 | pci_update_resource(dev, resno); |
195 | } | 242 | } |
196 | |||
197 | return ret; | 243 | return ret; |
198 | } | 244 | } |
199 | 245 | ||
200 | int pci_assign_resource(struct pci_dev *dev, int resno) | 246 | int pci_assign_resource(struct pci_dev *dev, int resno) |
201 | { | 247 | { |
202 | struct resource *res = dev->resource + resno; | 248 | struct resource *res = dev->resource + resno; |
203 | resource_size_t align; | 249 | resource_size_t align, size; |
204 | struct pci_bus *bus; | 250 | struct pci_bus *bus; |
205 | int ret; | 251 | int ret; |
206 | char *type; | ||
207 | 252 | ||
208 | align = pci_resource_alignment(dev, res); | 253 | align = pci_resource_alignment(dev, res); |
209 | if (!align) { | 254 | if (!align) { |
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
213 | } | 258 | } |
214 | 259 | ||
215 | bus = dev->bus; | 260 | bus = dev->bus; |
216 | while ((ret = __pci_assign_resource(bus, dev, resno))) { | 261 | size = resource_size(res); |
217 | if (bus->parent && bus->self->transparent) | 262 | ret = _pci_assign_resource(dev, resno, size, align); |
218 | bus = bus->parent; | ||
219 | else | ||
220 | bus = NULL; | ||
221 | if (bus) | ||
222 | continue; | ||
223 | break; | ||
224 | } | ||
225 | 263 | ||
226 | if (ret) { | 264 | /* |
227 | if (res->flags & IORESOURCE_MEM) | 265 | * If we failed to assign anything, let's try the address |
228 | if (res->flags & IORESOURCE_PREFETCH) | 266 | * where firmware left it. That at least has a chance of |
229 | type = "mem pref"; | 267 | * working, which is better than just leaving it disabled. |
230 | else | 268 | */ |
231 | type = "mem"; | 269 | if (ret < 0 && dev->fw_addr[resno]) |
232 | else if (res->flags & IORESOURCE_IO) | 270 | ret = pci_revert_fw_address(res, dev, resno, size); |
233 | type = "io"; | ||
234 | else | ||
235 | type = "unknown"; | ||
236 | dev_info(&dev->dev, | ||
237 | "BAR %d: can't assign %s (size %#llx)\n", | ||
238 | resno, type, (unsigned long long) resource_size(res)); | ||
239 | } | ||
240 | 271 | ||
272 | if (!ret) { | ||
273 | res->flags &= ~IORESOURCE_STARTALIGN; | ||
274 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | ||
275 | if (resno < PCI_BRIDGE_RESOURCES) | ||
276 | pci_update_resource(dev, resno); | ||
277 | } | ||
241 | return ret; | 278 | return ret; |
242 | } | 279 | } |
243 | 280 | ||
281 | |||
244 | /* Sort resources by alignment */ | 282 | /* Sort resources by alignment */ |
245 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | 283 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) |
246 | { | 284 | { |
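Another aside, this time on the setup-res.c refactor: _pci_assign_resource() retries the allocation on each parent bus for as long as the bridge above is transparent, and pci_reassign_resource() asks for resource_size(res) + addsize + min_align so the grown BAR can still be aligned. The sketch below imitates only that retry walk; struct bus and alloc_on_bus() are placeholders, not the kernel's struct pci_bus or pci_bus_alloc_resource().

    #include <stdio.h>
    #include <stdbool.h>

    /* Minimal mock of the bus hierarchy the retry loop walks. */
    struct bus {
            struct bus *parent;
            bool transparent;               /* bridge forwards untranslated ranges */
            unsigned long long free;        /* space this bus can still hand out   */
    };

    /* Placeholder allocator: succeed only if the bus has enough room. */
    static int alloc_on_bus(struct bus *b, unsigned long long size)
    {
            return (b->free >= size) ? 0 : -1;
    }

    int main(void)
    {
            struct bus root = { NULL, false, 0x100000 };
            struct bus leaf = { &root, true, 0x1000 }; /* too small on its own */
            unsigned long long old_size = 0x2000, addsize = 0x6000, min_align = 0x1000;
            unsigned long long new_size = old_size + addsize + min_align;
            struct bus *b = &leaf;
            int ret;

            /* Walk upward while the allocation fails and the bridge is transparent. */
            while ((ret = alloc_on_bus(b, new_size))) {
                    if (!b->parent || !b->transparent)
                            break;
                    b = b->parent;
            }
            printf("size %#llx %s\n", new_size, ret ? "not assigned" : "assigned");
            return 0;
    }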
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c index 7106b49b26e4..ffc5033ea9c9 100644 --- a/drivers/power/max8997_charger.c +++ b/drivers/power/max8997_charger.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c index cc21fa2120be..ef8efadb58cb 100644 --- a/drivers/power/max8998_charger.c +++ b/drivers/power/max8998_charger.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index a675e31b4f13..d32d0d70f9ba 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/s3c_adc_battery.h> | 20 | #include <linux/s3c_adc_battery.h> |
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | ||
23 | 24 | ||
24 | #include <plat/adc.h> | 25 | #include <plat/adc.h> |
25 | 26 | ||
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index ee893581d4b7..ebe77dd87daf 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, | |||
505 | rdev->dev.dma_mask = &rdev->dma_mask; | 505 | rdev->dev.dma_mask = &rdev->dma_mask; |
506 | rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | 506 | rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); |
507 | 507 | ||
508 | if ((rdev->pef & RIO_PEF_INB_DOORBELL) && | 508 | if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) |
509 | (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) | ||
510 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], | 509 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], |
511 | 0, 0xffff); | 510 | 0, 0xffff); |
512 | 511 | ||
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 3195dbd3ec34..44e91e598f8d 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(rtc_irq_unregister); | |||
639 | static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) | 639 | static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) |
640 | { | 640 | { |
641 | /* | 641 | /* |
642 | * We unconditionally cancel the timer here, because otherwise | 642 | * We always cancel the timer here first, because otherwise |
643 | * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 643 | * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
644 | * when we manage to start the timer before the callback | 644 | * when we manage to start the timer before the callback |
645 | * returns HRTIMER_RESTART. | 645 | * returns HRTIMER_RESTART. |
@@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) | |||
708 | int err = 0; | 708 | int err = 0; |
709 | unsigned long flags; | 709 | unsigned long flags; |
710 | 710 | ||
711 | if (freq <= 0 || freq > 5000) | 711 | if (freq <= 0 || freq > RTC_MAX_FREQ) |
712 | return -EINVAL; | 712 | return -EINVAL; |
713 | retry: | 713 | retry: |
714 | spin_lock_irqsave(&rtc->irq_task_lock, flags); | 714 | spin_lock_irqsave(&rtc->irq_task_lock, flags); |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 9329dbb9ebab..4e7c04e773e0 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -152,10 +152,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | |||
152 | goto retry_get_time; | 152 | goto retry_get_time; |
153 | } | 153 | } |
154 | 154 | ||
155 | pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", | ||
156 | 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, | ||
157 | rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); | ||
158 | |||
159 | rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); | 155 | rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); |
160 | rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); | 156 | rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); |
161 | rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); | 157 | rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); |
@@ -164,6 +160,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | |||
164 | rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); | 160 | rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); |
165 | 161 | ||
166 | rtc_tm->tm_year += 100; | 162 | rtc_tm->tm_year += 100; |
163 | |||
164 | pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", | ||
165 | 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, | ||
166 | rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); | ||
167 | |||
167 | rtc_tm->tm_mon -= 1; | 168 | rtc_tm->tm_mon -= 1; |
168 | 169 | ||
169 | clk_disable(rtc_clk); | 170 | clk_disable(rtc_clk); |
@@ -269,10 +270,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
269 | clk_enable(rtc_clk); | 270 | clk_enable(rtc_clk); |
270 | pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", | 271 | pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", |
271 | alrm->enabled, | 272 | alrm->enabled, |
272 | 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, | 273 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, |
273 | tm->tm_hour, tm->tm_min, tm->tm_sec); | 274 | tm->tm_hour, tm->tm_min, tm->tm_sec); |
274 | 275 | ||
275 | |||
276 | alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; | 276 | alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; |
277 | writeb(0x00, base + S3C2410_RTCALM); | 277 | writeb(0x00, base + S3C2410_RTCALM); |
278 | 278 | ||
@@ -319,49 +319,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) | |||
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | 321 | ||
322 | static int s3c_rtc_open(struct device *dev) | ||
323 | { | ||
324 | struct platform_device *pdev = to_platform_device(dev); | ||
325 | struct rtc_device *rtc_dev = platform_get_drvdata(pdev); | ||
326 | int ret; | ||
327 | |||
328 | ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, | ||
329 | IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev); | ||
330 | |||
331 | if (ret) { | ||
332 | dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, | ||
337 | IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev); | ||
338 | |||
339 | if (ret) { | ||
340 | dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); | ||
341 | goto tick_err; | ||
342 | } | ||
343 | |||
344 | return ret; | ||
345 | |||
346 | tick_err: | ||
347 | free_irq(s3c_rtc_alarmno, rtc_dev); | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static void s3c_rtc_release(struct device *dev) | ||
352 | { | ||
353 | struct platform_device *pdev = to_platform_device(dev); | ||
354 | struct rtc_device *rtc_dev = platform_get_drvdata(pdev); | ||
355 | |||
356 | /* do not clear AIE here, it may be needed for wake */ | ||
357 | |||
358 | free_irq(s3c_rtc_alarmno, rtc_dev); | ||
359 | free_irq(s3c_rtc_tickno, rtc_dev); | ||
360 | } | ||
361 | |||
362 | static const struct rtc_class_ops s3c_rtcops = { | 322 | static const struct rtc_class_ops s3c_rtcops = { |
363 | .open = s3c_rtc_open, | ||
364 | .release = s3c_rtc_release, | ||
365 | .read_time = s3c_rtc_gettime, | 323 | .read_time = s3c_rtc_gettime, |
366 | .set_time = s3c_rtc_settime, | 324 | .set_time = s3c_rtc_settime, |
367 | .read_alarm = s3c_rtc_getalarm, | 325 | .read_alarm = s3c_rtc_getalarm, |
@@ -425,6 +383,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
425 | { | 383 | { |
426 | struct rtc_device *rtc = platform_get_drvdata(dev); | 384 | struct rtc_device *rtc = platform_get_drvdata(dev); |
427 | 385 | ||
386 | free_irq(s3c_rtc_alarmno, rtc); | ||
387 | free_irq(s3c_rtc_tickno, rtc); | ||
388 | |||
428 | platform_set_drvdata(dev, NULL); | 389 | platform_set_drvdata(dev, NULL); |
429 | rtc_device_unregister(rtc); | 390 | rtc_device_unregister(rtc); |
430 | 391 | ||
@@ -548,10 +509,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) | |||
548 | 509 | ||
549 | s3c_rtc_setfreq(&pdev->dev, 1); | 510 | s3c_rtc_setfreq(&pdev->dev, 1); |
550 | 511 | ||
512 | ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, | ||
513 | IRQF_DISABLED, "s3c2410-rtc alarm", rtc); | ||
514 | if (ret) { | ||
515 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); | ||
516 | goto err_alarm_irq; | ||
517 | } | ||
518 | |||
519 | ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, | ||
520 | IRQF_DISABLED, "s3c2410-rtc tick", rtc); | ||
521 | if (ret) { | ||
522 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); | ||
523 | free_irq(s3c_rtc_alarmno, rtc); | ||
524 | goto err_tick_irq; | ||
525 | } | ||
526 | |||
551 | clk_disable(rtc_clk); | 527 | clk_disable(rtc_clk); |
552 | 528 | ||
553 | return 0; | 529 | return 0; |
554 | 530 | ||
531 | err_tick_irq: | ||
532 | free_irq(s3c_rtc_alarmno, rtc); | ||
533 | |||
534 | err_alarm_irq: | ||
535 | platform_set_drvdata(pdev, NULL); | ||
536 | rtc_device_unregister(rtc); | ||
537 | |||
555 | err_nortc: | 538 | err_nortc: |
556 | s3c_rtc_enable(pdev, 0); | 539 | s3c_rtc_enable(pdev, 0); |
557 | clk_disable(rtc_clk); | 540 | clk_disable(rtc_clk); |
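Worth noting about the rtc-s3c.c rework above: both interrupts are now requested once in probe(), so a failure partway through has to release whatever was already acquired, which is what the err_tick_irq and err_alarm_irq labels take care of. Below is a generic sketch of that goto-unwind shape; setup_alarm(), setup_tick() and teardown_alarm() are dummies standing in for the driver's request_irq()/free_irq() calls.

    #include <stdio.h>

    /* Dummy stand-ins for probe steps that may fail. */
    static int setup_alarm(void) { return 0; }
    static int setup_tick(void)  { return -1; }    /* force the error path */
    static void teardown_alarm(void) { puts("releasing alarm irq"); }

    static int probe(void)
    {
            int ret;

            ret = setup_alarm();
            if (ret)
                    goto err_alarm;

            ret = setup_tick();
            if (ret)
                    goto err_tick;  /* the alarm step succeeded: undo it below */

            return 0;

    err_tick:
            teardown_alarm();
    err_alarm:
            return ret;
    }

    int main(void)
    {
            printf("probe returned %d\n", probe());
            return 0;
    }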
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c index c01c0cb0af4e..b99a11a9dd69 100644 --- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c +++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c | |||
@@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets) | |||
812 | for(count = 0; count < Patch_Count; count++) { | 812 | for(count = 0; count < Patch_Count; count++) { |
813 | 813 | ||
814 | AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count)); | 814 | AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count)); |
815 | kfree(RamPatch[Patch_Count].Data); | 815 | kfree(RamPatch[count].Data); |
816 | } | 816 | } |
817 | 817 | ||
818 | for(count = 0; count < Tag_Count; count++) { | 818 | for(count = 0; count < Tag_Count; count++) { |
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c index fe02d22274b4..05aa41cf875b 100644 --- a/drivers/staging/dt3155v4l/dt3155v4l.c +++ b/drivers/staging/dt3155v4l/dt3155v4l.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/stringify.h> | 22 | #include <linux/stringify.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/kthread.h> | 24 | #include <linux/kthread.h> |
25 | #include <linux/slab.h> | ||
25 | #include <media/v4l2-dev.h> | 26 | #include <media/v4l2-dev.h> |
26 | #include <media/v4l2-ioctl.h> | 27 | #include <media/v4l2-ioctl.h> |
27 | #include <media/videobuf2-dma-contig.h> | 28 | #include <media/videobuf2-dma-contig.h> |
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index 627a98b4ec30..9e728b3415e3 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
25 | #include <asm/io.h> | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | #include "ft1000.h" | 27 | #include "ft1000.h" |
27 | 28 | ||
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c index 779ac1a12d24..daac12120653 100644 --- a/drivers/staging/gma500/gem_glue.c +++ b/drivers/staging/gma500/gem_glue.c | |||
@@ -20,26 +20,6 @@ | |||
20 | #include <drm/drmP.h> | 20 | #include <drm/drmP.h> |
21 | #include <drm/drm.h> | 21 | #include <drm/drm.h> |
22 | 22 | ||
23 | /** | ||
24 | * Initialize an already allocated GEM object of the specified size with | ||
25 | * no GEM provided backing store. Instead the caller is responsible for | ||
26 | * backing the object and handling it. | ||
27 | */ | ||
28 | int drm_gem_private_object_init(struct drm_device *dev, | ||
29 | struct drm_gem_object *obj, size_t size) | ||
30 | { | ||
31 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); | ||
32 | |||
33 | obj->dev = dev; | ||
34 | obj->filp = NULL; | ||
35 | |||
36 | kref_init(&obj->refcount); | ||
37 | atomic_set(&obj->handle_count, 0); | ||
38 | obj->size = size; | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | void drm_gem_object_release_wrap(struct drm_gem_object *obj) | 23 | void drm_gem_object_release_wrap(struct drm_gem_object *obj) |
44 | { | 24 | { |
45 | /* Remove the list map if one is present */ | 25 | /* Remove the list map if one is present */ |
@@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj) | |||
51 | kfree(list->map); | 31 | kfree(list->map); |
52 | list->map = NULL; | 32 | list->map = NULL; |
53 | } | 33 | } |
54 | if (obj->filp) | 34 | drm_gem_object_release(obj); |
55 | drm_gem_object_release(obj); | ||
56 | } | 35 | } |
57 | 36 | ||
58 | /** | 37 | /** |
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h index a0f2bc4e4ae7..ce5ce30f74db 100644 --- a/drivers/staging/gma500/gem_glue.h +++ b/drivers/staging/gma500/gem_glue.h | |||
@@ -1,4 +1,2 @@ | |||
1 | extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); | 1 | extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); |
2 | extern int drm_gem_private_object_init(struct drm_device *dev, | ||
3 | struct drm_gem_object *obj, size_t size); | ||
4 | extern int gem_create_mmap_offset(struct drm_gem_object *obj); | 2 | extern int gem_create_mmap_offset(struct drm_gem_object *obj); |
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c index 02e17c9c8637..fd211f3467c4 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.c +++ b/drivers/staging/gma500/mdfld_dsi_dbi.c | |||
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev, | |||
711 | /* Create drm encoder object */ | 711 | /* Create drm encoder object */ |
712 | connector = &dsi_connector->base.base; | 712 | connector = &dsi_connector->base.base; |
713 | encoder = &dbi_output->base.base; | 713 | encoder = &dbi_output->base.base; |
714 | /* Review this if we ever get MIPI-HDMI bridges or similar */ | ||
714 | drm_encoder_init(dev, | 715 | drm_encoder_init(dev, |
715 | encoder, | 716 | encoder, |
716 | p_funcs->encoder_funcs, | 717 | p_funcs->encoder_funcs, |
717 | DRM_MODE_ENCODER_MIPI); | 718 | DRM_MODE_ENCODER_LVDS); |
718 | drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); | 719 | drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); |
719 | 720 | ||
720 | /* Attach to given connector */ | 721 | /* Attach to given connector */ |
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h index dc6242c51d0b..f0fa986fd934 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.h +++ b/drivers/staging/gma500/mdfld_dsi_dbi.h | |||
@@ -42,9 +42,6 @@ | |||
42 | #include "mdfld_dsi_output.h" | 42 | #include "mdfld_dsi_output.h" |
43 | #include "mdfld_output.h" | 43 | #include "mdfld_output.h" |
44 | 44 | ||
45 | #define DRM_MODE_ENCODER_MIPI 5 | ||
46 | |||
47 | |||
48 | /* | 45 | /* |
49 | * DBI encoder which inherits from mdfld_dsi_encoder | 46 | * DBI encoder which inherits from mdfld_dsi_encoder |
50 | */ | 47 | */ |
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c index 6e03a91e947e..e685f1217baa 100644 --- a/drivers/staging/gma500/mdfld_dsi_dpi.c +++ b/drivers/staging/gma500/mdfld_dsi_dpi.c | |||
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, | |||
777 | /* Create drm encoder object */ | 777 | /* Create drm encoder object */ |
778 | connector = &dsi_connector->base.base; | 778 | connector = &dsi_connector->base.base; |
779 | encoder = &dpi_output->base.base; | 779 | encoder = &dpi_output->base.base; |
780 | /* | ||
781 | * On existing hardware this will be a panel of some form, | ||
782 | * if future devices also have HDMI bridges this will need | ||
783 | * revisiting | ||
784 | */ | ||
780 | drm_encoder_init(dev, | 785 | drm_encoder_init(dev, |
781 | encoder, | 786 | encoder, |
782 | p_funcs->encoder_funcs, | 787 | p_funcs->encoder_funcs, |
783 | DRM_MODE_ENCODER_MIPI); | 788 | DRM_MODE_ENCODER_LVDS); |
784 | drm_encoder_helper_add(encoder, | 789 | drm_encoder_helper_add(encoder, |
785 | p_funcs->encoder_helper_funcs); | 790 | p_funcs->encoder_helper_funcs); |
786 | 791 | ||
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c index 7536095c30a0..9050c0f78b15 100644 --- a/drivers/staging/gma500/mdfld_dsi_output.c +++ b/drivers/staging/gma500/mdfld_dsi_output.c | |||
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev, | |||
955 | psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; | 955 | psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; |
956 | 956 | ||
957 | connector = &psb_output->base; | 957 | connector = &psb_output->base; |
958 | drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); | 958 | /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */ |
959 | drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, | ||
960 | DRM_MODE_CONNECTOR_LVDS); | ||
959 | drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); | 961 | drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); |
960 | 962 | ||
961 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 963 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h index 38165e8367e5..09e9687431f1 100644 --- a/drivers/staging/gma500/medfield.h +++ b/drivers/staging/gma500/medfield.h | |||
@@ -21,8 +21,6 @@ | |||
21 | * DEALINGS IN THE SOFTWARE. | 21 | * DEALINGS IN THE SOFTWARE. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define DRM_MODE_ENCODER_MIPI 5 | ||
25 | |||
26 | /* Medfield DSI controller registers */ | 24 | /* Medfield DSI controller registers */ |
27 | 25 | ||
28 | #define MIPIA_DEVICE_READY_REG 0xb000 | 26 | #define MIPIA_DEVICE_READY_REG 0xb000 |
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h index 72f487a2a1b7..fd4732dd783a 100644 --- a/drivers/staging/gma500/psb_drv.h +++ b/drivers/staging/gma500/psb_drv.h | |||
@@ -35,7 +35,6 @@ | |||
35 | 35 | ||
36 | /* Append new drm mode definition here, align with libdrm definition */ | 36 | /* Append new drm mode definition here, align with libdrm definition */ |
37 | #define DRM_MODE_SCALE_NO_SCALE 2 | 37 | #define DRM_MODE_SCALE_NO_SCALE 2 |
38 | #define DRM_MODE_CONNECTOR_MIPI 15 | ||
39 | 38 | ||
40 | enum { | 39 | enum { |
41 | CHIP_PSB_8108 = 0, /* Poulsbo */ | 40 | CHIP_PSB_8108 = 0, /* Poulsbo */ |
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 3612574ca520..d286b2223181 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c | |||
@@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev, | |||
325 | 325 | ||
326 | page_buf = alloc_page(GFP_KERNEL); | 326 | page_buf = alloc_page(GFP_KERNEL); |
327 | if (!page_buf) { | 327 | if (!page_buf) { |
328 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | 328 | kmem_cache_free(blkdev->request_pool, blkvsc_req); |
329 | return -ENOMEM; | 329 | return -ENOMEM; |
330 | } | 330 | } |
331 | 331 | ||
@@ -422,7 +422,7 @@ cleanup: | |||
422 | 422 | ||
423 | __free_page(page_buf); | 423 | __free_page(page_buf); |
424 | 424 | ||
425 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | 425 | kmem_cache_free(blkdev->request_pool, blkvsc_req); |
426 | 426 | ||
427 | return ret; | 427 | return ret; |
428 | } | 428 | } |
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c index bf1988884e93..cf5d15da76ad 100644 --- a/drivers/staging/iio/accel/adis16203_core.c +++ b/drivers/staging/iio/accel/adis16203_core.c | |||
@@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev, | |||
311 | mutex_lock(&indio_dev->mlock); | 311 | mutex_lock(&indio_dev->mlock); |
312 | addr = adis16203_addresses[chan->address][0]; | 312 | addr = adis16203_addresses[chan->address][0]; |
313 | ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16); | 313 | ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16); |
314 | if (ret) | 314 | if (ret) { |
315 | mutex_unlock(&indio_dev->mlock); | ||
315 | return ret; | 316 | return ret; |
317 | } | ||
316 | 318 | ||
317 | if (val16 & ADIS16203_ERROR_ACTIVE) { | 319 | if (val16 & ADIS16203_ERROR_ACTIVE) { |
318 | ret = adis16203_check_status(indio_dev); | 320 | ret = adis16203_check_status(indio_dev); |
319 | if (ret) | 321 | if (ret) { |
322 | mutex_unlock(&indio_dev->mlock); | ||
320 | return ret; | 323 | return ret; |
324 | } | ||
321 | } | 325 | } |
322 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); | 326 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); |
323 | if (chan->scan_type.sign == 's') | 327 | if (chan->scan_type.sign == 's') |
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c index cfd09b3b9937..3e2b62654b7d 100644 --- a/drivers/staging/iio/accel/adis16204_core.c +++ b/drivers/staging/iio/accel/adis16204_core.c | |||
@@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev, | |||
341 | mutex_lock(&indio_dev->mlock); | 341 | mutex_lock(&indio_dev->mlock); |
342 | addr = adis16204_addresses[chan->address][0]; | 342 | addr = adis16204_addresses[chan->address][0]; |
343 | ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16); | 343 | ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16); |
344 | if (ret) | 344 | if (ret) { |
345 | mutex_unlock(&indio_dev->mlock); | ||
345 | return ret; | 346 | return ret; |
347 | } | ||
346 | 348 | ||
347 | if (val16 & ADIS16204_ERROR_ACTIVE) { | 349 | if (val16 & ADIS16204_ERROR_ACTIVE) { |
348 | ret = adis16204_check_status(indio_dev); | 350 | ret = adis16204_check_status(indio_dev); |
349 | if (ret) | 351 | if (ret) { |
352 | mutex_unlock(&indio_dev->mlock); | ||
350 | return ret; | 353 | return ret; |
354 | } | ||
351 | } | 355 | } |
352 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); | 356 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); |
353 | if (chan->scan_type.sign == 's') | 357 | if (chan->scan_type.sign == 's') |
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c index 55f3a7bcaf0a..bec1fa8de9b9 100644 --- a/drivers/staging/iio/accel/adis16209_core.c +++ b/drivers/staging/iio/accel/adis16209_core.c | |||
@@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev, | |||
337 | mutex_lock(&indio_dev->mlock); | 337 | mutex_lock(&indio_dev->mlock); |
338 | addr = adis16209_addresses[chan->address][0]; | 338 | addr = adis16209_addresses[chan->address][0]; |
339 | ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); | 339 | ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); |
340 | if (ret) | 340 | if (ret) { |
341 | mutex_unlock(&indio_dev->mlock); | ||
341 | return ret; | 342 | return ret; |
343 | } | ||
342 | 344 | ||
343 | if (val16 & ADIS16209_ERROR_ACTIVE) { | 345 | if (val16 & ADIS16209_ERROR_ACTIVE) { |
344 | ret = adis16209_check_status(indio_dev); | 346 | ret = adis16209_check_status(indio_dev); |
345 | if (ret) | 347 | if (ret) { |
348 | mutex_unlock(&indio_dev->mlock); | ||
346 | return ret; | 349 | return ret; |
350 | } | ||
347 | } | 351 | } |
348 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); | 352 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); |
349 | if (chan->scan_type.sign == 's') | 353 | if (chan->scan_type.sign == 's') |
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c index 4a4eafc58630..aee8b69173c4 100644 --- a/drivers/staging/iio/accel/adis16240_core.c +++ b/drivers/staging/iio/accel/adis16240_core.c | |||
@@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev, | |||
370 | mutex_lock(&indio_dev->mlock); | 370 | mutex_lock(&indio_dev->mlock); |
371 | addr = adis16240_addresses[chan->address][0]; | 371 | addr = adis16240_addresses[chan->address][0]; |
372 | ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); | 372 | ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); |
373 | if (ret) | 373 | if (ret) { |
374 | mutex_unlock(&indio_dev->mlock); | ||
374 | return ret; | 375 | return ret; |
376 | } | ||
375 | 377 | ||
376 | if (val16 & ADIS16240_ERROR_ACTIVE) { | 378 | if (val16 & ADIS16240_ERROR_ACTIVE) { |
377 | ret = adis16240_check_status(indio_dev); | 379 | ret = adis16240_check_status(indio_dev); |
378 | if (ret) | 380 | if (ret) { |
381 | mutex_unlock(&indio_dev->mlock); | ||
379 | return ret; | 382 | return ret; |
383 | } | ||
380 | } | 384 | } |
381 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); | 385 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); |
382 | if (chan->scan_type.sign == 's') | 386 | if (chan->scan_type.sign == 's') |
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c index 05797f404bea..f2d43cfcc493 100644 --- a/drivers/staging/iio/gyro/adis16260_core.c +++ b/drivers/staging/iio/gyro/adis16260_core.c | |||
@@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev, | |||
446 | mutex_lock(&indio_dev->mlock); | 446 | mutex_lock(&indio_dev->mlock); |
447 | addr = adis16260_addresses[chan->address][0]; | 447 | addr = adis16260_addresses[chan->address][0]; |
448 | ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16); | 448 | ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16); |
449 | if (ret) | 449 | if (ret) { |
450 | mutex_unlock(&indio_dev->mlock); | ||
450 | return ret; | 451 | return ret; |
452 | } | ||
451 | 453 | ||
452 | if (val16 & ADIS16260_ERROR_ACTIVE) { | 454 | if (val16 & ADIS16260_ERROR_ACTIVE) { |
453 | ret = adis16260_check_status(indio_dev); | 455 | ret = adis16260_check_status(indio_dev); |
454 | if (ret) | 456 | if (ret) { |
457 | mutex_unlock(&indio_dev->mlock); | ||
455 | return ret; | 458 | return ret; |
459 | } | ||
456 | } | 460 | } |
457 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); | 461 | val16 = val16 & ((1 << chan->scan_type.realbits) - 1); |
458 | if (chan->scan_type.sign == 's') | 462 | if (chan->scan_type.sign == 's') |
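The five adis16203/16204/16209/16240/16260 hunks above all plug the same leak: an early return inside *_read_raw() left indio_dev->mlock held. A common alternative to unlocking at every return, sketched here with hypothetical my_ helpers, is a single exit label so any future early return still funnels through the unlock.

#include <linux/types.h>
#include <linux/mutex.h>

#define MY_ERROR_ACTIVE (1 << 14)                 /* hypothetical error flag */
extern int my_spi_read_reg_16(u8 addr, u16 *val); /* hypothetical SPI helper */
extern int my_check_status(void);                 /* hypothetical status check */

static int my_read_raw_locked(struct mutex *lock, u8 addr, u16 *out)
{
        u16 val16;
        int ret;

        mutex_lock(lock);
        ret = my_spi_read_reg_16(addr, &val16);
        if (ret)
                goto out_unlock;

        if (val16 & MY_ERROR_ACTIVE) {
                ret = my_check_status();
                if (ret)
                        goto out_unlock;
        }
        *out = val16;
out_unlock:
        mutex_unlock(lock);   /* every path, success or error, unlocks here */
        return ret;
}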
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO index 77b47f763f22..649d6b70deaa 100644 --- a/drivers/staging/nvec/TODO +++ b/drivers/staging/nvec/TODO | |||
@@ -4,5 +4,7 @@ ToDo list (incomplete, unordered) | |||
4 | - add compile as module support | 4 | - add compile as module support |
5 | - move nvec devices to mfd cells? | 5 | - move nvec devices to mfd cells? |
6 | - adjust to kernel style | 6 | - adjust to kernel style |
7 | 7 | - fix clk usage | |
8 | 8 | should not be using clk_get_sys(), but clk_get(&pdev->dev, conn) | |
9 | where conn is either NULL if the device only has one clock, or | ||
10 | the device specific name if it has multiple clocks. | ||
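The new TODO item spells out the preferred clk API. A minimal sketch of that usage, assuming a platform-driver probe path; the "i2c" connection id is purely illustrative.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_nvec_clk_setup(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        /* NULL con_id when the device has exactly one clock ... */
        clk = clk_get(&pdev->dev, NULL);
        /* ... or a device-specific name when it has several:
         *   clk = clk_get(&pdev->dev, "i2c");
         */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);    /* later kernels: clk_prepare_enable() */
        if (ret)
                clk_put(clk);
        return ret;
}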
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c index 6766f468639f..4bb5fffca5b9 100644 --- a/drivers/staging/rtl8192u/r819xU_firmware.c +++ b/drivers/staging/rtl8192u/r819xU_firmware.c | |||
@@ -399,10 +399,7 @@ download_firmware_fail: | |||
399 | 399 | ||
400 | } | 400 | } |
401 | 401 | ||
402 | 402 | MODULE_FIRMWARE("RTL8192U/boot.img"); | |
403 | 403 | MODULE_FIRMWARE("RTL8192U/main.img"); | |
404 | 404 | MODULE_FIRMWARE("RTL8192U/data.img"); | |
405 | |||
406 | |||
407 | |||
408 | 405 | ||
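The rtl8192u hunk replaces a run of blank lines with MODULE_FIRMWARE() declarations. These do not load anything at runtime; they only record the firmware file names in the module's metadata (visible via modinfo) so packaging and initramfs tools can bundle the blobs. A hedged sketch of how one of those images would actually be fetched; my_load_boot_image and my_dev are hypothetical.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>

MODULE_FIRMWARE("RTL8192U/boot.img");   /* metadata only, no runtime effect */

static int my_load_boot_image(struct device *my_dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "RTL8192U/boot.img", my_dev);
        if (ret)
                return ret;

        /* ... copy fw->data / fw->size to the device here ... */

        release_firmware(fw);
        return 0;
}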
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 5ff59f27d101..16c73fbff51f 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c | |||
@@ -66,12 +66,6 @@ static int msi_en; | |||
66 | module_param(msi_en, int, S_IRUGO | S_IWUSR); | 66 | module_param(msi_en, int, S_IRUGO | S_IWUSR); |
67 | MODULE_PARM_DESC(msi_en, "enable msi"); | 67 | MODULE_PARM_DESC(msi_en, "enable msi"); |
68 | 68 | ||
69 | /* These are used to make sure the module doesn't unload before all the | ||
70 | * threads have exited. | ||
71 | */ | ||
72 | static atomic_t total_threads = ATOMIC_INIT(0); | ||
73 | static DECLARE_COMPLETION(threads_gone); | ||
74 | |||
75 | static irqreturn_t rtsx_interrupt(int irq, void *dev_id); | 69 | static irqreturn_t rtsx_interrupt(int irq, void *dev_id); |
76 | 70 | ||
77 | /*********************************************************************** | 71 | /*********************************************************************** |
@@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb, | |||
192 | /* enqueue the command and wake up the control thread */ | 186 | /* enqueue the command and wake up the control thread */ |
193 | srb->scsi_done = done; | 187 | srb->scsi_done = done; |
194 | chip->srb = srb; | 188 | chip->srb = srb; |
195 | up(&(dev->sema)); | 189 | complete(&dev->cmnd_ready); |
196 | 190 | ||
197 | return 0; | 191 | return 0; |
198 | } | 192 | } |
@@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev) | |||
475 | current->flags |= PF_NOFREEZE; | 469 | current->flags |= PF_NOFREEZE; |
476 | 470 | ||
477 | for (;;) { | 471 | for (;;) { |
478 | if (down_interruptible(&dev->sema)) | 472 | if (wait_for_completion_interruptible(&dev->cmnd_ready)) |
479 | break; | 473 | break; |
480 | 474 | ||
481 | /* lock the device pointers */ | 475 | /* lock the device pointers */ |
@@ -557,8 +551,6 @@ SkipForAbort: | |||
557 | mutex_unlock(&dev->dev_mutex); | 551 | mutex_unlock(&dev->dev_mutex); |
558 | } /* for (;;) */ | 552 | } /* for (;;) */ |
559 | 553 | ||
560 | scsi_host_put(host); | ||
561 | |||
562 | /* notify the exit routine that we're actually exiting now | 554 | /* notify the exit routine that we're actually exiting now |
563 | * | 555 | * |
564 | * complete()/wait_for_completion() is similar to up()/down(), | 556 | * complete()/wait_for_completion() is similar to up()/down(), |
@@ -573,7 +565,7 @@ SkipForAbort: | |||
573 | * This is important in preemption kernels, which transfer the flow | 565 | * This is important in preemption kernels, which transfer the flow |
574 | * of execution immediately upon a complete(). | 566 | * of execution immediately upon a complete(). |
575 | */ | 567 | */ |
576 | complete_and_exit(&threads_gone, 0); | 568 | complete_and_exit(&dev->control_exit, 0); |
577 | } | 569 | } |
578 | 570 | ||
579 | 571 | ||
@@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev) | |||
581 | { | 573 | { |
582 | struct rtsx_dev *dev = (struct rtsx_dev *)__dev; | 574 | struct rtsx_dev *dev = (struct rtsx_dev *)__dev; |
583 | struct rtsx_chip *chip = dev->chip; | 575 | struct rtsx_chip *chip = dev->chip; |
584 | struct Scsi_Host *host = rtsx_to_host(dev); | ||
585 | struct sd_info *sd_card = &(chip->sd_card); | 576 | struct sd_info *sd_card = &(chip->sd_card); |
586 | struct xd_info *xd_card = &(chip->xd_card); | 577 | struct xd_info *xd_card = &(chip->xd_card); |
587 | struct ms_info *ms_card = &(chip->ms_card); | 578 | struct ms_info *ms_card = &(chip->ms_card); |
@@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev) | |||
621 | mutex_unlock(&dev->dev_mutex); | 612 | mutex_unlock(&dev->dev_mutex); |
622 | } | 613 | } |
623 | 614 | ||
624 | scsi_host_put(host); | 615 | complete_and_exit(&dev->polling_exit, 0); |
625 | complete_and_exit(&threads_gone, 0); | ||
626 | } | 616 | } |
627 | 617 | ||
628 | /* | 618 | /* |
@@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev) | |||
699 | { | 689 | { |
700 | printk(KERN_INFO "-- %s\n", __func__); | 690 | printk(KERN_INFO "-- %s\n", __func__); |
701 | 691 | ||
692 | /* Tell the control thread to exit. The SCSI host must | ||
693 | * already have been removed so it won't try to queue | ||
694 | * any more commands. | ||
695 | */ | ||
696 | printk(KERN_INFO "-- sending exit command to thread\n"); | ||
697 | complete(&dev->cmnd_ready); | ||
698 | if (dev->ctl_thread) | ||
699 | wait_for_completion(&dev->control_exit); | ||
700 | if (dev->polling_thread) | ||
701 | wait_for_completion(&dev->polling_exit); | ||
702 | |||
703 | wait_timeout(200); | ||
704 | |||
702 | if (dev->rtsx_resv_buf) { | 705 | if (dev->rtsx_resv_buf) { |
703 | dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN, | 706 | dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN, |
704 | dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr); | 707 | dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr); |
705 | dev->chip->host_cmds_ptr = NULL; | 708 | dev->chip->host_cmds_ptr = NULL; |
706 | dev->chip->host_sg_tbl_ptr = NULL; | 709 | dev->chip->host_sg_tbl_ptr = NULL; |
707 | } | 710 | } |
708 | 711 | ||
709 | pci_disable_device(dev->pci); | 712 | if (dev->irq > 0) |
710 | pci_release_regions(dev->pci); | ||
711 | |||
712 | if (dev->irq > 0) { | ||
713 | free_irq(dev->irq, (void *)dev); | 713 | free_irq(dev->irq, (void *)dev); |
714 | } | 714 | if (dev->chip->msi_en) |
715 | if (dev->chip->msi_en) { | ||
716 | pci_disable_msi(dev->pci); | 715 | pci_disable_msi(dev->pci); |
717 | } | 716 | if (dev->remap_addr) |
717 | iounmap(dev->remap_addr); | ||
718 | 718 | ||
719 | /* Tell the control thread to exit. The SCSI host must | 719 | pci_disable_device(dev->pci); |
720 | * already have been removed so it won't try to queue | 720 | pci_release_regions(dev->pci); |
721 | * any more commands. | 721 | |
722 | */ | 722 | rtsx_release_chip(dev->chip); |
723 | printk(KERN_INFO "-- sending exit command to thread\n"); | 723 | kfree(dev->chip); |
724 | up(&dev->sema); | ||
725 | } | 724 | } |
726 | 725 | ||
727 | /* First stage of disconnect processing: stop all commands and remove | 726 | /* First stage of disconnect processing: stop all commands and remove |
@@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev) | |||
739 | scsi_unlock(host); | 738 | scsi_unlock(host); |
740 | mutex_unlock(&dev->dev_mutex); | 739 | mutex_unlock(&dev->dev_mutex); |
741 | wake_up(&dev->delay_wait); | 740 | wake_up(&dev->delay_wait); |
741 | wait_for_completion(&dev->scanning_done); | ||
742 | 742 | ||
743 | /* Wait some time to let other threads exist */ | 743 | /* Wait some time to let other threads exist */ |
744 | wait_timeout(100); | 744 | wait_timeout(100); |
@@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev) | |||
793 | /* Should we unbind if no devices were detected? */ | 793 | /* Should we unbind if no devices were detected? */ |
794 | } | 794 | } |
795 | 795 | ||
796 | scsi_host_put(rtsx_to_host(dev)); | 796 | complete_and_exit(&dev->scanning_done, 0); |
797 | complete_and_exit(&threads_gone, 0); | ||
798 | } | 797 | } |
799 | 798 | ||
800 | static void rtsx_init_options(struct rtsx_chip *chip) | 799 | static void rtsx_init_options(struct rtsx_chip *chip) |
@@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id | |||
941 | 940 | ||
942 | spin_lock_init(&dev->reg_lock); | 941 | spin_lock_init(&dev->reg_lock); |
943 | mutex_init(&(dev->dev_mutex)); | 942 | mutex_init(&(dev->dev_mutex)); |
944 | sema_init(&(dev->sema), 0); | 943 | init_completion(&dev->cmnd_ready); |
944 | init_completion(&dev->control_exit); | ||
945 | init_completion(&dev->polling_exit); | ||
945 | init_completion(&(dev->notify)); | 946 | init_completion(&(dev->notify)); |
947 | init_completion(&dev->scanning_done); | ||
946 | init_waitqueue_head(&dev->delay_wait); | 948 | init_waitqueue_head(&dev->delay_wait); |
947 | 949 | ||
948 | dev->pci = pci; | 950 | dev->pci = pci; |
@@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id | |||
992 | pci_set_master(pci); | 994 | pci_set_master(pci); |
993 | synchronize_irq(dev->irq); | 995 | synchronize_irq(dev->irq); |
994 | 996 | ||
995 | err = scsi_add_host(host, &pci->dev); | ||
996 | if (err) { | ||
997 | printk(KERN_ERR "Unable to add the scsi host\n"); | ||
998 | goto errout; | ||
999 | } | ||
1000 | |||
1001 | rtsx_init_chip(dev->chip); | 997 | rtsx_init_chip(dev->chip); |
1002 | 998 | ||
1003 | /* Start up our control thread */ | 999 | /* Start up our control thread */ |
1004 | th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME); | 1000 | th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME); |
1005 | if (IS_ERR(th)) { | 1001 | if (IS_ERR(th)) { |
1006 | printk(KERN_ERR "Unable to start control thread\n"); | 1002 | printk(KERN_ERR "Unable to start control thread\n"); |
1007 | err = PTR_ERR(th); | 1003 | err = PTR_ERR(th); |
1008 | goto errout; | 1004 | goto errout; |
1009 | } | 1005 | } |
1006 | dev->ctl_thread = th; | ||
1010 | 1007 | ||
1011 | /* Take a reference to the host for the control thread and | 1008 | err = scsi_add_host(host, &pci->dev); |
1012 | * count it among all the threads we have launched. Then | 1009 | if (err) { |
1013 | * start it up. */ | 1010 | printk(KERN_ERR "Unable to add the scsi host\n"); |
1014 | scsi_host_get(rtsx_to_host(dev)); | 1011 | goto errout; |
1015 | atomic_inc(&total_threads); | 1012 | } |
1016 | wake_up_process(th); | ||
1017 | 1013 | ||
1018 | /* Start up the thread for delayed SCSI-device scanning */ | 1014 | /* Start up the thread for delayed SCSI-device scanning */ |
1019 | th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); | 1015 | th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); |
@@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id | |||
1024 | goto errout; | 1020 | goto errout; |
1025 | } | 1021 | } |
1026 | 1022 | ||
1027 | /* Take a reference to the host for the scanning thread and | ||
1028 | * count it among all the threads we have launched. Then | ||
1029 | * start it up. */ | ||
1030 | scsi_host_get(rtsx_to_host(dev)); | ||
1031 | atomic_inc(&total_threads); | ||
1032 | wake_up_process(th); | 1023 | wake_up_process(th); |
1033 | 1024 | ||
1034 | /* Start up the thread for polling thread */ | 1025 | /* Start up the thread for polling thread */ |
1035 | th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling"); | 1026 | th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling"); |
1036 | if (IS_ERR(th)) { | 1027 | if (IS_ERR(th)) { |
1037 | printk(KERN_ERR "Unable to start the device-polling thread\n"); | 1028 | printk(KERN_ERR "Unable to start the device-polling thread\n"); |
1038 | quiesce_and_remove_host(dev); | 1029 | quiesce_and_remove_host(dev); |
1039 | err = PTR_ERR(th); | 1030 | err = PTR_ERR(th); |
1040 | goto errout; | 1031 | goto errout; |
1041 | } | 1032 | } |
1042 | 1033 | dev->polling_thread = th; | |
1043 | /* Take a reference to the host for the polling thread and | ||
1044 | * count it among all the threads we have launched. Then | ||
1045 | * start it up. */ | ||
1046 | scsi_host_get(rtsx_to_host(dev)); | ||
1047 | atomic_inc(&total_threads); | ||
1048 | wake_up_process(th); | ||
1049 | 1034 | ||
1050 | pci_set_drvdata(pci, dev); | 1035 | pci_set_drvdata(pci, dev); |
1051 | 1036 | ||
@@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void) | |||
1108 | 1093 | ||
1109 | pci_unregister_driver(&driver); | 1094 | pci_unregister_driver(&driver); |
1110 | 1095 | ||
1111 | /* Don't return until all of our control and scanning threads | ||
1112 | * have exited. Since each thread signals threads_gone as its | ||
1113 | * last act, we have to call wait_for_completion the right number | ||
1114 | * of times. | ||
1115 | */ | ||
1116 | while (atomic_read(&total_threads) > 0) { | ||
1117 | wait_for_completion(&threads_gone); | ||
1118 | atomic_dec(&total_threads); | ||
1119 | } | ||
1120 | |||
1121 | printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME); | 1096 | printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME); |
1122 | } | 1097 | } |
1123 | 1098 | ||
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h index 247615ba1d2a..86e47c2e3e3c 100644 --- a/drivers/staging/rts_pstor/rtsx.h +++ b/drivers/staging/rts_pstor/rtsx.h | |||
@@ -112,9 +112,16 @@ struct rtsx_dev { | |||
112 | /* locks */ | 112 | /* locks */ |
113 | spinlock_t reg_lock; | 113 | spinlock_t reg_lock; |
114 | 114 | ||
115 | struct task_struct *ctl_thread; /* the control thread */ | ||
116 | struct task_struct *polling_thread; /* the polling thread */ | ||
117 | |||
115 | /* mutual exclusion and synchronization structures */ | 118 | /* mutual exclusion and synchronization structures */ |
116 | struct semaphore sema; /* to sleep thread on */ | 119 | struct completion cmnd_ready; /* to sleep thread on */ |
120 | struct completion control_exit; /* control thread exit */ | ||
121 | struct completion polling_exit; /* polling thread exit */ | ||
117 | struct completion notify; /* thread begin/end */ | 122 | struct completion notify; /* thread begin/end */ |
123 | struct completion scanning_done; /* wait for scan thread */ | ||
124 | |||
118 | wait_queue_head_t delay_wait; /* wait during scan, reset */ | 125 | wait_queue_head_t delay_wait; /* wait during scan, reset */ |
119 | struct mutex dev_mutex; | 126 | struct mutex dev_mutex; |
120 | 127 | ||
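The rts_pstor rework above swaps a semaphore plus a shared total_threads counter for per-thread completions, so teardown can wait for each thread explicitly. A condensed sketch of that lifecycle under hypothetical my_ names: one completion signals queued work, one signals that the control thread has exited.

#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/types.h>

struct my_dev {
        bool exiting;                     /* set by teardown before waking */
        struct completion cmnd_ready;     /* work queued (or exit request) */
        struct completion control_exit;   /* control thread has exited */
        struct task_struct *ctl_thread;
};

static int my_control_thread(void *__dev)
{
        struct my_dev *dev = __dev;

        for (;;) {
                if (wait_for_completion_interruptible(&dev->cmnd_ready))
                        break;            /* signal received */
                if (dev->exiting)
                        break;            /* teardown asked us to stop */
                /* ... handle one queued command here ... */
        }
        /* Let the teardown path know this thread is really gone. */
        complete_and_exit(&dev->control_exit, 0);
}

static int my_start(struct my_dev *dev)
{
        struct task_struct *th;

        init_completion(&dev->cmnd_ready);
        init_completion(&dev->control_exit);

        th = kthread_run(my_control_thread, dev, "my-ctl");
        if (IS_ERR(th))
                return PTR_ERR(th);
        dev->ctl_thread = th;
        return 0;
}

static void my_stop(struct my_dev *dev)
{
        dev->exiting = true;
        complete(&dev->cmnd_ready);               /* wake the thread */
        wait_for_completion(&dev->control_exit);  /* wait until it is gone */
}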
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c index 76779949f141..f974f6412ad7 100644 --- a/drivers/staging/solo6x10/core.c +++ b/drivers/staging/solo6x10/core.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/videodev2.h> | 25 | #include <linux/videodev2.h> |
25 | #include "solo6x10.h" | 26 | #include "solo6x10.h" |
26 | #include "tw28.h" | 27 | #include "tw28.h" |
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c index 285f7f350062..de502599bb19 100644 --- a/drivers/staging/solo6x10/enc.c +++ b/drivers/staging/solo6x10/enc.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/slab.h> | ||
21 | #include "solo6x10.h" | 22 | #include "solo6x10.h" |
22 | #include "osd-font.h" | 23 | #include "osd-font.h" |
23 | 24 | ||
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c index bd8eb92c94b1..59274bfca95b 100644 --- a/drivers/staging/solo6x10/g723.c +++ b/drivers/staging/solo6x10/g723.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mempool.h> | 21 | #include <linux/mempool.h> |
22 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
23 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/freezer.h> | 25 | #include <linux/freezer.h> |
25 | #include <sound/core.h> | 26 | #include <sound/core.h> |
26 | #include <sound/initval.h> | 27 | #include <sound/initval.h> |
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c index 5717eabb04a4..56210f0fc5ec 100644 --- a/drivers/staging/solo6x10/p2m.c +++ b/drivers/staging/solo6x10/p2m.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/slab.h> | ||
21 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
22 | #include "solo6x10.h" | 23 | #include "solo6x10.h" |
23 | 24 | ||
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h index 17c06bd6cc91..abee7213202f 100644 --- a/drivers/staging/solo6x10/solo6x10.h +++ b/drivers/staging/solo6x10/solo6x10.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/slab.h> | ||
31 | #include <asm/io.h> | 32 | #include <asm/io.h> |
32 | #include <linux/atomic.h> | 33 | #include <linux/atomic.h> |
33 | #include <linux/videodev2.h> | 34 | #include <linux/videodev2.h> |
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c index 39dc586fc8bb..940769ef883f 100644 --- a/drivers/staging/speakup/devsynth.c +++ b/drivers/staging/speakup/devsynth.c | |||
@@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer, | |||
18 | { | 18 | { |
19 | size_t count = nbytes; | 19 | size_t count = nbytes; |
20 | const char *ptr = buffer; | 20 | const char *ptr = buffer; |
21 | int bytes; | 21 | size_t bytes; |
22 | unsigned long flags; | 22 | unsigned long flags; |
23 | u_char buf[256]; | 23 | u_char buf[256]; |
24 | |||
24 | if (synth == NULL) | 25 | if (synth == NULL) |
25 | return -ENODEV; | 26 | return -ENODEV; |
26 | while (count > 0) { | 27 | while (count > 0) { |
27 | bytes = min_t(size_t, count, sizeof(buf)); | 28 | bytes = min(count, sizeof(buf)); |
28 | if (copy_from_user(buf, ptr, bytes)) | 29 | if (copy_from_user(buf, ptr, bytes)) |
29 | return -EFAULT; | 30 | return -EFAULT; |
30 | count -= bytes; | 31 | count -= bytes; |
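The speakup change makes bytes a size_t so plain min() can replace min_t(size_t, ...): min() refuses to compare mismatched types, and that safety net is worth keeping once both operands really are size_t. Tiny sketch with hypothetical buffers.

#include <linux/kernel.h>
#include <linux/string.h>

static size_t my_copy_chunk(char *dst, size_t dst_len,
                            const char *src, size_t count)
{
        size_t bytes = min(count, dst_len);   /* both size_t, so min() is fine */

        memcpy(dst, src, bytes);
        return bytes;
}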
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile index f5ec64f94470..60daa272c204 100644 --- a/drivers/staging/zcache/Makefile +++ b/drivers/staging/zcache/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | zcache-y := tmem.o | 1 | zcache-y := zcache-main.o tmem.o |
2 | 2 | ||
3 | obj-$(CONFIG_ZCACHE) += zcache.o | 3 | obj-$(CONFIG_ZCACHE) += zcache.o |
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache-main.c index 65a81a0d7c49..855a5bb56a47 100644 --- a/drivers/staging/zcache/zcache.c +++ b/drivers/staging/zcache/zcache-main.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * http://marc.info/?l=linux-mm&m=127811271605009 | 19 | * http://marc.info/?l=linux-mm&m=127811271605009 |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/module.h> | ||
22 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
23 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
24 | #include <linux/list.h> | 25 | #include <linux/list.h> |
@@ -27,6 +28,7 @@ | |||
27 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | #include <linux/atomic.h> | 30 | #include <linux/atomic.h> |
31 | #include <linux/math64.h> | ||
30 | #include "tmem.h" | 32 | #include "tmem.h" |
31 | 33 | ||
32 | #include "../zram/xvmalloc.h" /* if built in drivers/staging */ | 34 | #include "../zram/xvmalloc.h" /* if built in drivers/staging */ |
@@ -53,6 +55,9 @@ | |||
53 | 55 | ||
54 | #define MAX_CLIENTS 16 | 56 | #define MAX_CLIENTS 16 |
55 | #define LOCAL_CLIENT ((uint16_t)-1) | 57 | #define LOCAL_CLIENT ((uint16_t)-1) |
58 | |||
59 | MODULE_LICENSE("GPL"); | ||
60 | |||
56 | struct zcache_client { | 61 | struct zcache_client { |
57 | struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; | 62 | struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; |
58 | struct xv_pool *xvpool; | 63 | struct xv_pool *xvpool; |
@@ -1158,6 +1163,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, | |||
1158 | uint16_t client_id = get_client_id_from_client(cli); | 1163 | uint16_t client_id = get_client_id_from_client(cli); |
1159 | unsigned long zv_mean_zsize; | 1164 | unsigned long zv_mean_zsize; |
1160 | unsigned long curr_pers_pampd_count; | 1165 | unsigned long curr_pers_pampd_count; |
1166 | u64 total_zsize; | ||
1161 | 1167 | ||
1162 | if (eph) { | 1168 | if (eph) { |
1163 | ret = zcache_compress(page, &cdata, &clen); | 1169 | ret = zcache_compress(page, &cdata, &clen); |
@@ -1190,8 +1196,9 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, | |||
1190 | } | 1196 | } |
1191 | /* reject if mean compression is too poor */ | 1197 | /* reject if mean compression is too poor */ |
1192 | if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { | 1198 | if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { |
1193 | zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / | 1199 | total_zsize = xv_get_total_size_bytes(cli->xvpool); |
1194 | curr_pers_pampd_count; | 1200 | zv_mean_zsize = div_u64(total_zsize, |
1201 | curr_pers_pampd_count); | ||
1195 | if (zv_mean_zsize > zv_max_mean_zsize) { | 1202 | if (zv_mean_zsize > zv_max_mean_zsize) { |
1196 | zcache_mean_compress_poor++; | 1203 | zcache_mean_compress_poor++; |
1197 | goto out; | 1204 | goto out; |
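The zcache hunk routes a 64-bit division through div_u64(). On 32-bit kernels a plain '/' on a u64 dividend emits a call to a libgcc helper (__udivdi3) that the kernel does not provide, so the open-coded division breaks the build. A sketch of the same computation; values are hypothetical.

#include <linux/types.h>
#include <linux/math64.h>

static unsigned long my_mean_zsize(u64 total_zsize, unsigned long count)
{
        if (!count)
                return 0;

        /* div_u64(): u64 dividend, 32-bit divisor; safe on 32-bit builds. */
        return div_u64(total_zsize, count);
}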
@@ -1929,9 +1936,9 @@ __setup("nofrontswap", no_frontswap); | |||
1929 | 1936 | ||
1930 | static int __init zcache_init(void) | 1937 | static int __init zcache_init(void) |
1931 | { | 1938 | { |
1932 | #ifdef CONFIG_SYSFS | ||
1933 | int ret = 0; | 1939 | int ret = 0; |
1934 | 1940 | ||
1941 | #ifdef CONFIG_SYSFS | ||
1935 | ret = sysfs_create_group(mm_kobj, &zcache_attr_group); | 1942 | ret = sysfs_create_group(mm_kobj, &zcache_attr_group); |
1936 | if (ret) { | 1943 | if (ret) { |
1937 | pr_err("zcache: can't create sysfs\n"); | 1944 | pr_err("zcache: can't create sysfs\n"); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c24fb10de60b..6a4ea29c2f36 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack( | |||
2243 | case 0: | 2243 | case 0: |
2244 | return iscsit_handle_recovery_datain_or_r2t(conn, buf, | 2244 | return iscsit_handle_recovery_datain_or_r2t(conn, buf, |
2245 | hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); | 2245 | hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); |
2246 | return 0; | ||
2247 | case ISCSI_FLAG_SNACK_TYPE_STATUS: | 2246 | case ISCSI_FLAG_SNACK_TYPE_STATUS: |
2248 | return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, | 2247 | return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, |
2249 | hdr->begrun, hdr->runlength); | 2248 | hdr->begrun, hdr->runlength); |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f095e65b1ccf..f1643dbf6a92 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg( | |||
268 | ISCSI_TCP); | 268 | ISCSI_TCP); |
269 | if (IS_ERR(tpg_np)) { | 269 | if (IS_ERR(tpg_np)) { |
270 | iscsit_put_tpg(tpg); | 270 | iscsit_put_tpg(tpg); |
271 | return ERR_PTR(PTR_ERR(tpg_np)); | 271 | return ERR_CAST(tpg_np); |
272 | } | 272 | } |
273 | pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); | 273 | pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); |
274 | 274 | ||
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn( | |||
1285 | 1285 | ||
1286 | tiqn = iscsit_add_tiqn((unsigned char *)name); | 1286 | tiqn = iscsit_add_tiqn((unsigned char *)name); |
1287 | if (IS_ERR(tiqn)) | 1287 | if (IS_ERR(tiqn)) |
1288 | return ERR_PTR(PTR_ERR(tiqn)); | 1288 | return ERR_CAST(tiqn); |
1289 | /* | 1289 | /* |
1290 | * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. | 1290 | * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. |
1291 | */ | 1291 | */ |
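Two hunks above replace ERR_PTR(PTR_ERR(p)) with ERR_CAST(p). Both yield the same encoded error pointer; ERR_CAST just states the intent directly instead of decoding and re-encoding the errno. A tiny sketch with hypothetical types.

#include <linux/err.h>

struct my_inner;
struct my_outer;

/* Hypothetical allocator that returns a valid pointer or ERR_PTR(-errno). */
extern struct my_inner *my_inner_create(const char *name);

static struct my_outer *my_outer_create(const char *name)
{
        struct my_inner *inner = my_inner_create(name);

        if (IS_ERR(inner))
                return ERR_CAST(inner);   /* same as ERR_PTR(PTR_ERR(inner)) */

        /* ... wrap "inner" in a my_outer here ... */
        return NULL;                      /* placeholder for the real object */
}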
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 980650792cf6..c4c68da3e500 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c | |||
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn( | |||
834 | */ | 834 | */ |
835 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, | 835 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, |
836 | ooo_list) { | 836 | ooo_list) { |
837 | while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) | 837 | if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) |
838 | continue; | 838 | continue; |
839 | 839 | ||
840 | list_add(&ooo_cmdsn->ooo_list, | 840 | list_add(&ooo_cmdsn->ooo_list, |
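The one-word fix in iscsi_target_erl1.c matters more than it looks: inside list_for_each_entry(), "while (cond) continue;" re-tests an unchanging condition forever because the continue restarts the inner while, whereas "if (cond) continue;" advances to the next list entry. A reduced sketch with a hypothetical list node.

#include <linux/types.h>
#include <linux/list.h>

struct my_node {
        u32 cmdsn;
        struct list_head list;
};

/* Return the first node whose cmdsn is >= the requested one, or NULL. */
static struct my_node *my_find_first_ge(struct list_head *head, u32 cmdsn)
{
        struct my_node *pos;

        list_for_each_entry(pos, head, list) {
                if (pos->cmdsn < cmdsn)   /* NOT: while (...) continue; */
                        continue;         /* moves on to the next entry */
                return pos;
        }
        return NULL;
}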
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bcaf82f47037..daad362a93ce 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
1013 | ISCSI_LOGIN_STATUS_TARGET_ERROR); | 1013 | ISCSI_LOGIN_STATUS_TARGET_ERROR); |
1014 | goto new_sess_out; | 1014 | goto new_sess_out; |
1015 | } | 1015 | } |
1016 | #if 0 | 1016 | snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", |
1017 | if (!iscsi_ntop6((const unsigned char *) | 1017 | &sock_in6.sin6_addr.in6_u); |
1018 | &sock_in6.sin6_addr.in6_u, | 1018 | conn->login_port = ntohs(sock_in6.sin6_port); |
1019 | (char *)&conn->ipv6_login_ip[0], | ||
1020 | IPV6_ADDRESS_SPACE)) { | ||
1021 | pr_err("iscsi_ntop6() failed\n"); | ||
1022 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
1023 | ISCSI_LOGIN_STATUS_TARGET_ERROR); | ||
1024 | goto new_sess_out; | ||
1025 | } | ||
1026 | #else | ||
1027 | pr_debug("Skipping iscsi_ntop6()\n"); | ||
1028 | #endif | ||
1029 | } else { | 1019 | } else { |
1030 | memset(&sock_in, 0, sizeof(struct sockaddr_in)); | 1020 | memset(&sock_in, 0, sizeof(struct sockaddr_in)); |
1031 | 1021 | ||
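The login-thread hunk drops the dead iscsi_ntop6() block and lets printk's %pI6c extension format the IPv6 address in compressed form. A small sketch; the buffer size and names are assumptions for illustration.

#include <linux/kernel.h>
#include <linux/in6.h>

static void my_format_peer(char *buf, size_t len,
                           const struct sockaddr_in6 *sin6)
{
        /* %pI6c prints a compressed IPv6 address, e.g. "::1". */
        snprintf(buf, len, "%pI6c", &sin6->sin6_addr);
}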
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 252e246cf51e..497b2e718a76 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
@@ -545,13 +545,13 @@ int iscsi_copy_param_list( | |||
545 | struct iscsi_param_list *src_param_list, | 545 | struct iscsi_param_list *src_param_list, |
546 | int leading) | 546 | int leading) |
547 | { | 547 | { |
548 | struct iscsi_param *new_param = NULL, *param = NULL; | 548 | struct iscsi_param *param = NULL; |
549 | struct iscsi_param *new_param = NULL; | ||
549 | struct iscsi_param_list *param_list = NULL; | 550 | struct iscsi_param_list *param_list = NULL; |
550 | 551 | ||
551 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); | 552 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); |
552 | if (!param_list) { | 553 | if (!param_list) { |
553 | pr_err("Unable to allocate memory for" | 554 | pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); |
554 | " struct iscsi_param_list.\n"); | ||
555 | goto err_out; | 555 | goto err_out; |
556 | } | 556 | } |
557 | INIT_LIST_HEAD(¶m_list->param_list); | 557 | INIT_LIST_HEAD(¶m_list->param_list); |
@@ -567,8 +567,17 @@ int iscsi_copy_param_list( | |||
567 | 567 | ||
568 | new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); | 568 | new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); |
569 | if (!new_param) { | 569 | if (!new_param) { |
570 | pr_err("Unable to allocate memory for" | 570 | pr_err("Unable to allocate memory for struct iscsi_param.\n"); |
571 | " struct iscsi_param.\n"); | 571 | goto err_out; |
572 | } | ||
573 | |||
574 | new_param->name = kstrdup(param->name, GFP_KERNEL); | ||
575 | new_param->value = kstrdup(param->value, GFP_KERNEL); | ||
576 | if (!new_param->value || !new_param->name) { | ||
577 | kfree(new_param->value); | ||
578 | kfree(new_param->name); | ||
579 | kfree(new_param); | ||
580 | pr_err("Unable to allocate memory for parameter name/value.\n"); | ||
572 | goto err_out; | 581 | goto err_out; |
573 | } | 582 | } |
574 | 583 | ||
@@ -580,32 +589,12 @@ int iscsi_copy_param_list( | |||
580 | new_param->use = param->use; | 589 | new_param->use = param->use; |
581 | new_param->type_range = param->type_range; | 590 | new_param->type_range = param->type_range; |
582 | 591 | ||
583 | new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); | ||
584 | if (!new_param->name) { | ||
585 | pr_err("Unable to allocate memory for" | ||
586 | " parameter name.\n"); | ||
587 | goto err_out; | ||
588 | } | ||
589 | |||
590 | new_param->value = kzalloc(strlen(param->value) + 1, | ||
591 | GFP_KERNEL); | ||
592 | if (!new_param->value) { | ||
593 | pr_err("Unable to allocate memory for" | ||
594 | " parameter value.\n"); | ||
595 | goto err_out; | ||
596 | } | ||
597 | |||
598 | memcpy(new_param->name, param->name, strlen(param->name)); | ||
599 | new_param->name[strlen(param->name)] = '\0'; | ||
600 | memcpy(new_param->value, param->value, strlen(param->value)); | ||
601 | new_param->value[strlen(param->value)] = '\0'; | ||
602 | |||
603 | list_add_tail(&new_param->p_list, ¶m_list->param_list); | 592 | list_add_tail(&new_param->p_list, ¶m_list->param_list); |
604 | } | 593 | } |
605 | 594 | ||
606 | if (!list_empty(¶m_list->param_list)) | 595 | if (!list_empty(¶m_list->param_list)) { |
607 | *dst_param_list = param_list; | 596 | *dst_param_list = param_list; |
608 | else { | 597 | } else { |
609 | pr_err("No parameters allocated.\n"); | 598 | pr_err("No parameters allocated.\n"); |
610 | goto err_out; | 599 | goto err_out; |
611 | } | 600 | } |
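The parameter-copy rework collapses kzalloc() + memcpy() + manual NUL termination into kstrdup(), and frees both strings if either duplication fails (kfree(NULL) is a no-op, so the error path stays simple). A minimal sketch of the same pattern with a hypothetical struct.

#include <linux/slab.h>
#include <linux/string.h>

struct my_param {
        char *name;
        char *value;
};

static struct my_param *my_param_clone(const char *name, const char *value)
{
        struct my_param *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        p->name = kstrdup(name, GFP_KERNEL);
        p->value = kstrdup(value, GFP_KERNEL);
        if (!p->name || !p->value) {
                kfree(p->name);     /* kfree(NULL) is harmless */
                kfree(p->value);
                kfree(p);
                return NULL;
        }
        return p;
}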
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a1acb0167902..a0d23bc0fc98 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( | |||
243 | if (!cmd->tmr_req) { | 243 | if (!cmd->tmr_req) { |
244 | pr_err("Unable to allocate memory for" | 244 | pr_err("Unable to allocate memory for" |
245 | " Task Management command!\n"); | 245 | " Task Management command!\n"); |
246 | return NULL; | 246 | goto out; |
247 | } | 247 | } |
248 | /* | 248 | /* |
249 | * TASK_REASSIGN for ERL=2 / connection stays inside of | 249 | * TASK_REASSIGN for ERL=2 / connection stays inside of |
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( | |||
298 | return cmd; | 298 | return cmd; |
299 | out: | 299 | out: |
300 | iscsit_release_cmd(cmd); | 300 | iscsit_release_cmd(cmd); |
301 | if (se_cmd) | ||
302 | transport_free_se_cmd(se_cmd); | ||
303 | return NULL; | 301 | return NULL; |
304 | } | 302 | } |
305 | 303 | ||
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8ae09a1bdf74..89ae923c5da6 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c | |||
@@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
67 | { | 67 | { |
68 | struct se_lun *lun = cmd->se_lun; | 68 | struct se_lun *lun = cmd->se_lun; |
69 | struct se_device *dev = cmd->se_dev; | 69 | struct se_device *dev = cmd->se_dev; |
70 | struct se_portal_group *tpg = lun->lun_sep->sep_tpg; | ||
70 | unsigned char *buf; | 71 | unsigned char *buf; |
71 | 72 | ||
72 | /* | 73 | /* |
@@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
81 | 82 | ||
82 | buf = transport_kmap_first_data_page(cmd); | 83 | buf = transport_kmap_first_data_page(cmd); |
83 | 84 | ||
84 | buf[0] = dev->transport->get_device_type(dev); | 85 | if (dev == tpg->tpg_virt_lun0.lun_se_dev) { |
85 | if (buf[0] == TYPE_TAPE) | 86 | buf[0] = 0x3f; /* Not connected */ |
86 | buf[1] = 0x80; | 87 | } else { |
88 | buf[0] = dev->transport->get_device_type(dev); | ||
89 | if (buf[0] == TYPE_TAPE) | ||
90 | buf[1] = 0x80; | ||
91 | } | ||
87 | buf[2] = dev->transport->get_device_rev(dev); | 92 | buf[2] = dev->transport->get_device_rev(dev); |
88 | 93 | ||
89 | /* | 94 | /* |
@@ -915,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
915 | length += target_modesense_control(dev, &buf[offset+length]); | 920 | length += target_modesense_control(dev, &buf[offset+length]); |
916 | break; | 921 | break; |
917 | default: | 922 | default: |
918 | pr_err("Got Unknown Mode Page: 0x%02x\n", | 923 | pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", |
919 | cdb[2] & 0x3f); | 924 | cdb[2] & 0x3f, cdb[3]); |
920 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; | 925 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; |
921 | } | 926 | } |
922 | offset += length; | 927 | offset += length; |
@@ -1072,8 +1077,6 @@ target_emulate_unmap(struct se_task *task) | |||
1072 | size -= 16; | 1077 | size -= 16; |
1073 | } | 1078 | } |
1074 | 1079 | ||
1075 | task->task_scsi_status = GOOD; | ||
1076 | transport_complete_task(task, 1); | ||
1077 | err: | 1080 | err: |
1078 | transport_kunmap_first_data_page(cmd); | 1081 | transport_kunmap_first_data_page(cmd); |
1079 | 1082 | ||
@@ -1085,24 +1088,17 @@ err: | |||
1085 | * Note this is not used for TCM/pSCSI passthrough | 1088 | * Note this is not used for TCM/pSCSI passthrough |
1086 | */ | 1089 | */ |
1087 | static int | 1090 | static int |
1088 | target_emulate_write_same(struct se_task *task, int write_same32) | 1091 | target_emulate_write_same(struct se_task *task, u32 num_blocks) |
1089 | { | 1092 | { |
1090 | struct se_cmd *cmd = task->task_se_cmd; | 1093 | struct se_cmd *cmd = task->task_se_cmd; |
1091 | struct se_device *dev = cmd->se_dev; | 1094 | struct se_device *dev = cmd->se_dev; |
1092 | sector_t range; | 1095 | sector_t range; |
1093 | sector_t lba = cmd->t_task_lba; | 1096 | sector_t lba = cmd->t_task_lba; |
1094 | unsigned int num_blocks; | ||
1095 | int ret; | 1097 | int ret; |
1096 | /* | 1098 | /* |
1097 | * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict | 1099 | * Use the explicit range when non zero is supplied, otherwise calculate |
1098 | * range when non zero is supplied, otherwise calculate the remaining | 1100 | * the remaining range based on ->get_blocks() - starting LBA. |
1099 | * range based on ->get_blocks() - starting LBA. | ||
1100 | */ | 1101 | */ |
1101 | if (write_same32) | ||
1102 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | ||
1103 | else | ||
1104 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | ||
1105 | |||
1106 | if (num_blocks != 0) | 1102 | if (num_blocks != 0) |
1107 | range = num_blocks; | 1103 | range = num_blocks; |
1108 | else | 1104 | else |
@@ -1117,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, int write_same32) | |||
1117 | return ret; | 1113 | return ret; |
1118 | } | 1114 | } |
1119 | 1115 | ||
1120 | task->task_scsi_status = GOOD; | ||
1121 | transport_complete_task(task, 1); | ||
1122 | return 0; | 1116 | return 0; |
1123 | } | 1117 | } |
1124 | 1118 | ||
@@ -1165,13 +1159,23 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1165 | } | 1159 | } |
1166 | ret = target_emulate_unmap(task); | 1160 | ret = target_emulate_unmap(task); |
1167 | break; | 1161 | break; |
1162 | case WRITE_SAME: | ||
1163 | if (!dev->transport->do_discard) { | ||
1164 | pr_err("WRITE_SAME emulation not supported" | ||
1165 | " for: %s\n", dev->transport->name); | ||
1166 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1167 | } | ||
1168 | ret = target_emulate_write_same(task, | ||
1169 | get_unaligned_be16(&cmd->t_task_cdb[7])); | ||
1170 | break; | ||
1168 | case WRITE_SAME_16: | 1171 | case WRITE_SAME_16: |
1169 | if (!dev->transport->do_discard) { | 1172 | if (!dev->transport->do_discard) { |
1170 | pr_err("WRITE_SAME_16 emulation not supported" | 1173 | pr_err("WRITE_SAME_16 emulation not supported" |
1171 | " for: %s\n", dev->transport->name); | 1174 | " for: %s\n", dev->transport->name); |
1172 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1175 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1173 | } | 1176 | } |
1174 | ret = target_emulate_write_same(task, 0); | 1177 | ret = target_emulate_write_same(task, |
1178 | get_unaligned_be32(&cmd->t_task_cdb[10])); | ||
1175 | break; | 1179 | break; |
1176 | case VARIABLE_LENGTH_CMD: | 1180 | case VARIABLE_LENGTH_CMD: |
1177 | service_action = | 1181 | service_action = |
@@ -1184,7 +1188,8 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1184 | dev->transport->name); | 1188 | dev->transport->name); |
1185 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1189 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1186 | } | 1190 | } |
1187 | ret = target_emulate_write_same(task, 1); | 1191 | ret = target_emulate_write_same(task, |
1192 | get_unaligned_be32(&cmd->t_task_cdb[28])); | ||
1188 | break; | 1193 | break; |
1189 | default: | 1194 | default: |
1190 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" | 1195 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" |
@@ -1219,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1219 | 1224 | ||
1220 | if (ret < 0) | 1225 | if (ret < 0) |
1221 | return ret; | 1226 | return ret; |
1222 | task->task_scsi_status = GOOD; | 1227 | /* |
1223 | transport_complete_task(task, 1); | 1228 | * Handle the successful completion here unless a caller |
1229 | * has explictly requested an asychronous completion. | ||
1230 | */ | ||
1231 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | ||
1232 | task->task_scsi_status = GOOD; | ||
1233 | transport_complete_task(task, 1); | ||
1234 | } | ||
1224 | 1235 | ||
1225 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 1236 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
1226 | } | 1237 | } |
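target_emulate_write_same() now takes num_blocks directly and the callers extract it from the CDB with get_unaligned_be16()/be32(), since WRITE SAME(10), (16) and (32) keep the count at different offsets and widths. A sketch of that extraction; the offsets mirror the call sites above, the function name is hypothetical.

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 my_write_same_blocks(const unsigned char *cdb,
                                int write_same32, int write_same16)
{
        if (write_same32)
                return get_unaligned_be32(&cdb[28]);  /* WRITE SAME(32) */
        if (write_same16)
                return get_unaligned_be32(&cdb[10]);  /* WRITE SAME(16) */
        return get_unaligned_be16(&cdb[7]);           /* WRITE SAME(10) */
}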
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b38b6c993e65..ca6e4a4df134 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
472 | struct se_dev_entry *deve; | 472 | struct se_dev_entry *deve; |
473 | u32 i; | 473 | u32 i; |
474 | 474 | ||
475 | spin_lock_bh(&tpg->acl_node_lock); | 475 | spin_lock_irq(&tpg->acl_node_lock); |
476 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | 476 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
477 | spin_unlock_bh(&tpg->acl_node_lock); | 477 | spin_unlock_irq(&tpg->acl_node_lock); |
478 | 478 | ||
479 | spin_lock_irq(&nacl->device_list_lock); | 479 | spin_lock_irq(&nacl->device_list_lock); |
480 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 480 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
491 | } | 491 | } |
492 | spin_unlock_irq(&nacl->device_list_lock); | 492 | spin_unlock_irq(&nacl->device_list_lock); |
493 | 493 | ||
494 | spin_lock_bh(&tpg->acl_node_lock); | 494 | spin_lock_irq(&tpg->acl_node_lock); |
495 | } | 495 | } |
496 | spin_unlock_bh(&tpg->acl_node_lock); | 496 | spin_unlock_irq(&tpg->acl_node_lock); |
497 | } | 497 | } |
498 | 498 | ||
499 | static struct se_port *core_alloc_port(struct se_device *dev) | 499 | static struct se_port *core_alloc_port(struct se_device *dev) |
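The acl_node_lock conversions in these target hunks move from spin_lock_bh() to spin_lock_irq(): every user of a lock has to assume the strongest context it can be taken from, and once any path needs interrupts disabled, process-context users must match it or risk deadlock. A minimal sketch with a hypothetical lock.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_acl_lock);

static void my_walk_acls(void)
{
        /* Matches the strongest acquirer; _bh alone would be insufficient. */
        spin_lock_irq(&my_acl_lock);
        /* ... walk the ACL list here ... */
        spin_unlock_irq(&my_acl_lock);
}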
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev) | |||
839 | return ret; | 839 | return ret; |
840 | } | 840 | } |
841 | 841 | ||
842 | u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | ||
843 | { | ||
844 | u32 tmp, aligned_max_sectors; | ||
845 | /* | ||
846 | * Limit max_sectors to a PAGE_SIZE aligned value for modern | ||
847 | * transport_allocate_data_tasks() operation. | ||
848 | */ | ||
849 | tmp = rounddown((max_sectors * block_size), PAGE_SIZE); | ||
850 | aligned_max_sectors = (tmp / block_size); | ||
851 | if (max_sectors != aligned_max_sectors) { | ||
852 | printk(KERN_INFO "Rounding down aligned max_sectors from %u" | ||
853 | " to %u\n", max_sectors, aligned_max_sectors); | ||
854 | return aligned_max_sectors; | ||
855 | } | ||
856 | |||
857 | return max_sectors; | ||
858 | } | ||
859 | |||
842 | void se_dev_set_default_attribs( | 860 | void se_dev_set_default_attribs( |
843 | struct se_device *dev, | 861 | struct se_device *dev, |
844 | struct se_dev_limits *dev_limits) | 862 | struct se_dev_limits *dev_limits) |
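se_dev_align_max_sectors() above rounds max_sectors down so that max_sectors * block_size lands on a PAGE_SIZE boundary, matching how transport_allocate_data_tasks() carves requests into pages. A worked sketch mirroring that helper with hypothetical names: block_size 512 and max_sectors 1023 give rounddown(523776, 4096) = 520192 bytes, i.e. 1016 sectors.

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/page.h>

static u32 my_align_max_sectors(u32 max_sectors, u32 block_size)
{
        /* Round the byte count down to a whole number of pages. */
        u32 bytes = rounddown(max_sectors * block_size, PAGE_SIZE);

        return bytes / block_size;
}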
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs( | |||
878 | * max_sectors is based on subsystem plugin dependent requirements. | 896 | * max_sectors is based on subsystem plugin dependent requirements. |
879 | */ | 897 | */ |
880 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; | 898 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; |
899 | /* | ||
900 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
901 | */ | ||
902 | limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, | ||
903 | limits->logical_block_size); | ||
881 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; | 904 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
882 | /* | 905 | /* |
883 | * Set optimal_sectors from max_sectors, which can be lowered via | 906 | * Set optimal_sectors from max_sectors, which can be lowered via |
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1242 | return -EINVAL; | 1265 | return -EINVAL; |
1243 | } | 1266 | } |
1244 | } | 1267 | } |
1268 | /* | ||
1269 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
1270 | */ | ||
1271 | max_sectors = se_dev_align_max_sectors(max_sectors, | ||
1272 | dev->se_sub_dev->se_dev_attrib.block_size); | ||
1245 | 1273 | ||
1246 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; | 1274 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; |
1247 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", | 1275 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", |
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun( | |||
1344 | */ | 1372 | */ |
1345 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { | 1373 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1346 | struct se_node_acl *acl; | 1374 | struct se_node_acl *acl; |
1347 | spin_lock_bh(&tpg->acl_node_lock); | 1375 | spin_lock_irq(&tpg->acl_node_lock); |
1348 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1376 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1349 | if (acl->dynamic_node_acl) { | 1377 | if (acl->dynamic_node_acl && |
1350 | spin_unlock_bh(&tpg->acl_node_lock); | 1378 | (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || |
1379 | !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { | ||
1380 | spin_unlock_irq(&tpg->acl_node_lock); | ||
1351 | core_tpg_add_node_to_devs(acl, tpg); | 1381 | core_tpg_add_node_to_devs(acl, tpg); |
1352 | spin_lock_bh(&tpg->acl_node_lock); | 1382 | spin_lock_irq(&tpg->acl_node_lock); |
1353 | } | 1383 | } |
1354 | } | 1384 | } |
1355 | spin_unlock_bh(&tpg->acl_node_lock); | 1385 | spin_unlock_irq(&tpg->acl_node_lock); |
1356 | } | 1386 | } |
1357 | 1387 | ||
1358 | return lun_p; | 1388 | return lun_p; |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f1654694f4ea..55bbe0847a6d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl( | |||
481 | 481 | ||
482 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); | 482 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); |
483 | if (IS_ERR(se_nacl)) | 483 | if (IS_ERR(se_nacl)) |
484 | return ERR_PTR(PTR_ERR(se_nacl)); | 484 | return ERR_CAST(se_nacl); |
485 | 485 | ||
486 | nacl_cg = &se_nacl->acl_group; | 486 | nacl_cg = &se_nacl->acl_group; |
487 | nacl_cg->default_groups = se_nacl->acl_default_groups; | 487 | nacl_cg->default_groups = se_nacl->acl_default_groups; |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 1c1b849cd4fb..7fd3a161f7cc 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port( | |||
1598 | * from the decoded fabric module specific TransportID | 1598 | * from the decoded fabric module specific TransportID |
1599 | * at *i_str. | 1599 | * at *i_str. |
1600 | */ | 1600 | */ |
1601 | spin_lock_bh(&tmp_tpg->acl_node_lock); | 1601 | spin_lock_irq(&tmp_tpg->acl_node_lock); |
1602 | dest_node_acl = __core_tpg_get_initiator_node_acl( | 1602 | dest_node_acl = __core_tpg_get_initiator_node_acl( |
1603 | tmp_tpg, i_str); | 1603 | tmp_tpg, i_str); |
1604 | if (dest_node_acl) { | 1604 | if (dest_node_acl) { |
1605 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 1605 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
1606 | smp_mb__after_atomic_inc(); | 1606 | smp_mb__after_atomic_inc(); |
1607 | } | 1607 | } |
1608 | spin_unlock_bh(&tmp_tpg->acl_node_lock); | 1608 | spin_unlock_irq(&tmp_tpg->acl_node_lock); |
1609 | 1609 | ||
1610 | if (!dest_node_acl) { | 1610 | if (!dest_node_acl) { |
1611 | core_scsi3_tpg_undepend_item(tmp_tpg); | 1611 | core_scsi3_tpg_undepend_item(tmp_tpg); |
@@ -3496,14 +3496,14 @@ after_iport_check: | |||
3496 | /* | 3496 | /* |
3497 | * Locate the destination struct se_node_acl from the received Transport ID | 3497 | * Locate the destination struct se_node_acl from the received Transport ID |
3498 | */ | 3498 | */ |
3499 | spin_lock_bh(&dest_se_tpg->acl_node_lock); | 3499 | spin_lock_irq(&dest_se_tpg->acl_node_lock); |
3500 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, | 3500 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, |
3501 | initiator_str); | 3501 | initiator_str); |
3502 | if (dest_node_acl) { | 3502 | if (dest_node_acl) { |
3503 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 3503 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
3504 | smp_mb__after_atomic_inc(); | 3504 | smp_mb__after_atomic_inc(); |
3505 | } | 3505 | } |
3506 | spin_unlock_bh(&dest_se_tpg->acl_node_lock); | 3506 | spin_unlock_irq(&dest_se_tpg->acl_node_lock); |
3507 | 3507 | ||
3508 | if (!dest_node_acl) { | 3508 | if (!dest_node_acl) { |
3509 | pr_err("Unable to locate %s dest_node_acl for" | 3509 | pr_err("Unable to locate %s dest_node_acl for" |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 3dd81d24d9a9..e567e129c697 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
390 | length = req->rd_size; | 390 | length = req->rd_size; |
391 | 391 | ||
392 | dst = sg_virt(&sg_d[i++]) + dst_offset; | 392 | dst = sg_virt(&sg_d[i++]) + dst_offset; |
393 | if (!dst) | 393 | BUG_ON(!dst); |
394 | BUG(); | ||
395 | 394 | ||
396 | src = sg_virt(&sg_s[j]) + src_offset; | 395 | src = sg_virt(&sg_s[j]) + src_offset; |
397 | if (!src) | 396 | BUG_ON(!src); |
398 | BUG(); | ||
399 | 397 | ||
400 | dst_offset = 0; | 398 | dst_offset = 0; |
401 | src_offset = length; | 399 | src_offset = length; |
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
415 | length = req->rd_size; | 413 | length = req->rd_size; |
416 | 414 | ||
417 | dst = sg_virt(&sg_d[i]) + dst_offset; | 415 | dst = sg_virt(&sg_d[i]) + dst_offset; |
418 | if (!dst) | 416 | BUG_ON(!dst); |
419 | BUG(); | ||
420 | 417 | ||
421 | if (sg_d[i].length == length) { | 418 | if (sg_d[i].length == length) { |
422 | i++; | 419 | i++; |
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
425 | dst_offset = length; | 422 | dst_offset = length; |
426 | 423 | ||
427 | src = sg_virt(&sg_s[j++]) + src_offset; | 424 | src = sg_virt(&sg_s[j++]) + src_offset; |
428 | if (!src) | 425 | BUG_ON(!src); |
429 | BUG(); | ||
430 | 426 | ||
431 | src_offset = 0; | 427 | src_offset = 0; |
432 | page_end = 1; | 428 | page_end = 1; |
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
510 | length = req->rd_size; | 506 | length = req->rd_size; |
511 | 507 | ||
512 | src = sg_virt(&sg_s[i++]) + src_offset; | 508 | src = sg_virt(&sg_s[i++]) + src_offset; |
513 | if (!src) | 509 | BUG_ON(!src); |
514 | BUG(); | ||
515 | 510 | ||
516 | dst = sg_virt(&sg_d[j]) + dst_offset; | 511 | dst = sg_virt(&sg_d[j]) + dst_offset; |
517 | if (!dst) | 512 | BUG_ON(!dst); |
518 | BUG(); | ||
519 | 513 | ||
520 | src_offset = 0; | 514 | src_offset = 0; |
521 | dst_offset = length; | 515 | dst_offset = length; |
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
535 | length = req->rd_size; | 529 | length = req->rd_size; |
536 | 530 | ||
537 | src = sg_virt(&sg_s[i]) + src_offset; | 531 | src = sg_virt(&sg_s[i]) + src_offset; |
538 | if (!src) | 532 | BUG_ON(!src); |
539 | BUG(); | ||
540 | 533 | ||
541 | if (sg_s[i].length == length) { | 534 | if (sg_s[i].length == length) { |
542 | i++; | 535 | i++; |
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
545 | src_offset = length; | 538 | src_offset = length; |
546 | 539 | ||
547 | dst = sg_virt(&sg_d[j++]) + dst_offset; | 540 | dst = sg_virt(&sg_d[j++]) + dst_offset; |
548 | if (!dst) | 541 | BUG_ON(!dst); |
549 | BUG(); | ||
550 | 542 | ||
551 | dst_offset = 0; | 543 | dst_offset = 0; |
552 | page_end = 1; | 544 | page_end = 1; |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4f1ba4c5ef11..162b736c7342 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( | |||
137 | { | 137 | { |
138 | struct se_node_acl *acl; | 138 | struct se_node_acl *acl; |
139 | 139 | ||
140 | spin_lock_bh(&tpg->acl_node_lock); | 140 | spin_lock_irq(&tpg->acl_node_lock); |
141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
142 | if (!strcmp(acl->initiatorname, initiatorname) && | 142 | if (!strcmp(acl->initiatorname, initiatorname) && |
143 | !acl->dynamic_node_acl) { | 143 | !acl->dynamic_node_acl) { |
144 | spin_unlock_bh(&tpg->acl_node_lock); | 144 | spin_unlock_irq(&tpg->acl_node_lock); |
145 | return acl; | 145 | return acl; |
146 | } | 146 | } |
147 | } | 147 | } |
148 | spin_unlock_bh(&tpg->acl_node_lock); | 148 | spin_unlock_irq(&tpg->acl_node_lock); |
149 | 149 | ||
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
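
The rest of this file converts acl_node_lock from the _bh variants to the _irq variants. spin_lock_bh() only disables softirqs, which is not enough once the same lock can be contended from hardirq-related paths; the irqsave conversions further down in transport_deregister_session() point at that being the motivation. A hedged sketch of the difference, with an invented lock and counter:

	/* Illustration only: demo_lock and demo_count are invented for this sketch. */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_count;

	static void demo_update_irq_safe(void)
	{
		/* Disables local hardirqs; safe against an interrupt handler
		 * that also takes demo_lock. */
		spin_lock_irq(&demo_lock);
		demo_count++;
		spin_unlock_irq(&demo_lock);	/* unconditionally re-enables irqs */
	}

	static void demo_update_bh_only(void)
	{
		/* Only disables bottom halves (softirqs); insufficient if the
		 * lock is ever taken from hardirq context. */
		spin_lock_bh(&demo_lock);
		demo_count++;
		spin_unlock_bh(&demo_lock);
	}
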
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | 298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
299 | return NULL; | 299 | return NULL; |
300 | } | 300 | } |
301 | /* | ||
302 | * Here we only create demo-mode MappedLUNs from the active | ||
303 | * TPG LUNs if the fabric is not explicitly asking for | ||
303 | * TPG LUNs if the fabric is not explicitly asking for | ||
304 | * tpg_check_demo_mode_login_only() == 1. | ||
305 | */ | ||
306 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && | ||
307 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) | ||
308 | do { ; } while (0); | ||
309 | else | ||
310 | core_tpg_add_node_to_devs(acl, tpg); | ||
301 | 311 | ||
302 | core_tpg_add_node_to_devs(acl, tpg); | 312 | spin_lock_irq(&tpg->acl_node_lock); |
303 | |||
304 | spin_lock_bh(&tpg->acl_node_lock); | ||
305 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | 313 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
306 | tpg->num_node_acls++; | 314 | tpg->num_node_acls++; |
307 | spin_unlock_bh(&tpg->acl_node_lock); | 315 | spin_unlock_irq(&tpg->acl_node_lock); |
308 | 316 | ||
309 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | 317 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" |
310 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 318 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
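
The new hunk above skips demo-mode MappedLUN creation when the fabric implements tpg_check_demo_mode_login_only() and it returns 1; the empty "do { ; } while (0);" is just a placeholder for the taken-but-do-nothing branch. An equivalent, arguably clearer inversion of the same test (a sketch, not what the patch actually merged):

	/* Same logic with the condition inverted; symbols as in the hunk above. */
	if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only &&
	      tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
		core_tpg_add_node_to_devs(acl, tpg);
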
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
354 | { | 362 | { |
355 | struct se_node_acl *acl = NULL; | 363 | struct se_node_acl *acl = NULL; |
356 | 364 | ||
357 | spin_lock_bh(&tpg->acl_node_lock); | 365 | spin_lock_irq(&tpg->acl_node_lock); |
358 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 366 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
359 | if (acl) { | 367 | if (acl) { |
360 | if (acl->dynamic_node_acl) { | 368 | if (acl->dynamic_node_acl) { |
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
362 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" | 370 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" |
363 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 371 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
364 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); | 372 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
365 | spin_unlock_bh(&tpg->acl_node_lock); | 373 | spin_unlock_irq(&tpg->acl_node_lock); |
366 | /* | 374 | /* |
367 | * Release the locally allocated struct se_node_acl | 375 | * Release the locally allocated struct se_node_acl |
368 | * because * core_tpg_add_initiator_node_acl() returned | 376 | * because * core_tpg_add_initiator_node_acl() returned |
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
378 | " Node %s already exists for TPG %u, ignoring" | 386 | " Node %s already exists for TPG %u, ignoring" |
379 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 387 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
380 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 388 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
381 | spin_unlock_bh(&tpg->acl_node_lock); | 389 | spin_unlock_irq(&tpg->acl_node_lock); |
382 | return ERR_PTR(-EEXIST); | 390 | return ERR_PTR(-EEXIST); |
383 | } | 391 | } |
384 | spin_unlock_bh(&tpg->acl_node_lock); | 392 | spin_unlock_irq(&tpg->acl_node_lock); |
385 | 393 | ||
386 | if (!se_nacl) { | 394 | if (!se_nacl) { |
387 | pr_err("struct se_node_acl pointer is NULL\n"); | 395 | pr_err("struct se_node_acl pointer is NULL\n"); |
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
418 | return ERR_PTR(-EINVAL); | 426 | return ERR_PTR(-EINVAL); |
419 | } | 427 | } |
420 | 428 | ||
421 | spin_lock_bh(&tpg->acl_node_lock); | 429 | spin_lock_irq(&tpg->acl_node_lock); |
422 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | 430 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
423 | tpg->num_node_acls++; | 431 | tpg->num_node_acls++; |
424 | spin_unlock_bh(&tpg->acl_node_lock); | 432 | spin_unlock_irq(&tpg->acl_node_lock); |
425 | 433 | ||
426 | done: | 434 | done: |
427 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | 435 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" |
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl( | |||
445 | struct se_session *sess, *sess_tmp; | 453 | struct se_session *sess, *sess_tmp; |
446 | int dynamic_acl = 0; | 454 | int dynamic_acl = 0; |
447 | 455 | ||
448 | spin_lock_bh(&tpg->acl_node_lock); | 456 | spin_lock_irq(&tpg->acl_node_lock); |
449 | if (acl->dynamic_node_acl) { | 457 | if (acl->dynamic_node_acl) { |
450 | acl->dynamic_node_acl = 0; | 458 | acl->dynamic_node_acl = 0; |
451 | dynamic_acl = 1; | 459 | dynamic_acl = 1; |
452 | } | 460 | } |
453 | list_del(&acl->acl_list); | 461 | list_del(&acl->acl_list); |
454 | tpg->num_node_acls--; | 462 | tpg->num_node_acls--; |
455 | spin_unlock_bh(&tpg->acl_node_lock); | 463 | spin_unlock_irq(&tpg->acl_node_lock); |
456 | 464 | ||
457 | spin_lock_bh(&tpg->session_lock); | 465 | spin_lock_bh(&tpg->session_lock); |
458 | list_for_each_entry_safe(sess, sess_tmp, | 466 | list_for_each_entry_safe(sess, sess_tmp, |
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth( | |||
503 | struct se_node_acl *acl; | 511 | struct se_node_acl *acl; |
504 | int dynamic_acl = 0; | 512 | int dynamic_acl = 0; |
505 | 513 | ||
506 | spin_lock_bh(&tpg->acl_node_lock); | 514 | spin_lock_irq(&tpg->acl_node_lock); |
507 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 515 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
508 | if (!acl) { | 516 | if (!acl) { |
509 | pr_err("Access Control List entry for %s Initiator" | 517 | pr_err("Access Control List entry for %s Initiator" |
510 | " Node %s does not exists for TPG %hu, ignoring" | 518 | " Node %s does not exists for TPG %hu, ignoring" |
511 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 519 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
512 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 520 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
513 | spin_unlock_bh(&tpg->acl_node_lock); | 521 | spin_unlock_irq(&tpg->acl_node_lock); |
514 | return -ENODEV; | 522 | return -ENODEV; |
515 | } | 523 | } |
516 | if (acl->dynamic_node_acl) { | 524 | if (acl->dynamic_node_acl) { |
517 | acl->dynamic_node_acl = 0; | 525 | acl->dynamic_node_acl = 0; |
518 | dynamic_acl = 1; | 526 | dynamic_acl = 1; |
519 | } | 527 | } |
520 | spin_unlock_bh(&tpg->acl_node_lock); | 528 | spin_unlock_irq(&tpg->acl_node_lock); |
521 | 529 | ||
522 | spin_lock_bh(&tpg->session_lock); | 530 | spin_lock_bh(&tpg->session_lock); |
523 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { | 531 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { |
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
533 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | 541 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
534 | spin_unlock_bh(&tpg->session_lock); | 542 | spin_unlock_bh(&tpg->session_lock); |
535 | 543 | ||
536 | spin_lock_bh(&tpg->acl_node_lock); | 544 | spin_lock_irq(&tpg->acl_node_lock); |
537 | if (dynamic_acl) | 545 | if (dynamic_acl) |
538 | acl->dynamic_node_acl = 1; | 546 | acl->dynamic_node_acl = 1; |
539 | spin_unlock_bh(&tpg->acl_node_lock); | 547 | spin_unlock_irq(&tpg->acl_node_lock); |
540 | return -EEXIST; | 548 | return -EEXIST; |
541 | } | 549 | } |
542 | /* | 550 | /* |
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
571 | if (init_sess) | 579 | if (init_sess) |
572 | tpg->se_tpg_tfo->close_session(init_sess); | 580 | tpg->se_tpg_tfo->close_session(init_sess); |
573 | 581 | ||
574 | spin_lock_bh(&tpg->acl_node_lock); | 582 | spin_lock_irq(&tpg->acl_node_lock); |
575 | if (dynamic_acl) | 583 | if (dynamic_acl) |
576 | acl->dynamic_node_acl = 1; | 584 | acl->dynamic_node_acl = 1; |
577 | spin_unlock_bh(&tpg->acl_node_lock); | 585 | spin_unlock_irq(&tpg->acl_node_lock); |
578 | return -EINVAL; | 586 | return -EINVAL; |
579 | } | 587 | } |
580 | spin_unlock_bh(&tpg->session_lock); | 588 | spin_unlock_bh(&tpg->session_lock); |
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
590 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), | 598 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
591 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 599 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
592 | 600 | ||
593 | spin_lock_bh(&tpg->acl_node_lock); | 601 | spin_lock_irq(&tpg->acl_node_lock); |
594 | if (dynamic_acl) | 602 | if (dynamic_acl) |
595 | acl->dynamic_node_acl = 1; | 603 | acl->dynamic_node_acl = 1; |
596 | spin_unlock_bh(&tpg->acl_node_lock); | 604 | spin_unlock_irq(&tpg->acl_node_lock); |
597 | 605 | ||
598 | return 0; | 606 | return 0; |
599 | } | 607 | } |
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
717 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | 725 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 |
718 | * in transport_deregister_session(). | 726 | * in transport_deregister_session(). |
719 | */ | 727 | */ |
720 | spin_lock_bh(&se_tpg->acl_node_lock); | 728 | spin_lock_irq(&se_tpg->acl_node_lock); |
721 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | 729 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, |
722 | acl_list) { | 730 | acl_list) { |
723 | list_del(&nacl->acl_list); | 731 | list_del(&nacl->acl_list); |
724 | se_tpg->num_node_acls--; | 732 | se_tpg->num_node_acls--; |
725 | spin_unlock_bh(&se_tpg->acl_node_lock); | 733 | spin_unlock_irq(&se_tpg->acl_node_lock); |
726 | 734 | ||
727 | core_tpg_wait_for_nacl_pr_ref(nacl); | 735 | core_tpg_wait_for_nacl_pr_ref(nacl); |
728 | core_free_device_list_for_node(nacl, se_tpg); | 736 | core_free_device_list_for_node(nacl, se_tpg); |
729 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); | 737 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); |
730 | 738 | ||
731 | spin_lock_bh(&se_tpg->acl_node_lock); | 739 | spin_lock_irq(&se_tpg->acl_node_lock); |
732 | } | 740 | } |
733 | spin_unlock_bh(&se_tpg->acl_node_lock); | 741 | spin_unlock_irq(&se_tpg->acl_node_lock); |
734 | 742 | ||
735 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 743 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
736 | core_tpg_release_virtual_lun0(se_tpg); | 744 | core_tpg_release_virtual_lun0(se_tpg); |
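
core_tpg_deregister() above walks acl_node_list with the classic drop-and-retake pattern: an entry is unhooked under the lock, the lock is released so the sleeping cleanup (waiting for PR references, freeing device lists) can run, then the lock is retaken before the next iteration. A generic sketch of that pattern with hypothetical names, assuming (as the teardown path does) that no other context removes entries concurrently:

	/* Sketch: my_node, my_list and my_lock are illustrative, not driver symbols. */
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_node {
		struct list_head list;
	};

	static void teardown_all(struct list_head *my_list, spinlock_t *my_lock)
	{
		struct my_node *n, *tmp;

		spin_lock_irq(my_lock);
		list_for_each_entry_safe(n, tmp, my_list, list) {
			list_del(&n->list);
			spin_unlock_irq(my_lock);	/* drop before sleeping work */

			kfree(n);			/* stands in for the sleeping cleanup */

			spin_lock_irq(my_lock);		/* retake before the next entry */
		}
		spin_unlock_irq(my_lock);
	}
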
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 89760329d5d0..8d0c58ea6316 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess) | |||
389 | { | 389 | { |
390 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 390 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
391 | struct se_node_acl *se_nacl; | 391 | struct se_node_acl *se_nacl; |
392 | unsigned long flags; | ||
392 | 393 | ||
393 | if (!se_tpg) { | 394 | if (!se_tpg) { |
394 | transport_free_session(se_sess); | 395 | transport_free_session(se_sess); |
395 | return; | 396 | return; |
396 | } | 397 | } |
397 | 398 | ||
398 | spin_lock_bh(&se_tpg->session_lock); | 399 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
399 | list_del(&se_sess->sess_list); | 400 | list_del(&se_sess->sess_list); |
400 | se_sess->se_tpg = NULL; | 401 | se_sess->se_tpg = NULL; |
401 | se_sess->fabric_sess_ptr = NULL; | 402 | se_sess->fabric_sess_ptr = NULL; |
402 | spin_unlock_bh(&se_tpg->session_lock); | 403 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
403 | 404 | ||
404 | /* | 405 | /* |
405 | * Determine if we need to do extra work for this initiator node's | 406 | * Determine if we need to do extra work for this initiator node's |
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess) | |||
407 | */ | 408 | */ |
408 | se_nacl = se_sess->se_node_acl; | 409 | se_nacl = se_sess->se_node_acl; |
409 | if (se_nacl) { | 410 | if (se_nacl) { |
410 | spin_lock_bh(&se_tpg->acl_node_lock); | 411 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
411 | if (se_nacl->dynamic_node_acl) { | 412 | if (se_nacl->dynamic_node_acl) { |
412 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( | 413 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
413 | se_tpg)) { | 414 | se_tpg)) { |
414 | list_del(&se_nacl->acl_list); | 415 | list_del(&se_nacl->acl_list); |
415 | se_tpg->num_node_acls--; | 416 | se_tpg->num_node_acls--; |
416 | spin_unlock_bh(&se_tpg->acl_node_lock); | 417 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
417 | 418 | ||
418 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 419 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
419 | core_free_device_list_for_node(se_nacl, se_tpg); | 420 | core_free_device_list_for_node(se_nacl, se_tpg); |
420 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, | 421 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
421 | se_nacl); | 422 | se_nacl); |
422 | spin_lock_bh(&se_tpg->acl_node_lock); | 423 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
423 | } | 424 | } |
424 | } | 425 | } |
425 | spin_unlock_bh(&se_tpg->acl_node_lock); | 426 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
426 | } | 427 | } |
427 | 428 | ||
428 | transport_free_session(se_sess); | 429 | transport_free_session(se_sess); |
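
transport_deregister_session() now uses spin_lock_irqsave()/spin_unlock_irqrestore() because it can be reached from contexts where interrupts may already be disabled; the saved flags restore the caller's previous interrupt state instead of blindly re-enabling it, which the plain _irq variants would do. A minimal, self-contained sketch with an invented lock:

	/* Sketch: shows why the flags matter for callers that already hold irqs off. */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(state_lock);
	static int state;

	static void set_state(int v)
	{
		unsigned long flags;

		spin_lock_irqsave(&state_lock, flags);		/* works with irqs on or off */
		state = v;
		spin_unlock_irqrestore(&state_lock, flags);	/* restores the prior irq state */
	}
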
@@ -2053,8 +2054,14 @@ static void transport_generic_request_failure( | |||
2053 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2054 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2054 | break; | 2055 | break; |
2055 | } | 2056 | } |
2056 | 2057 | /* | |
2057 | if (!sc) | 2058 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, |
2059 | * make the call to transport_send_check_condition_and_sense() | ||
2060 | * directly. Otherwise expect the fabric to make the call to | ||
2061 | * transport_send_check_condition_and_sense() after handling | ||
2062 | * possible unsolicited write data payloads. | ||
2063 | */ | ||
2064 | if (!sc && !cmd->se_tfo->new_cmd_map) | ||
2058 | transport_new_cmd_failure(cmd); | 2065 | transport_new_cmd_failure(cmd); |
2059 | else { | 2066 | else { |
2060 | ret = transport_send_check_condition_and_sense(cmd, | 2067 | ret = transport_send_check_condition_and_sense(cmd, |
@@ -2847,12 +2854,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |||
2847 | " transport_dev_end_lba(): %llu\n", | 2854 | " transport_dev_end_lba(): %llu\n", |
2848 | cmd->t_task_lba, sectors, | 2855 | cmd->t_task_lba, sectors, |
2849 | transport_dev_end_lba(dev)); | 2856 | transport_dev_end_lba(dev)); |
2850 | pr_err(" We should return CHECK_CONDITION" | 2857 | return -EINVAL; |
2851 | " but we don't yet\n"); | ||
2852 | return 0; | ||
2853 | } | 2858 | } |
2854 | 2859 | ||
2855 | return sectors; | 2860 | return 0; |
2861 | } | ||
2862 | |||
2863 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) | ||
2864 | { | ||
2865 | /* | ||
2866 | * Determine if the received WRITE_SAME is used for direct | ||
2867 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | ||
2868 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | ||
2869 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code. | ||
2870 | */ | ||
2871 | int passthrough = (dev->transport->transport_type == | ||
2872 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
2873 | |||
2874 | if (!passthrough) { | ||
2875 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | ||
2876 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
2877 | " bits not supported for Block Discard" | ||
2878 | " Emulation\n"); | ||
2879 | return -ENOSYS; | ||
2880 | } | ||
2881 | /* | ||
2882 | * Currently for the emulated case we only accept | ||
2883 | * tpws with the UNMAP=1 bit set. | ||
2884 | */ | ||
2885 | if (!(flags[0] & 0x08)) { | ||
2886 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
2887 | " supported for Block Discard Emulation\n"); | ||
2888 | return -ENOSYS; | ||
2889 | } | ||
2890 | } | ||
2891 | |||
2892 | return 0; | ||
2856 | } | 2893 | } |
2857 | 2894 | ||
2858 | /* transport_generic_cmd_sequencer(): | 2895 | /* transport_generic_cmd_sequencer(): |
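
The new target_check_write_same_discard() helper centralizes the WRITE_SAME flag checks used below for all three CDB variants: when the backend emulates the command as a block discard, PBDATA (0x04) and LBDATA (0x02) must be clear and UNMAP (0x08) must be set, while pSCSI passthrough skips the checks entirely. A standalone restatement of the bit tests (a sketch, not the driver's exact function):

	/* Sketch: WRITE_SAME flag-byte checks; bit layout as used in the hunk above. */
	#include <linux/errno.h>
	#include <linux/types.h>

	static int check_write_same_flags(unsigned char flags, bool passthrough)
	{
		if (passthrough)
			return 0;		/* pSCSI: the real device interprets the CDB */

		if (flags & 0x04)		/* PBDATA not supported for emulation */
			return -ENOSYS;
		if (flags & 0x02)		/* LBDATA not supported for emulation */
			return -ENOSYS;
		if (!(flags & 0x08))		/* UNMAP must be set for discard emulation */
			return -ENOSYS;

		return 0;
	}
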
@@ -3065,7 +3102,7 @@ static int transport_generic_cmd_sequencer( | |||
3065 | goto out_unsupported_cdb; | 3102 | goto out_unsupported_cdb; |
3066 | 3103 | ||
3067 | if (sectors) | 3104 | if (sectors) |
3068 | size = transport_get_size(sectors, cdb, cmd); | 3105 | size = transport_get_size(1, cdb, cmd); |
3069 | else { | 3106 | else { |
3070 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | 3107 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" |
3071 | " supported\n"); | 3108 | " supported\n"); |
@@ -3075,27 +3112,9 @@ static int transport_generic_cmd_sequencer( | |||
3075 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); | 3112 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
3076 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3113 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3077 | 3114 | ||
3078 | /* | 3115 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
3079 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3080 | */ | ||
3081 | if (passthrough) | ||
3082 | break; | ||
3083 | |||
3084 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | ||
3085 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
3086 | " bits not supported for Block Discard" | ||
3087 | " Emulation\n"); | ||
3088 | goto out_invalid_cdb_field; | 3116 | goto out_invalid_cdb_field; |
3089 | } | 3117 | |
3090 | /* | ||
3091 | * Currently for the emulated case we only accept | ||
3092 | * tpws with the UNMAP=1 bit set. | ||
3093 | */ | ||
3094 | if (!(cdb[10] & 0x08)) { | ||
3095 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
3096 | " supported for Block Discard Emulation\n"); | ||
3097 | goto out_invalid_cdb_field; | ||
3098 | } | ||
3099 | break; | 3118 | break; |
3100 | default: | 3119 | default: |
3101 | pr_err("VARIABLE_LENGTH_CMD service action" | 3120 | pr_err("VARIABLE_LENGTH_CMD service action" |
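
Note the switch from transport_get_size(sectors, ...) to transport_get_size(1, ...) for WRITE_SAME: the initiator sends only one logical block of data, which the device replicates across the LBA range, so the expected data-out length is one block regardless of the sector count. A rough illustration of the arithmetic (the helper and its parameters here are assumptions, not the driver's real interface):

	/* Sketch: expected data-out length for WRITE_SAME vs. an ordinary WRITE. */
	#include <linux/types.h>

	static u32 expected_data_len(u32 sectors, u32 block_size, bool write_same)
	{
		if (write_same)
			return block_size;		/* one block, replicated by the device */
		return sectors * block_size;		/* a normal WRITE carries every block */
	}
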
@@ -3330,10 +3349,12 @@ static int transport_generic_cmd_sequencer( | |||
3330 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | 3349 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; |
3331 | /* | 3350 | /* |
3332 | * Check to ensure that LBA + Range does not exceed past end of | 3351 | * Check to ensure that LBA + Range does not exceed past end of |
3333 | * device. | 3352 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
3334 | */ | 3353 | */ |
3335 | if (!transport_cmd_get_valid_sectors(cmd)) | 3354 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3336 | goto out_invalid_cdb_field; | 3355 | if (transport_cmd_get_valid_sectors(cmd) < 0) |
3356 | goto out_invalid_cdb_field; | ||
3357 | } | ||
3337 | break; | 3358 | break; |
3338 | case UNMAP: | 3359 | case UNMAP: |
3339 | size = get_unaligned_be16(&cdb[7]); | 3360 | size = get_unaligned_be16(&cdb[7]); |
@@ -3345,40 +3366,38 @@ static int transport_generic_cmd_sequencer( | |||
3345 | goto out_unsupported_cdb; | 3366 | goto out_unsupported_cdb; |
3346 | 3367 | ||
3347 | if (sectors) | 3368 | if (sectors) |
3348 | size = transport_get_size(sectors, cdb, cmd); | 3369 | size = transport_get_size(1, cdb, cmd); |
3349 | else { | 3370 | else { |
3350 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | 3371 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); |
3351 | goto out_invalid_cdb_field; | 3372 | goto out_invalid_cdb_field; |
3352 | } | 3373 | } |
3353 | 3374 | ||
3354 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); | 3375 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
3355 | passthrough = (dev->transport->transport_type == | 3376 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3356 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3377 | |
3357 | /* | 3378 | if (target_check_write_same_discard(&cdb[1], dev) < 0) |
3358 | * Determine if the received WRITE_SAME_16 is used to for direct | 3379 | goto out_invalid_cdb_field; |
3359 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | 3380 | break; |
3360 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | 3381 | case WRITE_SAME: |
3361 | * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and | 3382 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); |
3362 | * TCM/FILEIO subsystem plugin backstores. | 3383 | if (sector_ret) |
3363 | */ | 3384 | goto out_unsupported_cdb; |
3364 | if (!passthrough) { | 3385 | |
3365 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | 3386 | if (sectors) |
3366 | pr_err("WRITE_SAME PBDATA and LBDATA" | 3387 | size = transport_get_size(1, cdb, cmd); |
3367 | " bits not supported for Block Discard" | 3388 | else { |
3368 | " Emulation\n"); | 3389 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); |
3369 | goto out_invalid_cdb_field; | 3390 | goto out_invalid_cdb_field; |
3370 | } | ||
3371 | /* | ||
3372 | * Currently for the emulated case we only accept | ||
3373 | * tpws with the UNMAP=1 bit set. | ||
3374 | */ | ||
3375 | if (!(cdb[1] & 0x08)) { | ||
3376 | pr_err("WRITE_SAME w/o UNMAP bit not " | ||
3377 | " supported for Block Discard Emulation\n"); | ||
3378 | goto out_invalid_cdb_field; | ||
3379 | } | ||
3380 | } | 3391 | } |
3392 | |||
3393 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | ||
3381 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3394 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3395 | /* | ||
3396 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | ||
3397 | * of byte 1 bit 3 UNMAP instead of original reserved field | ||
3398 | */ | ||
3399 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | ||
3400 | goto out_invalid_cdb_field; | ||
3382 | break; | 3401 | break; |
3383 | case ALLOW_MEDIUM_REMOVAL: | 3402 | case ALLOW_MEDIUM_REMOVAL: |
3384 | case GPCMD_CLOSE_TRACK: | 3403 | case GPCMD_CLOSE_TRACK: |
@@ -3873,9 +3892,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |||
3873 | static int transport_new_cmd_obj(struct se_cmd *cmd) | 3892 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3874 | { | 3893 | { |
3875 | struct se_device *dev = cmd->se_dev; | 3894 | struct se_device *dev = cmd->se_dev; |
3876 | u32 task_cdbs; | 3895 | int set_counts = 1, rc, task_cdbs; |
3877 | u32 rc; | ||
3878 | int set_counts = 1; | ||
3879 | 3896 | ||
3880 | /* | 3897 | /* |
3881 | * Setup any BIDI READ tasks and memory from | 3898 | * Setup any BIDI READ tasks and memory from |
@@ -3893,7 +3910,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
3893 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3910 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3894 | cmd->scsi_sense_reason = | 3911 | cmd->scsi_sense_reason = |
3895 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3912 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3896 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3913 | return -EINVAL; |
3897 | } | 3914 | } |
3898 | atomic_inc(&cmd->t_fe_count); | 3915 | atomic_inc(&cmd->t_fe_count); |
3899 | atomic_inc(&cmd->t_se_count); | 3916 | atomic_inc(&cmd->t_se_count); |
@@ -3912,7 +3929,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
3912 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3929 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3913 | cmd->scsi_sense_reason = | 3930 | cmd->scsi_sense_reason = |
3914 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3931 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3915 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3932 | return -EINVAL; |
3916 | } | 3933 | } |
3917 | 3934 | ||
3918 | if (set_counts) { | 3935 | if (set_counts) { |
@@ -4028,8 +4045,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4028 | if (!task->task_sg) | 4045 | if (!task->task_sg) |
4029 | continue; | 4046 | continue; |
4030 | 4047 | ||
4031 | BUG_ON(!task->task_padded_sg); | ||
4032 | |||
4033 | if (!sg_first) { | 4048 | if (!sg_first) { |
4034 | sg_first = task->task_sg; | 4049 | sg_first = task->task_sg; |
4035 | chained_nents = task->task_sg_nents; | 4050 | chained_nents = task->task_sg_nents; |
@@ -4037,9 +4052,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4037 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); | 4052 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
4038 | chained_nents += task->task_sg_nents; | 4053 | chained_nents += task->task_sg_nents; |
4039 | } | 4054 | } |
4055 | /* | ||
4056 | * For the padded tasks, use the extra SGL vector allocated | ||
4057 | * in transport_allocate_data_tasks() for the sg_prev_nents | ||
4058 | * offset into sg_chain() above.. The last task of a | ||
4059 | * multi-task list, or a single task will not have | ||
4060 | * task->task_sg_padded set.. | ||
4061 | */ | ||
4062 | if (task->task_padded_sg) | ||
4063 | sg_prev_nents = (task->task_sg_nents + 1); | ||
4064 | else | ||
4065 | sg_prev_nents = task->task_sg_nents; | ||
4040 | 4066 | ||
4041 | sg_prev = task->task_sg; | 4067 | sg_prev = task->task_sg; |
4042 | sg_prev_nents = task->task_sg_nents; | ||
4043 | } | 4068 | } |
4044 | /* | 4069 | /* |
4045 | * Setup the starting pointer and total t_tasks_sg_linked_no including | 4070 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
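
transport_do_task_sg_chain() now derives sg_prev_nents from whether the previous task carried a padding entry: sg_chain() writes its link into prev[prev_nents - 1], so a padded task must advertise nents + 1 entries or the link would clobber a real data entry. A small self-contained example of chaining two tables where the first reserves a spare slot (helper name and allocation are illustrative):

	/* Sketch: chain two SG tables; the first keeps one extra slot for the link. */
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static struct scatterlist *chain_two_tables(struct page *p1, struct page *p2)
	{
		struct scatterlist *a, *b;

		a = kmalloc(2 * sizeof(*a), GFP_KERNEL);	/* 1 data + 1 chain slot */
		b = kmalloc(1 * sizeof(*b), GFP_KERNEL);
		if (!a || !b)
			goto fail;

		sg_init_table(a, 2);
		sg_init_table(b, 1);
		sg_set_page(&a[0], p1, PAGE_SIZE, 0);
		sg_set_page(&b[0], p2, PAGE_SIZE, 0);

		/* The nents passed here counts the padding slot, i.e. 2, so the
		 * chain entry lands in a[1] rather than on top of a[0]. */
		sg_chain(a, 2, b);
		return a;
	fail:
		kfree(a);
		kfree(b);
		return NULL;
	}
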
@@ -4091,7 +4116,7 @@ static int transport_allocate_data_tasks( | |||
4091 | 4116 | ||
4092 | cmd_sg = sgl; | 4117 | cmd_sg = sgl; |
4093 | for (i = 0; i < task_count; i++) { | 4118 | for (i = 0; i < task_count; i++) { |
4094 | unsigned int task_size; | 4119 | unsigned int task_size, task_sg_nents_padded; |
4095 | int count; | 4120 | int count; |
4096 | 4121 | ||
4097 | task = transport_generic_get_task(cmd, data_direction); | 4122 | task = transport_generic_get_task(cmd, data_direction); |
@@ -4110,30 +4135,33 @@ static int transport_allocate_data_tasks( | |||
4110 | 4135 | ||
4111 | /* Update new cdb with updated lba/sectors */ | 4136 | /* Update new cdb with updated lba/sectors */ |
4112 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); | 4137 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
4113 | 4138 | /* | |
4139 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | ||
4140 | * in order to calculate the number per task SGL entries | ||
4141 | */ | ||
4142 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | ||
4114 | /* | 4143 | /* |
4115 | * Check if the fabric module driver is requesting that all | 4144 | * Check if the fabric module driver is requesting that all |
4116 | * struct se_task->task_sg[] be chained together.. If so, | 4145 | * struct se_task->task_sg[] be chained together.. If so, |
4117 | * then allocate an extra padding SG entry for linking and | 4146 | * then allocate an extra padding SG entry for linking and |
4118 | * marking the end of the chained SGL. | 4147 | * marking the end of the chained SGL for every task except |
4119 | * Possibly over-allocate task sgl size by using cmd sgl size. | 4148 | * the last one for (task_count > 1) operation, or skipping |
4120 | * It's so much easier and only a waste when task_count > 1. | 4149 | * the extra padding for the (task_count == 1) case. |
4121 | * That is extremely rare. | ||
4122 | */ | 4150 | */ |
4123 | task->task_sg_nents = sgl_nents; | 4151 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4124 | if (cmd->se_tfo->task_sg_chaining) { | 4152 | task_sg_nents_padded = (task->task_sg_nents + 1); |
4125 | task->task_sg_nents++; | ||
4126 | task->task_padded_sg = 1; | 4153 | task->task_padded_sg = 1; |
4127 | } | 4154 | } else |
4155 | task_sg_nents_padded = task->task_sg_nents; | ||
4128 | 4156 | ||
4129 | task->task_sg = kmalloc(sizeof(struct scatterlist) * | 4157 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
4130 | task->task_sg_nents, GFP_KERNEL); | 4158 | task_sg_nents_padded, GFP_KERNEL); |
4131 | if (!task->task_sg) { | 4159 | if (!task->task_sg) { |
4132 | cmd->se_dev->transport->free_task(task); | 4160 | cmd->se_dev->transport->free_task(task); |
4133 | return -ENOMEM; | 4161 | return -ENOMEM; |
4134 | } | 4162 | } |
4135 | 4163 | ||
4136 | sg_init_table(task->task_sg, task->task_sg_nents); | 4164 | sg_init_table(task->task_sg, task_sg_nents_padded); |
4137 | 4165 | ||
4138 | task_size = task->task_size; | 4166 | task_size = task->task_size; |
4139 | 4167 | ||
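
The reworked allocation sizes each task's SGL as DIV_ROUND_UP(task_size, PAGE_SIZE), relying on the stated assumption that the passed entries are PAGE_SIZE chunks, and only adds the +1 padding slot for tasks that are not last in a multi-task command, since only those are ever chained into a following task. A sketch of the sizing arithmetic:

	/* Sketch of the per-task SGL sizing; 'chaining' mirrors
	 * cmd->se_tfo->task_sg_chaining and 'last' is (i == task_count - 1). */
	#include <linux/kernel.h>
	#include <linux/types.h>

	static unsigned int task_sgl_entries(unsigned int task_size, bool chaining, bool last)
	{
		unsigned int nents = DIV_ROUND_UP(task_size, PAGE_SIZE);

		if (chaining && !last)
			nents += 1;	/* room for the sg_chain() link entry */
		return nents;
	}
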
@@ -4230,10 +4258,13 @@ static u32 transport_allocate_tasks( | |||
4230 | struct scatterlist *sgl, | 4258 | struct scatterlist *sgl, |
4231 | unsigned int sgl_nents) | 4259 | unsigned int sgl_nents) |
4232 | { | 4260 | { |
4233 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) | 4261 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
4262 | if (transport_cmd_get_valid_sectors(cmd) < 0) | ||
4263 | return -EINVAL; | ||
4264 | |||
4234 | return transport_allocate_data_tasks(cmd, lba, data_direction, | 4265 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4235 | sgl, sgl_nents); | 4266 | sgl, sgl_nents); |
4236 | else | 4267 | } else |
4237 | return transport_allocate_control_task(cmd); | 4268 | return transport_allocate_control_task(cmd); |
4238 | 4269 | ||
4239 | } | 4270 | } |
@@ -4726,6 +4757,13 @@ int transport_send_check_condition_and_sense( | |||
4726 | */ | 4757 | */ |
4727 | switch (reason) { | 4758 | switch (reason) { |
4728 | case TCM_NON_EXISTENT_LUN: | 4759 | case TCM_NON_EXISTENT_LUN: |
4760 | /* CURRENT ERROR */ | ||
4761 | buffer[offset] = 0x70; | ||
4762 | /* ILLEGAL REQUEST */ | ||
4763 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
4764 | /* LOGICAL UNIT NOT SUPPORTED */ | ||
4765 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | ||
4766 | break; | ||
4729 | case TCM_UNSUPPORTED_SCSI_OPCODE: | 4767 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4730 | case TCM_SECTOR_COUNT_TOO_MANY: | 4768 | case TCM_SECTOR_COUNT_TOO_MANY: |
4731 | /* CURRENT ERROR */ | 4769 | /* CURRENT ERROR */ |
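
The new TCM_NON_EXISTENT_LUN case fills a fixed-format sense buffer: response code 0x70 (current error), sense key ILLEGAL REQUEST, and additional sense code 0x25, which SPC defines as LOGICAL UNIT NOT SUPPORTED. A hedged sketch of that payload with the byte offsets spelled out numerically (the driver addresses the same positions through its SPC_SENSE_KEY_OFFSET/SPC_ASC_KEY_OFFSET macros):

	/* Sketch: fixed-format sense data for "LOGICAL UNIT NOT SUPPORTED". */
	#include <linux/string.h>
	#include <linux/types.h>
	#include <scsi/scsi.h>

	static void fill_lun_not_supported_sense(unsigned char *buf, size_t len)
	{
		memset(buf, 0, len);
		buf[0] = 0x70;			/* current error, fixed format */
		buf[2] = ILLEGAL_REQUEST;	/* sense key 0x5 */
		buf[7] = 0x0a;			/* additional sense length */
		buf[12] = 0x25;			/* ASC: LOGICAL UNIT NOT SUPPORTED */
		buf[13] = 0x00;			/* ASCQ */
	}
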
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8781d1e423df..b15879d43e22 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
256 | struct se_portal_group *se_tpg = &tpg->se_tpg; | 256 | struct se_portal_group *se_tpg = &tpg->se_tpg; |
257 | struct se_node_acl *se_acl; | 257 | struct se_node_acl *se_acl; |
258 | 258 | ||
259 | spin_lock_bh(&se_tpg->acl_node_lock); | 259 | spin_lock_irq(&se_tpg->acl_node_lock); |
260 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { | 260 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { |
261 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); | 261 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); |
262 | pr_debug("acl %p port_name %llx\n", | 262 | pr_debug("acl %p port_name %llx\n", |
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
270 | break; | 270 | break; |
271 | } | 271 | } |
272 | } | 272 | } |
273 | spin_unlock_bh(&se_tpg->acl_node_lock); | 273 | spin_unlock_irq(&se_tpg->acl_node_lock); |
274 | return found; | 274 | return found; |
275 | } | 275 | } |
276 | 276 | ||
@@ -655,9 +655,7 @@ static void __exit ft_exit(void) | |||
655 | synchronize_rcu(); | 655 | synchronize_rcu(); |
656 | } | 656 | } |
657 | 657 | ||
658 | #ifdef MODULE | ||
659 | MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); | 658 | MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); |
660 | MODULE_LICENSE("GPL"); | 659 | MODULE_LICENSE("GPL"); |
661 | module_init(ft_init); | 660 | module_init(ft_init); |
662 | module_exit(ft_exit); | 661 | module_exit(ft_exit); |
663 | #endif /* MODULE */ | ||
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 385acb895ab3..3f94ac34dce3 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status: | |||
268 | dev_err(dev, "usb_bulk_msg returned %d\n", rv); | 268 | dev_err(dev, "usb_bulk_msg returned %d\n", rv); |
269 | goto exit; | 269 | goto exit; |
270 | } | 270 | } |
271 | } while ((actual = max_size) && | 271 | } while ((actual == max_size) && |
272 | (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); | 272 | (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); |
273 | 273 | ||
274 | if (actual == max_size) { | 274 | if (actual == max_size) { |
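
The usbtmc change is a one-character bug fix: "while ((actual = max_size) && ...)" assigned max_size to actual, so the loop condition effectively depended only on the read counter; "==" restores the intended comparison. A small sketch of the pitfall:

	/* Sketch: assignment vs. comparison in a loop condition. */
	static int drain_reads(int max_size)
	{
		int actual = 0, n = 0;

		do {
			/* ... read up to max_size bytes, setting 'actual' ... */
			n++;
		} while ((actual == max_size) && (n < 100));
		/* With '=' instead of '==' the extra parentheses typically silence
		 * gcc's -Wparentheses hint, and the test degenerates to max_size != 0. */
		return n;
	}
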
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index c962608b4b9a..26678cadfb21 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | if (usb_endpoint_xfer_isoc(&ep->desc)) | 125 | if (usb_endpoint_xfer_isoc(&ep->desc)) |
126 | max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) * | 126 | max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * |
127 | (desc->bmAttributes + 1); | 127 | le16_to_cpu(ep->desc.wMaxPacketSize); |
128 | else if (usb_endpoint_xfer_int(&ep->desc)) | 128 | else if (usb_endpoint_xfer_int(&ep->desc)) |
129 | max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1); | 129 | max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) * |
130 | (desc->bMaxBurst + 1); | ||
130 | else | 131 | else |
131 | max_tx = 999999; | 132 | max_tx = 999999; |
132 | if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { | 133 | if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { |
@@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
134 | "config %d interface %d altsetting %d ep %d: " | 135 | "config %d interface %d altsetting %d ep %d: " |
135 | "setting to %d\n", | 136 | "setting to %d\n", |
136 | usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", | 137 | usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", |
137 | desc->wBytesPerInterval, | 138 | le16_to_cpu(desc->wBytesPerInterval), |
138 | cfgno, inum, asnum, ep->desc.bEndpointAddress, | 139 | cfgno, inum, asnum, ep->desc.bEndpointAddress, |
139 | max_tx); | 140 | max_tx); |
140 | ep->ss_ep_comp.wBytesPerInterval = max_tx; | 141 | ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); |
141 | } | 142 | } |
142 | } | 143 | } |
143 | 144 | ||
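
The config.c hunk is an endianness fix: wMaxPacketSize and wBytesPerInterval are little-endian on the wire, so they must pass through le16_to_cpu() before arithmetic and cpu_to_le16() before being stored back into the descriptor; on big-endian hosts the old code computed max_tx from a byte-swapped value. A generic sketch of the convention, with an invented descriptor field:

	/* Sketch: handling a little-endian 16-bit descriptor field portably. */
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_desc {
		__le16 wPayload;	/* stored little-endian, as on the USB wire */
	};

	static void clamp_payload(struct demo_desc *d, u16 limit)
	{
		u16 cur = le16_to_cpu(d->wPayload);	/* convert before comparing */

		if (cur > limit)
			d->wPayload = cpu_to_le16(limit);	/* convert before storing */
	}
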
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 44b6b40aafb4..5a084b9cfa3c 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -310,7 +310,7 @@ config USB_PXA_U2O | |||
310 | # musb builds in ../musb along with host support | 310 | # musb builds in ../musb along with host support |
311 | config USB_GADGET_MUSB_HDRC | 311 | config USB_GADGET_MUSB_HDRC |
312 | tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)" | 312 | tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)" |
313 | depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) | 313 | depends on USB_MUSB_HDRC |
314 | select USB_GADGET_DUALSPEED | 314 | select USB_GADGET_DUALSPEED |
315 | help | 315 | help |
316 | This OTG-capable silicon IP is used in dual designs including | 316 | This OTG-capable silicon IP is used in dual designs including |
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 98cbc06c30fd..ddb118a76807 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/prefetch.h> | ||
38 | #include <linux/clk.h> | 39 | #include <linux/clk.h> |
39 | #include <linux/usb/ch9.h> | 40 | #include <linux/usb/ch9.h> |
40 | #include <linux/usb/gadget.h> | 41 | #include <linux/usb/gadget.h> |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5ef87794fd32..aef47414f5d5 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -1079,10 +1079,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) | |||
1079 | cdev->desc.bMaxPacketSize0 = | 1079 | cdev->desc.bMaxPacketSize0 = |
1080 | cdev->gadget->ep0->maxpacket; | 1080 | cdev->gadget->ep0->maxpacket; |
1081 | if (gadget_is_superspeed(gadget)) { | 1081 | if (gadget_is_superspeed(gadget)) { |
1082 | if (gadget->speed >= USB_SPEED_SUPER) | 1082 | if (gadget->speed >= USB_SPEED_SUPER) { |
1083 | cdev->desc.bcdUSB = cpu_to_le16(0x0300); | 1083 | cdev->desc.bcdUSB = cpu_to_le16(0x0300); |
1084 | else | 1084 | cdev->desc.bMaxPacketSize0 = 9; |
1085 | } else { | ||
1085 | cdev->desc.bcdUSB = cpu_to_le16(0x0210); | 1086 | cdev->desc.bcdUSB = cpu_to_le16(0x0210); |
1087 | } | ||
1086 | } | 1088 | } |
1087 | 1089 | ||
1088 | value = min(w_length, (u16) sizeof cdev->desc); | 1090 | value = min(w_length, (u16) sizeof cdev->desc); |
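
For a SuperSpeed connection the device descriptor's bMaxPacketSize0 is an exponent, not a byte count: ep0's max packet size is 2^bMaxPacketSize0, so the value 9 set above advertises the mandatory 512-byte control endpoint, while at high/full speed the field still holds the literal size. A small sketch of decoding the field:

	/* Sketch: interpreting bMaxPacketSize0 depending on the negotiated speed. */
	#include <linux/usb/ch9.h>

	static unsigned int ep0_maxpacket(enum usb_device_speed speed, u8 bMaxPacketSize0)
	{
		if (speed >= USB_SPEED_SUPER)
			return 1 << bMaxPacketSize0;	/* 9 -> 512 bytes */
		return bMaxPacketSize0;			/* 8/16/32/64 literal bytes */
	}
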
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c index 403a48bcf560..83a266bdb40e 100644 --- a/drivers/usb/gadget/f_hid.c +++ b/drivers/usb/gadget/f_hid.c | |||
@@ -367,6 +367,13 @@ static int hidg_setup(struct usb_function *f, | |||
367 | case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 | 367 | case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 |
368 | | USB_REQ_GET_DESCRIPTOR): | 368 | | USB_REQ_GET_DESCRIPTOR): |
369 | switch (value >> 8) { | 369 | switch (value >> 8) { |
370 | case HID_DT_HID: | ||
371 | VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); | ||
372 | length = min_t(unsigned short, length, | ||
373 | hidg_desc.bLength); | ||
374 | memcpy(req->buf, &hidg_desc, length); | ||
375 | goto respond; | ||
376 | break; | ||
370 | case HID_DT_REPORT: | 377 | case HID_DT_REPORT: |
371 | VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); | 378 | VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); |
372 | length = min_t(unsigned short, length, | 379 | length = min_t(unsigned short, length, |
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c index 24a924330c81..4ec888f90002 100644 --- a/drivers/usb/gadget/fusb300_udc.c +++ b/drivers/usb/gadget/fusb300_udc.c | |||
@@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300, | |||
609 | } | 609 | } |
610 | } | 610 | } |
611 | 611 | ||
612 | #if 0 | ||
613 | static void fusb300_dbg_fifo(struct fusb300_ep *ep, | ||
614 | u8 entry, u16 length) | ||
615 | { | ||
616 | u32 reg; | ||
617 | u32 i = 0; | ||
618 | u32 j = 0; | ||
619 | |||
620 | reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); | ||
621 | reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | | ||
622 | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); | ||
623 | reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | | ||
624 | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); | ||
625 | iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); | ||
626 | |||
627 | for (i = 0; i < (length >> 2); i++) { | ||
628 | if (i * 4 == 1024) | ||
629 | break; | ||
630 | reg = ioread32(ep->fusb300->reg + | ||
631 | FUSB300_OFFSET_BUFDBG_START + i * 4); | ||
632 | printk(KERN_DEBUG" 0x%-8x", reg); | ||
633 | j++; | ||
634 | if ((j % 4) == 0) | ||
635 | printk(KERN_DEBUG "\n"); | ||
636 | } | ||
637 | |||
638 | if (length % 4) { | ||
639 | reg = ioread32(ep->fusb300->reg + | ||
640 | FUSB300_OFFSET_BUFDBG_START + i * 4); | ||
641 | printk(KERN_DEBUG " 0x%x\n", reg); | ||
642 | } | ||
643 | |||
644 | if ((j % 4) != 0) | ||
645 | printk(KERN_DEBUG "\n"); | ||
646 | |||
647 | fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, | ||
648 | FUSB300_GTM_TST_FIFO_DEG); | ||
649 | } | ||
650 | |||
651 | static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep, | ||
652 | u8 entry, u16 length, u8 *golden) | ||
653 | { | ||
654 | u32 reg; | ||
655 | u32 i = 0; | ||
656 | u32 golden_value; | ||
657 | u8 *tmp; | ||
658 | |||
659 | tmp = golden; | ||
660 | |||
661 | printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry); | ||
662 | |||
663 | reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); | ||
664 | reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | | ||
665 | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); | ||
666 | reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | | ||
667 | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); | ||
668 | iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); | ||
669 | |||
670 | for (i = 0; i < (length >> 2); i++) { | ||
671 | if (i * 4 == 1024) | ||
672 | break; | ||
673 | golden_value = *tmp | *(tmp + 1) << 8 | | ||
674 | *(tmp + 2) << 16 | *(tmp + 3) << 24; | ||
675 | |||
676 | reg = ioread32(ep->fusb300->reg + | ||
677 | FUSB300_OFFSET_BUFDBG_START + i*4); | ||
678 | |||
679 | if (reg != golden_value) { | ||
680 | printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg + | ||
681 | FUSB300_OFFSET_BUFDBG_START + i*4)); | ||
682 | printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", | ||
683 | golden_value, reg); | ||
684 | } | ||
685 | tmp += 4; | ||
686 | } | ||
687 | |||
688 | switch (length % 4) { | ||
689 | case 1: | ||
690 | golden_value = *tmp; | ||
691 | case 2: | ||
692 | golden_value = *tmp | *(tmp + 1) << 8; | ||
693 | case 3: | ||
694 | golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; | ||
695 | default: | ||
696 | break; | ||
697 | |||
698 | reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); | ||
699 | if (reg != golden_value) { | ||
700 | printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg + | ||
701 | FUSB300_OFFSET_BUFDBG_START + i*4)); | ||
702 | printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", | ||
703 | golden_value, reg); | ||
704 | } | ||
705 | } | ||
706 | |||
707 | printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n"); | ||
708 | fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, | ||
709 | FUSB300_GTM_TST_FIFO_DEG); | ||
710 | } | ||
711 | #endif | ||
712 | |||
713 | static void fusb300_rdfifo(struct fusb300_ep *ep, | 612 | static void fusb300_rdfifo(struct fusb300_ep *ep, |
714 | struct fusb300_request *req, | 613 | struct fusb300_request *req, |
715 | u32 length) | 614 | u32 length) |
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c index 7c7b0e120d88..ab98ea926a11 100644 --- a/drivers/usb/gadget/net2272.c +++ b/drivers/usb/gadget/net2272.c | |||
@@ -27,13 +27,13 @@ | |||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <linux/irq.h> | ||
31 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
32 | #include <linux/list.h> | 31 | #include <linux/list.h> |
33 | #include <linux/module.h> | 32 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
35 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
36 | #include <linux/platform_device.h> | 35 | #include <linux/platform_device.h> |
36 | #include <linux/prefetch.h> | ||
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/timer.h> | 39 | #include <linux/timer.h> |
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 85c1b0d66293..8d31848aab09 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
@@ -2060,6 +2060,7 @@ static int s3c2410_udc_resume(struct platform_device *pdev) | |||
2060 | static const struct platform_device_id s3c_udc_ids[] = { | 2060 | static const struct platform_device_id s3c_udc_ids[] = { |
2061 | { "s3c2410-usbgadget", }, | 2061 | { "s3c2410-usbgadget", }, |
2062 | { "s3c2440-usbgadget", }, | 2062 | { "s3c2440-usbgadget", }, |
2063 | { } | ||
2063 | }; | 2064 | }; |
2064 | MODULE_DEVICE_TABLE(platform, s3c_udc_ids); | 2065 | MODULE_DEVICE_TABLE(platform, s3c_udc_ids); |
2065 | 2066 | ||
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index bf2c8f65e1ae..e051b30c1847 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -1046,7 +1046,19 @@ static int ehci_hub_control ( | |||
1046 | if (!selector || selector > 5) | 1046 | if (!selector || selector > 5) |
1047 | goto error; | 1047 | goto error; |
1048 | ehci_quiesce(ehci); | 1048 | ehci_quiesce(ehci); |
1049 | |||
1050 | /* Put all enabled ports into suspend */ | ||
1051 | while (ports--) { | ||
1052 | u32 __iomem *sreg = | ||
1053 | &ehci->regs->port_status[ports]; | ||
1054 | |||
1055 | temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS; | ||
1056 | if (temp & PORT_PE) | ||
1057 | ehci_writel(ehci, temp | PORT_SUSPEND, | ||
1058 | sreg); | ||
1059 | } | ||
1049 | ehci_halt(ehci); | 1060 | ehci_halt(ehci); |
1061 | temp = ehci_readl(ehci, status_reg); | ||
1050 | temp |= selector << 16; | 1062 | temp |= selector << 16; |
1051 | ehci_writel(ehci, temp, status_reg); | 1063 | ehci_writel(ehci, temp, status_reg); |
1052 | break; | 1064 | break; |
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index 0c058be35a38..555a73c864b5 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/usb/ulpi.h> | 24 | #include <linux/usb/ulpi.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | 26 | ||
27 | #include <mach/hardware.h> | ||
27 | #include <mach/mxc_ehci.h> | 28 | #include <mach/mxc_ehci.h> |
28 | 29 | ||
29 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 55a57c23dd0f..45240321ca09 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | static void disable_put_regulator( | ||
102 | struct ehci_hcd_omap_platform_data *pdata) | ||
103 | { | ||
104 | int i; | ||
105 | |||
106 | for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { | ||
107 | if (pdata->regulator[i]) { | ||
108 | regulator_disable(pdata->regulator[i]); | ||
109 | regulator_put(pdata->regulator[i]); | ||
110 | } | ||
111 | } | ||
112 | } | ||
101 | 113 | ||
102 | /* configure so an HC device and id are always provided */ | 114 | /* configure so an HC device and id are always provided */ |
103 | /* always called with process context; sleeping is OK */ | 115 | /* always called with process context; sleeping is OK */ |
@@ -231,9 +243,11 @@ err_add_hcd: | |||
231 | omap_usbhs_disable(dev); | 243 | omap_usbhs_disable(dev); |
232 | 244 | ||
233 | err_enable: | 245 | err_enable: |
246 | disable_put_regulator(pdata); | ||
234 | usb_put_hcd(hcd); | 247 | usb_put_hcd(hcd); |
235 | 248 | ||
236 | err_io: | 249 | err_io: |
250 | iounmap(regs); | ||
237 | return ret; | 251 | return ret; |
238 | } | 252 | } |
239 | 253 | ||
@@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) | |||
253 | 267 | ||
254 | usb_remove_hcd(hcd); | 268 | usb_remove_hcd(hcd); |
255 | omap_usbhs_disable(dev); | 269 | omap_usbhs_disable(dev); |
270 | disable_put_regulator(dev->platform_data); | ||
271 | iounmap(hcd->regs); | ||
256 | usb_put_hcd(hcd); | 272 | usb_put_hcd(hcd); |
257 | return 0; | 273 | return 0; |
258 | } | 274 | } |
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 55d3d5859ac5..840beda66dd9 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, | |||
1583 | int retval = 0; | 1583 | int retval = 0; |
1584 | 1584 | ||
1585 | spin_lock_irqsave(&priv->lock, spinflags); | 1585 | spin_lock_irqsave(&priv->lock, spinflags); |
1586 | retval = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
1587 | if (retval) | ||
1588 | goto out; | ||
1586 | 1589 | ||
1587 | qh = urb->ep->hcpriv; | 1590 | qh = urb->ep->hcpriv; |
1588 | if (!qh) { | 1591 | if (!qh) { |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a9d315906e3d..629a96813fd6 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -535,7 +535,7 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) | |||
535 | iounmap(base); | 535 | iounmap(base); |
536 | } | 536 | } |
537 | 537 | ||
538 | static const struct dmi_system_id __initconst ehci_dmi_nohandoff_table[] = { | 538 | static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = { |
539 | { | 539 | { |
540 | /* Pegatron Lucid (ExoPC) */ | 540 | /* Pegatron Lucid (ExoPC) */ |
541 | .matches = { | 541 | .matches = { |
@@ -817,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) | |||
817 | 817 | ||
818 | /* If the BIOS owns the HC, signal that the OS wants it, and wait */ | 818 | /* If the BIOS owns the HC, signal that the OS wants it, and wait */ |
819 | if (val & XHCI_HC_BIOS_OWNED) { | 819 | if (val & XHCI_HC_BIOS_OWNED) { |
820 | writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset); | 820 | writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); |
821 | 821 | ||
822 | /* Wait for 5 seconds with 10 microsecond polling interval */ | 822 | /* Wait for 5 seconds with 10 microsecond polling interval */ |
823 | timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, | 823 | timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, |
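
The pci-quirks change fixes a classic bit-manipulation slip in the BIOS/OS handoff: the intent is to set XHCI_HC_OS_OWNED in the legacy support register, so the bit must be OR'ed in; "val & XHCI_HC_OS_OWNED" instead masked every other bit away and never actually requested ownership. In general:

	/* Sketch: requesting ownership by setting a flag bit without touching others. */
	#include <linux/types.h>

	static u32 claim_os_ownership(u32 val, u32 os_owned_bit)
	{
		/* 'val & os_owned_bit' would merely test/extract the bit and drop
		 * every other bit of the register image. */
		return val | os_owned_bit;	/* set the bit, preserve the rest */
	}
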
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 763f484bc092..1c4432d8fc10 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg) | |||
345 | spin_lock_irqsave(&xhci->lock, flags); | 345 | spin_lock_irqsave(&xhci->lock, flags); |
346 | temp = xhci_readl(xhci, &xhci->op_regs->status); | 346 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
347 | xhci_dbg(xhci, "op reg status = 0x%x\n", temp); | 347 | xhci_dbg(xhci, "op reg status = 0x%x\n", temp); |
348 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { | 348 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
349 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | ||
349 | xhci_dbg(xhci, "HW died, polling stopped.\n"); | 350 | xhci_dbg(xhci, "HW died, polling stopped.\n"); |
350 | spin_unlock_irqrestore(&xhci->lock, flags); | 351 | spin_unlock_irqrestore(&xhci->lock, flags); |
351 | return; | 352 | return; |
@@ -939,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | |||
939 | return 0; | 940 | return 0; |
940 | } | 941 | } |
941 | 942 | ||
943 | xhci = hcd_to_xhci(hcd); | ||
944 | if (xhci->xhc_state & XHCI_STATE_HALTED) | ||
945 | return -ENODEV; | ||
946 | |||
942 | if (check_virt_dev) { | 947 | if (check_virt_dev) { |
943 | xhci = hcd_to_xhci(hcd); | ||
944 | if (!udev->slot_id || !xhci->devs | 948 | if (!udev->slot_id || !xhci->devs |
945 | || !xhci->devs[udev->slot_id]) { | 949 | || !xhci->devs[udev->slot_id]) { |
946 | printk(KERN_DEBUG "xHCI %s called with unaddressed " | 950 | printk(KERN_DEBUG "xHCI %s called with unaddressed " |
@@ -1242,7 +1246,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
1242 | xhci_urb_free_priv(xhci, urb_priv); | 1246 | xhci_urb_free_priv(xhci, urb_priv); |
1243 | return ret; | 1247 | return ret; |
1244 | } | 1248 | } |
1245 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 1249 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
1250 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | ||
1246 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " | 1251 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " |
1247 | "non-responsive xHCI host.\n", | 1252 | "non-responsive xHCI host.\n", |
1248 | urb->ep->desc.bEndpointAddress, urb); | 1253 | urb->ep->desc.bEndpointAddress, urb); |
@@ -2665,7 +2670,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2665 | int i, ret; | 2670 | int i, ret; |
2666 | 2671 | ||
2667 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); | 2672 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
2668 | if (ret <= 0) | 2673 | /* If the host is halted due to driver unload, we still need to free the |
2674 | * device. | ||
2675 | */ | ||
2676 | if (ret <= 0 && ret != -ENODEV) | ||
2669 | return; | 2677 | return; |
2670 | 2678 | ||
2671 | virt_dev = xhci->devs[udev->slot_id]; | 2679 | virt_dev = xhci->devs[udev->slot_id]; |
@@ -2679,7 +2687,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2679 | spin_lock_irqsave(&xhci->lock, flags); | 2687 | spin_lock_irqsave(&xhci->lock, flags); |
2680 | /* Don't disable the slot if the host controller is dead. */ | 2688 | /* Don't disable the slot if the host controller is dead. */ |
2681 | state = xhci_readl(xhci, &xhci->op_regs->status); | 2689 | state = xhci_readl(xhci, &xhci->op_regs->status); |
2682 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { | 2690 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
2691 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | ||
2683 | xhci_free_virt_device(xhci, udev->slot_id); | 2692 | xhci_free_virt_device(xhci, udev->slot_id); |
2684 | spin_unlock_irqrestore(&xhci->lock, flags); | 2693 | spin_unlock_irqrestore(&xhci->lock, flags); |
2685 | return; | 2694 | return; |
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 6192b45959f4..fc34b8b11910 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -3,9 +3,6 @@ | |||
3 | # for silicon based on Mentor Graphics INVENTRA designs | 3 | # for silicon based on Mentor Graphics INVENTRA designs |
4 | # | 4 | # |
5 | 5 | ||
6 | comment "Enable Host or Gadget support to see Inventra options" | ||
7 | depends on !USB && USB_GADGET=n | ||
8 | |||
9 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller | 6 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller |
10 | config USB_MUSB_HDRC | 7 | config USB_MUSB_HDRC |
11 | depends on USB && USB_GADGET | 8 | depends on USB && USB_GADGET |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index b67a062f556b..8c41a2e6ea77 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1698,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | |||
1698 | 1698 | ||
1699 | is_on = !!is_on; | 1699 | is_on = !!is_on; |
1700 | 1700 | ||
1701 | pm_runtime_get_sync(musb->controller); | ||
1702 | |||
1701 | /* NOTE: this assumes we are sensing vbus; we'd rather | 1703 | /* NOTE: this assumes we are sensing vbus; we'd rather |
1702 | * not pullup unless the B-session is active. | 1704 | * not pullup unless the B-session is active. |
1703 | */ | 1705 | */ |
@@ -1707,6 +1709,9 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | |||
1707 | musb_pullup(musb, is_on); | 1709 | musb_pullup(musb, is_on); |
1708 | } | 1710 | } |
1709 | spin_unlock_irqrestore(&musb->lock, flags); | 1711 | spin_unlock_irqrestore(&musb->lock, flags); |
1712 | |||
1713 | pm_runtime_put(musb->controller); | ||
1714 | |||
1710 | return 0; | 1715 | return 0; |
1711 | } | 1716 | } |
1712 | 1717 | ||
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index c784e6c03aac..07c8a73dfe41 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c | |||
@@ -89,7 +89,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) | |||
89 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | 89 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); |
90 | 90 | ||
91 | if (reg != 0) { | 91 | if (reg != 0) { |
92 | dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n", | 92 | dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n", |
93 | chdat->epnum, reg & 0xf); | 93 | chdat->epnum, reg & 0xf); |
94 | return -EAGAIN; | 94 | return -EAGAIN; |
95 | } | 95 | } |
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index ba79dbf5adbc..cb2d451d511e 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 14 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
@@ -76,7 +77,7 @@ struct usbhsg_recip_handle { | |||
76 | struct usbhsg_gpriv, mod) | 77 | struct usbhsg_gpriv, mod) |
77 | 78 | ||
78 | #define __usbhsg_for_each_uep(start, pos, g, i) \ | 79 | #define __usbhsg_for_each_uep(start, pos, g, i) \ |
79 | for (i = start, pos = (g)->uep; \ | 80 | for (i = start, pos = (g)->uep + i; \ |
80 | i < (g)->uep_size; \ | 81 | i < (g)->uep_size; \ |
81 | i++, pos = (g)->uep + i) | 82 | i++, pos = (g)->uep + i) |
82 | 83 | ||
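The renesas_usbhs fix makes __usbhsg_for_each_uep() honour its start argument: previously the cursor always pointed at uep[0] on the first pass even when the caller asked to begin at endpoint i. A small runnable reduction of the same bug in a generic "iterate from start" macro; all names here are invented for illustration.

#include <stdio.h>

struct ep { int num; };

/* Buggy: pos ignores the starting index on the first pass. */
#define for_each_from_buggy(start, pos, arr, n, i) \
    for ((i) = (start), (pos) = (arr); (i) < (n); (i)++, (pos) = (arr) + (i))

/* Fixed: pos honours the starting index from the first pass onwards. */
#define for_each_from(start, pos, arr, n, i) \
    for ((i) = (start), (pos) = (arr) + (i); (i) < (n); (i)++, (pos) = (arr) + (i))

int main(void)
{
    struct ep eps[4] = { {0}, {1}, {2}, {3} };
    struct ep *pos;
    int i;

    for_each_from_buggy(2, pos, eps, 4, i)
        printf("buggy: i=%d ep=%d\n", i, pos->num);  /* first line prints ep=0, not ep=2 */

    for_each_from(2, pos, eps, 4, i)
        printf("fixed: i=%d ep=%d\n", i, pos->num);  /* starts at ep=2 as intended */

    return 0;
}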
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 2e06b90aa1f8..78a2cf9551cc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -151,6 +151,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { | |||
151 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! | 151 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! |
152 | */ | 152 | */ |
153 | static struct usb_device_id id_table_combined [] = { | 153 | static struct usb_device_id id_table_combined [] = { |
154 | { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, | ||
154 | { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, | 155 | { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, |
155 | { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, | 156 | { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, |
156 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
@@ -1171,7 +1172,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, | |||
1171 | case FT2232H: /* FT2232H chip */ | 1172 | case FT2232H: /* FT2232H chip */ |
1172 | case FT4232H: /* FT4232H chip */ | 1173 | case FT4232H: /* FT4232H chip */ |
1173 | case FT232H: /* FT232H chip */ | 1174 | case FT232H: /* FT232H chip */ |
1174 | if ((baud <= 12000000) & (baud >= 1200)) { | 1175 | if ((baud <= 12000000) && (baud >= 1200)) { |
1175 | div_value = ftdi_2232h_baud_to_divisor(baud); | 1176 | div_value = ftdi_2232h_baud_to_divisor(baud); |
1176 | } else if (baud < 1200) { | 1177 | } else if (baud < 1200) { |
1177 | div_value = ftdi_232bm_baud_to_divisor(baud); | 1178 | div_value = ftdi_232bm_baud_to_divisor(baud); |
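In get_ftdi_divisor() the range test used bitwise & instead of logical &&. Since both operands are comparison results (always 0 or 1), the two evaluate identically here, so the fix is about stating intent and gaining short-circuit evaluation rather than changing behaviour; with non-boolean operands the operators genuinely differ. A short runnable illustration:

#include <stdio.h>

int main(void)
{
    int baud = 9600;

    /* Both operands are 0/1 comparison results, so & and && agree here... */
    printf("%d %d\n", (baud <= 12000000) & (baud >= 1200),
                      (baud <= 12000000) && (baud >= 1200));    /* 1 1 */

    /* ...but with plain integers they do not: 2 & 4 == 0, while 2 && 4 == 1. */
    printf("%d %d\n", 2 & 4, 2 && 4);                           /* 0 1 */

    /* && also short-circuits, which matters when evaluating the right side
     * is unsafe or has side effects. */
    int *p = NULL;
    if (p != NULL && *p > 0)    /* safe: *p is never evaluated */
        printf("positive\n");

    return 0;
}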
@@ -1205,7 +1206,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) | |||
1205 | urb_index_value = get_ftdi_divisor(tty, port); | 1206 | urb_index_value = get_ftdi_divisor(tty, port); |
1206 | urb_value = (__u16)urb_index_value; | 1207 | urb_value = (__u16)urb_index_value; |
1207 | urb_index = (__u16)(urb_index_value >> 16); | 1208 | urb_index = (__u16)(urb_index_value >> 16); |
1208 | if (priv->interface) { /* FT2232C */ | 1209 | if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) || |
1210 | (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) { | ||
1211 | /* Probably the BM type needs the MSB of the encoded fractional | ||
1212 | * divider also moved like for the chips above. Any infos? */ | ||
1209 | urb_index = (__u16)((urb_index << 8) | priv->interface); | 1213 | urb_index = (__u16)((urb_index << 8) | priv->interface); |
1210 | } | 1214 | } |
1211 | 1215 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 19156d1049fe..bf5227ad3ef7 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -1159,4 +1159,8 @@ | |||
1159 | /* USB-Nano-485*/ | 1159 | /* USB-Nano-485*/ |
1160 | #define FTDI_CTI_NANO_PID 0xF60B | 1160 | #define FTDI_CTI_NANO_PID 0xF60B |
1161 | 1161 | ||
1162 | 1162 | /* | |
1163 | * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de | ||
1164 | */ | ||
1165 | /* TagTracer MIFARE*/ | ||
1166 | #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 60b25d8ea0e2..815656198914 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -148,6 +148,10 @@ static void option_instat_callback(struct urb *urb); | |||
148 | #define HUAWEI_PRODUCT_K4505 0x1464 | 148 | #define HUAWEI_PRODUCT_K4505 0x1464 |
149 | #define HUAWEI_PRODUCT_K3765 0x1465 | 149 | #define HUAWEI_PRODUCT_K3765 0x1465 |
150 | #define HUAWEI_PRODUCT_E14AC 0x14AC | 150 | #define HUAWEI_PRODUCT_E14AC 0x14AC |
151 | #define HUAWEI_PRODUCT_K3770 0x14C9 | ||
152 | #define HUAWEI_PRODUCT_K3771 0x14CA | ||
153 | #define HUAWEI_PRODUCT_K4510 0x14CB | ||
154 | #define HUAWEI_PRODUCT_K4511 0x14CC | ||
151 | #define HUAWEI_PRODUCT_ETS1220 0x1803 | 155 | #define HUAWEI_PRODUCT_ETS1220 0x1803 |
152 | #define HUAWEI_PRODUCT_E353 0x1506 | 156 | #define HUAWEI_PRODUCT_E353 0x1506 |
153 | 157 | ||
@@ -547,6 +551,14 @@ static const struct usb_device_id option_ids[] = { | |||
547 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, | 551 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, |
548 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, | 552 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, |
549 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, | 553 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, |
554 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, | ||
555 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, | ||
556 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, | ||
557 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, | ||
558 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, | ||
559 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, | ||
560 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, | ||
561 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, | ||
550 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, | 562 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, |
551 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, | 563 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, |
552 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, | 564 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 54a9dab1f33b..aeccc7f0a93c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = { | |||
45 | {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ | 45 | {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ |
46 | {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ | 46 | {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ |
47 | {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ | 47 | {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ |
48 | {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ | ||
48 | {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ | 49 | {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ |
49 | {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ | 50 | {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ |
50 | {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ | 51 | {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ |
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table[] = { | |||
78 | {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ | 79 | {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ |
79 | {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ | 80 | {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ |
80 | {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ | 81 | {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ |
82 | {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ | ||
81 | {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ | 83 | {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ |
82 | {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ | 84 | {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ |
83 | {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ | 85 | {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ccff3483eebc..3041a974faf3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, | |||
1988 | "Micro Mini 1GB", | 1988 | "Micro Mini 1GB", |
1989 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), | 1989 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), |
1990 | 1990 | ||
1991 | /* | ||
1992 | * Nick Bowler <nbowler@elliptictech.com> | ||
1993 | * SCSI stack spams (otherwise harmless) error messages. | ||
1994 | */ | ||
1995 | UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100, | ||
1996 | "Keil Software, Inc.", | ||
1997 | "V2M MotherBoard", | ||
1998 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1999 | US_FL_NOT_LOCKABLE), | ||
2000 | |||
1991 | /* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ | 2001 | /* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ |
1992 | UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, | 2002 | UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, |
1993 | "DataStor", | 2003 | "DataStor", |
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 05a8832bb3eb..d06886a2bfb5 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c | |||
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit); | |||
1009 | MODULE_LICENSE("GPL v2"); | 1009 | MODULE_LICENSE("GPL v2"); |
1010 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | 1010 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); |
1011 | MODULE_DESCRIPTION("ADP8870 Backlight driver"); | 1011 | MODULE_DESCRIPTION("ADP8870 Backlight driver"); |
1012 | MODULE_ALIAS("platform:adp8870-backlight"); | 1012 | MODULE_ALIAS("i2c:adp8870-backlight"); |
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 9f1e389d51d2..b0582917f0c8 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. | 11 | * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | 14 | #include <linux/module.h> | |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index b8f38ec6eb18..8b5b2a4124c7 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
@@ -28,6 +28,8 @@ struct pwm_bl_data { | |||
28 | unsigned int lth_brightness; | 28 | unsigned int lth_brightness; |
29 | int (*notify)(struct device *, | 29 | int (*notify)(struct device *, |
30 | int brightness); | 30 | int brightness); |
31 | void (*notify_after)(struct device *, | ||
32 | int brightness); | ||
31 | int (*check_fb)(struct device *, struct fb_info *); | 33 | int (*check_fb)(struct device *, struct fb_info *); |
32 | }; | 34 | }; |
33 | 35 | ||
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl) | |||
55 | pwm_config(pb->pwm, brightness, pb->period); | 57 | pwm_config(pb->pwm, brightness, pb->period); |
56 | pwm_enable(pb->pwm); | 58 | pwm_enable(pb->pwm); |
57 | } | 59 | } |
60 | |||
61 | if (pb->notify_after) | ||
62 | pb->notify_after(pb->dev, brightness); | ||
63 | |||
58 | return 0; | 64 | return 0; |
59 | } | 65 | } |
60 | 66 | ||
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
105 | 111 | ||
106 | pb->period = data->pwm_period_ns; | 112 | pb->period = data->pwm_period_ns; |
107 | pb->notify = data->notify; | 113 | pb->notify = data->notify; |
114 | pb->notify_after = data->notify_after; | ||
108 | pb->check_fb = data->check_fb; | 115 | pb->check_fb = data->check_fb; |
109 | pb->lth_brightness = data->lth_brightness * | 116 | pb->lth_brightness = data->lth_brightness * |
110 | (data->pwm_period_ns / data->max_brightness); | 117 | (data->pwm_period_ns / data->max_brightness); |
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev, | |||
172 | pb->notify(pb->dev, 0); | 179 | pb->notify(pb->dev, 0); |
173 | pwm_config(pb->pwm, 0, pb->period); | 180 | pwm_config(pb->pwm, 0, pb->period); |
174 | pwm_disable(pb->pwm); | 181 | pwm_disable(pb->pwm); |
182 | if (pb->notify_after) | ||
183 | pb->notify_after(pb->dev, 0); | ||
175 | return 0; | 184 | return 0; |
176 | } | 185 | } |
177 | 186 | ||
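The pwm_bl changes add an optional notify_after() hook that runs after the PWM has been reconfigured (including on suspend), mirroring the existing notify() hook that runs before. A sketch of how a board file might wire up both callbacks through platform data; the field names follow the patch, while the board-specific bodies are hypothetical.

#include <linux/device.h>
#include <linux/pwm_backlight.h>

static int myboard_bl_notify(struct device *dev, int brightness)
{
    dev_dbg(dev, "about to set brightness %d\n", brightness);
    /* e.g. power up the backlight supply before the PWM changes */
    return brightness;                      /* may remap the requested level */
}

static void myboard_bl_notify_after(struct device *dev, int brightness)
{
    dev_dbg(dev, "brightness now %d\n", brightness);
    /* e.g. cut the supply only after the PWM is already programmed off */
}

static struct platform_pwm_backlight_data myboard_backlight_data = {
    .pwm_id         = 0,
    .max_brightness = 255,
    .dft_brightness = 200,
    .pwm_period_ns  = 1000000,              /* illustrative period */
    .notify         = myboard_bl_notify,
    .notify_after   = myboard_bl_notify_after,
};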
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 02bf7bf7160b..b5abaae38e97 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * dscore.c | 2 | * dscore.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -1024,5 +1024,5 @@ module_init(ds_init); | |||
1024 | module_exit(ds_fini); | 1024 | module_exit(ds_fini); |
1025 | 1025 | ||
1026 | MODULE_LICENSE("GPL"); | 1026 | MODULE_LICENSE("GPL"); |
1027 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 1027 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
1028 | MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); | 1028 | MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); |
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 334d1ccf9c92..f667c26b2195 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * matrox_w1.c | 2 | * matrox_w1.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -39,7 +39,7 @@ | |||
39 | #include "../w1_log.h" | 39 | #include "../w1_log.h" |
40 | 40 | ||
41 | MODULE_LICENSE("GPL"); | 41 | MODULE_LICENSE("GPL"); |
42 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 42 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
43 | MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); | 43 | MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); |
44 | 44 | ||
45 | static struct pci_device_id matrox_w1_tbl[] = { | 45 | static struct pci_device_id matrox_w1_tbl[] = { |
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index c37781899d90..7c8cdb8aed26 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c | |||
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl) | |||
373 | static void w1_f29_remove_slave(struct w1_slave *sl) | 373 | static void w1_f29_remove_slave(struct w1_slave *sl) |
374 | { | 374 | { |
375 | int i; | 375 | int i; |
376 | for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) | 376 | for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) |
377 | sysfs_remove_bin_file(&sl->dev.kobj, | 377 | sysfs_remove_bin_file(&sl->dev.kobj, |
378 | &(w1_f29_sysfs_bin_files[i])); | 378 | &(w1_f29_sysfs_bin_files[i])); |
379 | } | 379 | } |
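The w1_ds2408 fix corrects a teardown loop whose condition (i <= 0 with i starting at NB_SYSFS_BIN_FILES) was false on entry, so none of the sysfs binary files were ever removed. A runnable reduction of the bug outside the kernel:

#include <stdio.h>

#define NFILES 3

static void remove_file(int i) { printf("removing file %d\n", i); }

int main(void)
{
    int i;

    /* Buggy: NFILES <= 0 is false immediately, so the body never runs. */
    for (i = NFILES; i <= 0; --i)
        remove_file(i);

    /* Fixed: count down from NFILES - 1 to 0, removing every file. */
    for (i = NFILES - 1; i >= 0; --i)
        remove_file(i);

    return 0;
}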
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c index cc8c02e92593..84655625c870 100644 --- a/drivers/w1/slaves/w1_smem.c +++ b/drivers/w1/slaves/w1_smem.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_smem.c | 2 | * w1_smem.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -32,7 +32,7 @@ | |||
32 | #include "../w1_family.h" | 32 | #include "../w1_family.h" |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 35 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
36 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); | 36 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); |
37 | 37 | ||
38 | static struct w1_family w1_smem_family_01 = { | 38 | static struct w1_family w1_smem_family_01 = { |
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 402928b135d1..a1ef9b5b38cf 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_therm.c | 2 | * w1_therm.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -34,7 +34,7 @@ | |||
34 | #include "../w1_family.h" | 34 | #include "../w1_family.h" |
35 | 35 | ||
36 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
37 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 37 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
38 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); | 38 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); |
39 | 39 | ||
40 | /* Allow the strong pullup to be disabled, but default to enabled. | 40 | /* Allow the strong pullup to be disabled, but default to enabled. |
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 6c136c19e982..c37497823851 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1.c | 2 | * w1.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -42,7 +42,7 @@ | |||
42 | #include "w1_netlink.h" | 42 | #include "w1_netlink.h" |
43 | 43 | ||
44 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
45 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 45 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
46 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); | 46 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); |
47 | 47 | ||
48 | static int w1_timeout = 10; | 48 | static int w1_timeout = 10; |
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 1ce23fc6186c..4d012ca3f32c 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1.h | 2 | * w1.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 4a099041f28a..63359797c8b1 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_family.c | 2 | * w1_family.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index 98a1ac0f4693..490cda2281bc 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_family.h | 2 | * w1_family.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b50be3f1073d..d220bce2cee4 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_int.c | 2 | * w1_int.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h index 4274082d2262..2ad7d4414bed 100644 --- a/drivers/w1/w1_int.h +++ b/drivers/w1/w1_int.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_int.h | 2 | * w1_int.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 8e8b64cfafb6..765b37b62a4f 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_io.c | 2 | * w1_io.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h index e6ab7cf08f88..9c7bd62e6bdc 100644 --- a/drivers/w1/w1_log.h +++ b/drivers/w1/w1_log.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_log.h | 2 | * w1_log.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 55aabd927c60..40788c925d1c 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_netlink.c | 2 | * w1_netlink.c |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 27e950f935b1..b0922dc29658 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_netlink.h | 2 | * w1_netlink.h |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 1b4afd81f872..6ea852e25162 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/kernel.h> | 70 | #include <linux/kernel.h> |
71 | #include <linux/mm.h> | 71 | #include <linux/mm.h> |
72 | #include <linux/mman.h> | 72 | #include <linux/mman.h> |
73 | #include <linux/module.h> | ||
73 | #include <linux/workqueue.h> | 74 | #include <linux/workqueue.h> |
74 | #include <xen/balloon.h> | 75 | #include <xen/balloon.h> |
75 | #include <xen/tmem.h> | 76 | #include <xen/tmem.h> |
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 475f9c597cb7..326dc08d3e3f 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h | |||
@@ -39,27 +39,17 @@ | |||
39 | 39 | ||
40 | /* #define DEBUG */ | 40 | /* #define DEBUG */ |
41 | 41 | ||
42 | #ifdef DEBUG | 42 | #define DPRINTK(fmt, ...) \ |
43 | #define DPRINTK(fmt, args...) \ | 43 | pr_debug("pid %d: %s: " fmt "\n", \ |
44 | do { \ | 44 | current->pid, __func__, ##__VA_ARGS__) |
45 | printk(KERN_DEBUG "pid %d: %s: " fmt "\n", \ | 45 | |
46 | current->pid, __func__, ##args); \ | 46 | #define AUTOFS_WARN(fmt, ...) \ |
47 | } while (0) | ||
48 | #else | ||
49 | #define DPRINTK(fmt, args...) do {} while (0) | ||
50 | #endif | ||
51 | |||
52 | #define AUTOFS_WARN(fmt, args...) \ | ||
53 | do { \ | ||
54 | printk(KERN_WARNING "pid %d: %s: " fmt "\n", \ | 47 | printk(KERN_WARNING "pid %d: %s: " fmt "\n", \ |
55 | current->pid, __func__, ##args); \ | 48 | current->pid, __func__, ##__VA_ARGS__) |
56 | } while (0) | ||
57 | 49 | ||
58 | #define AUTOFS_ERROR(fmt, args...) \ | 50 | #define AUTOFS_ERROR(fmt, ...) \ |
59 | do { \ | ||
60 | printk(KERN_ERR "pid %d: %s: " fmt "\n", \ | 51 | printk(KERN_ERR "pid %d: %s: " fmt "\n", \ |
61 | current->pid, __func__, ##args); \ | 52 | current->pid, __func__, ##__VA_ARGS__) |
62 | } while (0) | ||
63 | 53 | ||
64 | /* Unified info structure. This is pointed to by both the dentry and | 54 | /* Unified info structure. This is pointed to by both the dentry and |
65 | inode structures. Each file in the filesystem has an instance of this | 55 | inode structures. Each file in the filesystem has an instance of this |
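The autofs header rewrite drops the GCC-specific named variadic parameter (args...) in favour of standard ##__VA_ARGS__, and routes DPRINTK through pr_debug() so it is governed by dynamic debug rather than a local #ifdef DEBUG. A runnable userspace sketch of the same macro shape; the hard-coded pid stands in for current->pid.

#include <stdio.h>

/* ## before __VA_ARGS__ swallows the trailing comma when no extra arguments
 * are passed (a GNU extension the kernel relies on), so LOG("plain message")
 * compiles cleanly. */
#define LOG(fmt, ...) \
    fprintf(stderr, "pid %d: %s: " fmt "\n", 1234, __func__, ##__VA_ARGS__)

int main(void)
{
    LOG("starting up");                           /* no variadic arguments */
    LOG("mounted %s (err=%d)", "/mnt/auto", 0);
    return 0;
}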
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 25435987d6ae..e1fbdeef85db 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
@@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, | |||
104 | size_t pktsz; | 104 | size_t pktsz; |
105 | 105 | ||
106 | DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", | 106 | DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", |
107 | wq->wait_queue_token, wq->name.len, wq->name.name, type); | 107 | (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); |
108 | 108 | ||
109 | memset(&pkt,0,sizeof pkt); /* For security reasons */ | 109 | memset(&pkt,0,sizeof pkt); /* For security reasons */ |
110 | 110 | ||
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 54b8c28bebc8..720d885e8dca 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -474,17 +474,22 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
474 | befs_data_stream *data = &befs_ino->i_data.ds; | 474 | befs_data_stream *data = &befs_ino->i_data.ds; |
475 | befs_off_t len = data->size; | 475 | befs_off_t len = data->size; |
476 | 476 | ||
477 | befs_debug(sb, "Follow long symlink"); | 477 | if (len == 0) { |
478 | 478 | befs_error(sb, "Long symlink with illegal length"); | |
479 | link = kmalloc(len, GFP_NOFS); | ||
480 | if (!link) { | ||
481 | link = ERR_PTR(-ENOMEM); | ||
482 | } else if (befs_read_lsymlink(sb, data, link, len) != len) { | ||
483 | kfree(link); | ||
484 | befs_error(sb, "Failed to read entire long symlink"); | ||
485 | link = ERR_PTR(-EIO); | 479 | link = ERR_PTR(-EIO); |
486 | } else { | 480 | } else { |
487 | link[len - 1] = '\0'; | 481 | befs_debug(sb, "Follow long symlink"); |
482 | |||
483 | link = kmalloc(len, GFP_NOFS); | ||
484 | if (!link) { | ||
485 | link = ERR_PTR(-ENOMEM); | ||
486 | } else if (befs_read_lsymlink(sb, data, link, len) != len) { | ||
487 | kfree(link); | ||
488 | befs_error(sb, "Failed to read entire long symlink"); | ||
489 | link = ERR_PTR(-EIO); | ||
490 | } else { | ||
491 | link[len - 1] = '\0'; | ||
492 | } | ||
488 | } | 493 | } |
489 | } else { | 494 | } else { |
490 | link = befs_ino->i_data.symlink; | 495 | link = befs_ino->i_data.symlink; |
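The befs change rejects a long symlink whose on-disk length is zero before calling kmalloc(), avoiding a zero-size allocation and the out-of-bounds link[len - 1] store that would follow. A runnable sketch of the validate-then-allocate shape; read_link_data() is a made-up stand-in for befs_read_lsymlink().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for reading 'len' bytes of link data from disk; always succeeds here. */
static size_t read_link_data(char *buf, size_t len)
{
    memset(buf, 'x', len);
    return len;
}

static char *load_symlink(size_t len, int *err)
{
    char *link;

    if (len == 0) {             /* corrupted length: bail out before allocating */
        *err = -EIO;
        return NULL;
    }

    link = malloc(len);
    if (!link) {
        *err = -ENOMEM;
        return NULL;
    }
    if (read_link_data(link, len) != len) {
        free(link);
        *err = -EIO;
        return NULL;
    }
    link[len - 1] = '\0';       /* safe only because len >= 1 */
    *err = 0;
    return link;
}

int main(void)
{
    int err;
    char *l = load_symlink(0, &err);
    printf("len=0 -> link=%p err=%d\n", (void *)l, err);
    l = load_symlink(8, &err);
    printf("len=8 -> link=%p err=%d\n", (void *)l, err);
    free(l);
    return 0;
}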
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0469263e327e..03912c5c6f49 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1415,17 +1415,15 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); | |||
1415 | #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ | 1415 | #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ |
1416 | static inline u##bits btrfs_##name(struct extent_buffer *eb) \ | 1416 | static inline u##bits btrfs_##name(struct extent_buffer *eb) \ |
1417 | { \ | 1417 | { \ |
1418 | type *p = kmap_atomic(eb->first_page, KM_USER0); \ | 1418 | type *p = page_address(eb->first_page); \ |
1419 | u##bits res = le##bits##_to_cpu(p->member); \ | 1419 | u##bits res = le##bits##_to_cpu(p->member); \ |
1420 | kunmap_atomic(p, KM_USER0); \ | ||
1421 | return res; \ | 1420 | return res; \ |
1422 | } \ | 1421 | } \ |
1423 | static inline void btrfs_set_##name(struct extent_buffer *eb, \ | 1422 | static inline void btrfs_set_##name(struct extent_buffer *eb, \ |
1424 | u##bits val) \ | 1423 | u##bits val) \ |
1425 | { \ | 1424 | { \ |
1426 | type *p = kmap_atomic(eb->first_page, KM_USER0); \ | 1425 | type *p = page_address(eb->first_page); \ |
1427 | p->member = cpu_to_le##bits(val); \ | 1426 | p->member = cpu_to_le##bits(val); \ |
1428 | kunmap_atomic(p, KM_USER0); \ | ||
1429 | } | 1427 | } |
1430 | 1428 | ||
1431 | #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ | 1429 | #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ |
@@ -2367,8 +2365,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2367 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2365 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2368 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2366 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2369 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); | 2367 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); |
2370 | int btrfs_drop_snapshot(struct btrfs_root *root, | 2368 | void btrfs_drop_snapshot(struct btrfs_root *root, |
2371 | struct btrfs_block_rsv *block_rsv, int update_ref); | 2369 | struct btrfs_block_rsv *block_rsv, int update_ref); |
2372 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | 2370 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
2373 | struct btrfs_root *root, | 2371 | struct btrfs_root *root, |
2374 | struct extent_buffer *node, | 2372 | struct extent_buffer *node, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 66bac226944e..f5be06a2462f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -1782,6 +1782,9 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
1782 | 1782 | ||
1783 | 1783 | ||
1784 | for (i = 0; i < multi->num_stripes; i++, stripe++) { | 1784 | for (i = 0; i < multi->num_stripes; i++, stripe++) { |
1785 | if (!stripe->dev->can_discard) | ||
1786 | continue; | ||
1787 | |||
1785 | ret = btrfs_issue_discard(stripe->dev->bdev, | 1788 | ret = btrfs_issue_discard(stripe->dev->bdev, |
1786 | stripe->physical, | 1789 | stripe->physical, |
1787 | stripe->length); | 1790 | stripe->length); |
@@ -1789,11 +1792,16 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
1789 | discarded_bytes += stripe->length; | 1792 | discarded_bytes += stripe->length; |
1790 | else if (ret != -EOPNOTSUPP) | 1793 | else if (ret != -EOPNOTSUPP) |
1791 | break; | 1794 | break; |
1795 | |||
1796 | /* | ||
1797 | * Just in case we get back EOPNOTSUPP for some reason, | ||
1798 | * just ignore the return value so we don't screw up | ||
1799 | * people calling discard_extent. | ||
1800 | */ | ||
1801 | ret = 0; | ||
1792 | } | 1802 | } |
1793 | kfree(multi); | 1803 | kfree(multi); |
1794 | } | 1804 | } |
1795 | if (discarded_bytes && ret == -EOPNOTSUPP) | ||
1796 | ret = 0; | ||
1797 | 1805 | ||
1798 | if (actual_bytes) | 1806 | if (actual_bytes) |
1799 | *actual_bytes = discarded_bytes; | 1807 | *actual_bytes = discarded_bytes; |
@@ -6269,8 +6277,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans, | |||
6269 | * also make sure backrefs for the shared block and all lower level | 6277 | * also make sure backrefs for the shared block and all lower level |
6270 | * blocks are properly updated. | 6278 | * blocks are properly updated. |
6271 | */ | 6279 | */ |
6272 | int btrfs_drop_snapshot(struct btrfs_root *root, | 6280 | void btrfs_drop_snapshot(struct btrfs_root *root, |
6273 | struct btrfs_block_rsv *block_rsv, int update_ref) | 6281 | struct btrfs_block_rsv *block_rsv, int update_ref) |
6274 | { | 6282 | { |
6275 | struct btrfs_path *path; | 6283 | struct btrfs_path *path; |
6276 | struct btrfs_trans_handle *trans; | 6284 | struct btrfs_trans_handle *trans; |
@@ -6283,13 +6291,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6283 | int level; | 6291 | int level; |
6284 | 6292 | ||
6285 | path = btrfs_alloc_path(); | 6293 | path = btrfs_alloc_path(); |
6286 | if (!path) | 6294 | if (!path) { |
6287 | return -ENOMEM; | 6295 | err = -ENOMEM; |
6296 | goto out; | ||
6297 | } | ||
6288 | 6298 | ||
6289 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | 6299 | wc = kzalloc(sizeof(*wc), GFP_NOFS); |
6290 | if (!wc) { | 6300 | if (!wc) { |
6291 | btrfs_free_path(path); | 6301 | btrfs_free_path(path); |
6292 | return -ENOMEM; | 6302 | err = -ENOMEM; |
6303 | goto out; | ||
6293 | } | 6304 | } |
6294 | 6305 | ||
6295 | trans = btrfs_start_transaction(tree_root, 0); | 6306 | trans = btrfs_start_transaction(tree_root, 0); |
@@ -6318,7 +6329,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6318 | path->lowest_level = 0; | 6329 | path->lowest_level = 0; |
6319 | if (ret < 0) { | 6330 | if (ret < 0) { |
6320 | err = ret; | 6331 | err = ret; |
6321 | goto out; | 6332 | goto out_free; |
6322 | } | 6333 | } |
6323 | WARN_ON(ret > 0); | 6334 | WARN_ON(ret > 0); |
6324 | 6335 | ||
@@ -6425,11 +6436,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6425 | free_extent_buffer(root->commit_root); | 6436 | free_extent_buffer(root->commit_root); |
6426 | kfree(root); | 6437 | kfree(root); |
6427 | } | 6438 | } |
6428 | out: | 6439 | out_free: |
6429 | btrfs_end_transaction_throttle(trans, tree_root); | 6440 | btrfs_end_transaction_throttle(trans, tree_root); |
6430 | kfree(wc); | 6441 | kfree(wc); |
6431 | btrfs_free_path(path); | 6442 | btrfs_free_path(path); |
6432 | return err; | 6443 | out: |
6444 | if (err) | ||
6445 | btrfs_std_error(root->fs_info, err); | ||
6446 | return; | ||
6433 | } | 6447 | } |
6434 | 6448 | ||
6435 | /* | 6449 | /* |
@@ -6720,6 +6734,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |||
6720 | struct btrfs_space_info *space_info; | 6734 | struct btrfs_space_info *space_info; |
6721 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | 6735 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; |
6722 | struct btrfs_device *device; | 6736 | struct btrfs_device *device; |
6737 | u64 min_free; | ||
6738 | u64 dev_min = 1; | ||
6739 | u64 dev_nr = 0; | ||
6740 | int index; | ||
6723 | int full = 0; | 6741 | int full = 0; |
6724 | int ret = 0; | 6742 | int ret = 0; |
6725 | 6743 | ||
@@ -6729,8 +6747,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |||
6729 | if (!block_group) | 6747 | if (!block_group) |
6730 | return -1; | 6748 | return -1; |
6731 | 6749 | ||
6750 | min_free = btrfs_block_group_used(&block_group->item); | ||
6751 | |||
6732 | /* no bytes used, we're good */ | 6752 | /* no bytes used, we're good */ |
6733 | if (!btrfs_block_group_used(&block_group->item)) | 6753 | if (!min_free) |
6734 | goto out; | 6754 | goto out; |
6735 | 6755 | ||
6736 | space_info = block_group->space_info; | 6756 | space_info = block_group->space_info; |
@@ -6746,10 +6766,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |||
6746 | * all of the extents from this block group. If we can, we're good | 6766 | * all of the extents from this block group. If we can, we're good |
6747 | */ | 6767 | */ |
6748 | if ((space_info->total_bytes != block_group->key.offset) && | 6768 | if ((space_info->total_bytes != block_group->key.offset) && |
6749 | (space_info->bytes_used + space_info->bytes_reserved + | 6769 | (space_info->bytes_used + space_info->bytes_reserved + |
6750 | space_info->bytes_pinned + space_info->bytes_readonly + | 6770 | space_info->bytes_pinned + space_info->bytes_readonly + |
6751 | btrfs_block_group_used(&block_group->item) < | 6771 | min_free < space_info->total_bytes)) { |
6752 | space_info->total_bytes)) { | ||
6753 | spin_unlock(&space_info->lock); | 6772 | spin_unlock(&space_info->lock); |
6754 | goto out; | 6773 | goto out; |
6755 | } | 6774 | } |
@@ -6766,9 +6785,31 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |||
6766 | if (full) | 6785 | if (full) |
6767 | goto out; | 6786 | goto out; |
6768 | 6787 | ||
6788 | /* | ||
6789 | * index: | ||
6790 | * 0: raid10 | ||
6791 | * 1: raid1 | ||
6792 | * 2: dup | ||
6793 | * 3: raid0 | ||
6794 | * 4: single | ||
6795 | */ | ||
6796 | index = get_block_group_index(block_group); | ||
6797 | if (index == 0) { | ||
6798 | dev_min = 4; | ||
6799 | /* Divide by 2 */ | ||
6800 | min_free >>= 1; | ||
6801 | } else if (index == 1) { | ||
6802 | dev_min = 2; | ||
6803 | } else if (index == 2) { | ||
6804 | /* Multiply by 2 */ | ||
6805 | min_free <<= 1; | ||
6806 | } else if (index == 3) { | ||
6807 | dev_min = fs_devices->rw_devices; | ||
6808 | do_div(min_free, dev_min); | ||
6809 | } | ||
6810 | |||
6769 | mutex_lock(&root->fs_info->chunk_mutex); | 6811 | mutex_lock(&root->fs_info->chunk_mutex); |
6770 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { | 6812 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { |
6771 | u64 min_free = btrfs_block_group_used(&block_group->item); | ||
6772 | u64 dev_offset; | 6813 | u64 dev_offset; |
6773 | 6814 | ||
6774 | /* | 6815 | /* |
@@ -6779,7 +6820,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |||
6779 | ret = find_free_dev_extent(NULL, device, min_free, | 6820 | ret = find_free_dev_extent(NULL, device, min_free, |
6780 | &dev_offset, NULL); | 6821 | &dev_offset, NULL); |
6781 | if (!ret) | 6822 | if (!ret) |
6823 | dev_nr++; | ||
6824 | |||
6825 | if (dev_nr >= dev_min) | ||
6782 | break; | 6826 | break; |
6827 | |||
6783 | ret = -1; | 6828 | ret = -1; |
6784 | } | 6829 | } |
6785 | } | 6830 | } |
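The btrfs_can_relocate() rework turns "can this block group be relocated" into a per-RAID-profile requirement: how many devices are needed and how much free space each must offer. A runnable distillation of that table; the profile indices follow the comment in the patch.

#include <stdint.h>
#include <stdio.h>

enum { RAID10 = 0, RAID1 = 1, DUP = 2, RAID0 = 3, SINGLE = 4 };

/* Given the bytes used in the block group and the number of writable devices,
 * compute how many devices are needed and how much free space each must offer. */
static void relocation_requirement(int index, uint64_t used, uint64_t rw_devices,
                                   uint64_t *dev_min, uint64_t *min_free)
{
    *dev_min = 1;
    *min_free = used;

    switch (index) {
    case RAID10: *dev_min = 4; *min_free >>= 1; break;  /* striped and mirrored */
    case RAID1:  *dev_min = 2;                  break;  /* two full copies */
    case DUP:    *min_free <<= 1;               break;  /* both copies on one device */
    case RAID0:  *dev_min = rw_devices;
                 *min_free /= *dev_min;         break;  /* spread across all devices */
    default:                                    break;  /* single */
    }
}

int main(void)
{
    uint64_t dev_min, min_free;

    relocation_requirement(RAID1, 1 << 20, 3, &dev_min, &min_free);
    printf("raid1: need %llu devices with %llu bytes free each\n",
           (unsigned long long)dev_min, (unsigned long long)min_free);
    return 0;
}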
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 658d66959abe..e7872e485f13 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -150,6 +150,8 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, | |||
150 | spin_lock(&root->fs_info->defrag_inodes_lock); | 150 | spin_lock(&root->fs_info->defrag_inodes_lock); |
151 | if (!BTRFS_I(inode)->in_defrag) | 151 | if (!BTRFS_I(inode)->in_defrag) |
152 | __btrfs_add_inode_defrag(inode, defrag); | 152 | __btrfs_add_inode_defrag(inode, defrag); |
153 | else | ||
154 | kfree(defrag); | ||
153 | spin_unlock(&root->fs_info->defrag_inodes_lock); | 155 | spin_unlock(&root->fs_info->defrag_inodes_lock); |
154 | return 0; | 156 | return 0; |
155 | } | 157 | } |
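The first file.c hunk plugs a leak in btrfs_add_inode_defrag(): the caller allocates a defrag record, and when the inode is already flagged in_defrag the record was dropped without being freed. The general rule — whichever path does not take ownership of an allocation must free it — in a small runnable form, with invented names:

#include <stdbool.h>
#include <stdlib.h>

struct record { int id; };

static bool already_registered = false;   /* stands in for BTRFS_I(inode)->in_defrag */
static struct record *registry;           /* stands in for the defrag rbtree */

static void add_record(struct record *rec)
{
    if (!already_registered) {
        registry = rec;                   /* ownership transferred: do not free here */
        already_registered = true;
    } else {
        free(rec);                        /* not consumed: freeing here avoids the leak */
    }
}

int main(void)
{
    add_record(malloc(sizeof(struct record)));   /* consumed by the registry */
    add_record(malloc(sizeof(struct record)));   /* duplicate: freed, not leaked */
    free(registry);
    return 0;
}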
@@ -1638,11 +1640,15 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1638 | 1640 | ||
1639 | cur_offset = alloc_start; | 1641 | cur_offset = alloc_start; |
1640 | while (1) { | 1642 | while (1) { |
1643 | u64 actual_end; | ||
1644 | |||
1641 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | 1645 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, |
1642 | alloc_end - cur_offset, 0); | 1646 | alloc_end - cur_offset, 0); |
1643 | BUG_ON(IS_ERR_OR_NULL(em)); | 1647 | BUG_ON(IS_ERR_OR_NULL(em)); |
1644 | last_byte = min(extent_map_end(em), alloc_end); | 1648 | last_byte = min(extent_map_end(em), alloc_end); |
1649 | actual_end = min_t(u64, extent_map_end(em), offset + len); | ||
1645 | last_byte = (last_byte + mask) & ~mask; | 1650 | last_byte = (last_byte + mask) & ~mask; |
1651 | |||
1646 | if (em->block_start == EXTENT_MAP_HOLE || | 1652 | if (em->block_start == EXTENT_MAP_HOLE || |
1647 | (cur_offset >= inode->i_size && | 1653 | (cur_offset >= inode->i_size && |
1648 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | 1654 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { |
@@ -1655,6 +1661,16 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1655 | free_extent_map(em); | 1661 | free_extent_map(em); |
1656 | break; | 1662 | break; |
1657 | } | 1663 | } |
1664 | } else if (actual_end > inode->i_size && | ||
1665 | !(mode & FALLOC_FL_KEEP_SIZE)) { | ||
1666 | /* | ||
1667 | * We didn't need to allocate any more space, but we | ||
1668 | * still extended the size of the file so we need to | ||
1669 | * update i_size. | ||
1670 | */ | ||
1671 | inode->i_ctime = CURRENT_TIME; | ||
1672 | i_size_write(inode, actual_end); | ||
1673 | btrfs_ordered_update_i_size(inode, actual_end, NULL); | ||
1658 | } | 1674 | } |
1659 | free_extent_map(em); | 1675 | free_extent_map(em); |
1660 | 1676 | ||
@@ -1804,10 +1820,14 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) | |||
1804 | } | 1820 | } |
1805 | } | 1821 | } |
1806 | 1822 | ||
1807 | if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) | 1823 | if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { |
1808 | return -EINVAL; | 1824 | ret = -EINVAL; |
1809 | if (offset > inode->i_sb->s_maxbytes) | 1825 | goto out; |
1810 | return -EINVAL; | 1826 | } |
1827 | if (offset > inode->i_sb->s_maxbytes) { | ||
1828 | ret = -EINVAL; | ||
1829 | goto out; | ||
1830 | } | ||
1811 | 1831 | ||
1812 | /* Special lock needed here? */ | 1832 | /* Special lock needed here? */ |
1813 | if (offset != file->f_pos) { | 1833 | if (offset != file->f_pos) { |
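The btrfs_file_llseek() hunk converts two early returns into ret = -EINVAL; goto out; so error exits funnel through the function's common cleanup instead of skipping the unlock performed there. A runnable pthread rendition of the same single-exit shape:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static long checked_seek(long offset, long maxbytes)
{
    long ret = offset;

    pthread_mutex_lock(&lock);

    if (offset < 0) {            /* an early 'return -EINVAL;' here would leak the lock */
        ret = -EINVAL;
        goto out;
    }
    if (offset > maxbytes) {
        ret = -EINVAL;
        goto out;
    }

    /* ... update the file position under the lock ... */

out:
    pthread_mutex_unlock(&lock); /* single unlock path for every exit */
    return ret;
}

int main(void)
{
    printf("%ld\n", checked_seek(-5, 1 << 20));   /* -22 (EINVAL) */
    printf("%ld\n", checked_seek(100, 1 << 20));  /* 100 */
    return 0;
}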
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 6377713f639c..6a265b9f85f2 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -1168,9 +1168,9 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | |||
1168 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); | 1168 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); |
1169 | } | 1169 | } |
1170 | 1170 | ||
1171 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | 1171 | static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
1172 | struct btrfs_free_space *info, u64 offset, | 1172 | struct btrfs_free_space *info, |
1173 | u64 bytes) | 1173 | u64 offset, u64 bytes) |
1174 | { | 1174 | { |
1175 | unsigned long start, count; | 1175 | unsigned long start, count; |
1176 | 1176 | ||
@@ -1181,6 +1181,13 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | |||
1181 | bitmap_clear(info->bitmap, start, count); | 1181 | bitmap_clear(info->bitmap, start, count); |
1182 | 1182 | ||
1183 | info->bytes -= bytes; | 1183 | info->bytes -= bytes; |
1184 | } | ||
1185 | |||
1186 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | ||
1187 | struct btrfs_free_space *info, u64 offset, | ||
1188 | u64 bytes) | ||
1189 | { | ||
1190 | __bitmap_clear_bits(ctl, info, offset, bytes); | ||
1184 | ctl->free_space -= bytes; | 1191 | ctl->free_space -= bytes; |
1185 | } | 1192 | } |
1186 | 1193 | ||
@@ -1984,7 +1991,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1984 | return 0; | 1991 | return 0; |
1985 | 1992 | ||
1986 | ret = search_start; | 1993 | ret = search_start; |
1987 | bitmap_clear_bits(ctl, entry, ret, bytes); | 1994 | __bitmap_clear_bits(ctl, entry, ret, bytes); |
1988 | 1995 | ||
1989 | return ret; | 1996 | return ret; |
1990 | } | 1997 | } |
@@ -2039,7 +2046,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
2039 | continue; | 2046 | continue; |
2040 | } | 2047 | } |
2041 | } else { | 2048 | } else { |
2042 | |||
2043 | ret = entry->offset; | 2049 | ret = entry->offset; |
2044 | 2050 | ||
2045 | entry->offset += bytes; | 2051 | entry->offset += bytes; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 15fceefbca0a..0ccc7438ad34 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -7354,11 +7354,15 @@ static int btrfs_set_page_dirty(struct page *page) | |||
7354 | static int btrfs_permission(struct inode *inode, int mask) | 7354 | static int btrfs_permission(struct inode *inode, int mask) |
7355 | { | 7355 | { |
7356 | struct btrfs_root *root = BTRFS_I(inode)->root; | 7356 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7357 | umode_t mode = inode->i_mode; | ||
7357 | 7358 | ||
7358 | if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) | 7359 | if (mask & MAY_WRITE && |
7359 | return -EROFS; | 7360 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { |
7360 | if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) | 7361 | if (btrfs_root_readonly(root)) |
7361 | return -EACCES; | 7362 | return -EROFS; |
7363 | if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) | ||
7364 | return -EACCES; | ||
7365 | } | ||
7362 | return generic_permission(inode, mask); | 7366 | return generic_permission(inode, mask); |
7363 | } | 7367 | } |
7364 | 7368 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7cf013349941..970977aab224 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -2236,6 +2236,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
2236 | btrfs_wait_ordered_range(src, off, len); | 2236 | btrfs_wait_ordered_range(src, off, len); |
2237 | } | 2237 | } |
2238 | 2238 | ||
2239 | /* truncate page cache pages from target inode range */ | ||
2240 | truncate_inode_pages_range(&inode->i_data, off, | ||
2241 | ALIGN(off + len, PAGE_CACHE_SIZE) - 1); | ||
2242 | |||
2239 | /* clone data */ | 2243 | /* clone data */ |
2240 | key.objectid = btrfs_ino(src); | 2244 | key.objectid = btrfs_ino(src); |
2241 | key.type = BTRFS_EXTENT_DATA_KEY; | 2245 | key.type = BTRFS_EXTENT_DATA_KEY; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index babee65f8eda..786639fca067 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -799,14 +799,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
799 | struct extent_buffer *eb, int slot, | 799 | struct extent_buffer *eb, int slot, |
800 | struct btrfs_key *key) | 800 | struct btrfs_key *key) |
801 | { | 801 | { |
802 | struct inode *dir; | ||
803 | int ret; | ||
804 | struct btrfs_inode_ref *ref; | 802 | struct btrfs_inode_ref *ref; |
803 | struct btrfs_dir_item *di; | ||
804 | struct inode *dir; | ||
805 | struct inode *inode; | 805 | struct inode *inode; |
806 | char *name; | ||
807 | int namelen; | ||
808 | unsigned long ref_ptr; | 806 | unsigned long ref_ptr; |
809 | unsigned long ref_end; | 807 | unsigned long ref_end; |
808 | char *name; | ||
809 | int namelen; | ||
810 | int ret; | ||
810 | int search_done = 0; | 811 | int search_done = 0; |
811 | 812 | ||
812 | /* | 813 | /* |
@@ -909,6 +910,25 @@ again: | |||
909 | } | 910 | } |
910 | btrfs_release_path(path); | 911 | btrfs_release_path(path); |
911 | 912 | ||
913 | /* look for a conflicting sequence number */ | ||
914 | di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), | ||
915 | btrfs_inode_ref_index(eb, ref), | ||
916 | name, namelen, 0); | ||
917 | if (di && !IS_ERR(di)) { | ||
918 | ret = drop_one_dir_item(trans, root, path, dir, di); | ||
919 | BUG_ON(ret); | ||
920 | } | ||
921 | btrfs_release_path(path); | ||
922 | |||
923 | /* look for a conflicting name */ ||
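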
924 | di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), | ||
925 | name, namelen, 0); | ||
926 | if (di && !IS_ERR(di)) { | ||
927 | ret = drop_one_dir_item(trans, root, path, dir, di); | ||
928 | BUG_ON(ret); | ||
929 | } | ||
930 | btrfs_release_path(path); | ||
931 | |||
912 | insert: | 932 | insert: |
913 | /* insert our name */ | 933 | /* insert our name */ |
914 | ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, | 934 | ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 53875ae73ad4..f2a4cc79da61 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -142,6 +142,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) | |||
142 | unsigned long limit; | 142 | unsigned long limit; |
143 | unsigned long last_waited = 0; | 143 | unsigned long last_waited = 0; |
144 | int force_reg = 0; | 144 | int force_reg = 0; |
145 | int sync_pending = 0; | ||
145 | struct blk_plug plug; | 146 | struct blk_plug plug; |
146 | 147 | ||
147 | /* | 148 | /* |
@@ -229,6 +230,22 @@ loop_lock: | |||
229 | 230 | ||
230 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); | 231 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); |
231 | 232 | ||
233 | /* | ||
234 | * if we're doing the sync list, record that our | ||
235 | * plug has some sync requests on it | ||
236 | * | ||
237 | * If we're doing the regular list and there are | ||
238 | * sync requests sitting around, unplug before | ||
239 | * we add more | ||
240 | */ | ||
241 | if (pending_bios == &device->pending_sync_bios) { | ||
242 | sync_pending = 1; | ||
243 | } else if (sync_pending) { | ||
244 | blk_finish_plug(&plug); | ||
245 | blk_start_plug(&plug); | ||
246 | sync_pending = 0; | ||
247 | } | ||
248 | |||
232 | submit_bio(cur->bi_rw, cur); | 249 | submit_bio(cur->bi_rw, cur); |
233 | num_run++; | 250 | num_run++; |
234 | batch_run++; | 251 | batch_run++; |
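run_scheduled_bios() now remembers whether the current block plug is holding synchronous requests and, before piling regular requests on top of them, flushes and restarts the plug so the sync I/O is not delayed. A compressed sketch of that flush-and-restart pattern using the real blk_start_plug()/blk_finish_plug() and the era's submit_bio(rw, bio) call; the bio list walk and the on_sync_list flag are simplifications, not the btrfs code.

#include <linux/blkdev.h>

static void drain_bios(struct bio *bio_list, bool on_sync_list)
{
    struct blk_plug plug;
    struct bio *cur;
    int sync_pending = 0;

    blk_start_plug(&plug);
    for (cur = bio_list; cur; cur = cur->bi_next) {
        if (on_sync_list) {
            sync_pending = 1;            /* remember the plug holds sync requests */
        } else if (sync_pending) {
            blk_finish_plug(&plug);      /* push the queued sync I/O out now */
            blk_start_plug(&plug);
            sync_pending = 0;
        }
        submit_bio(cur->bi_rw, cur);
    }
    blk_finish_plug(&plug);
}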
@@ -500,6 +517,9 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
500 | fs_devices->rw_devices--; | 517 | fs_devices->rw_devices--; |
501 | } | 518 | } |
502 | 519 | ||
520 | if (device->can_discard) | ||
521 | fs_devices->num_can_discard--; | ||
522 | |||
503 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); | 523 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
504 | BUG_ON(!new_device); | 524 | BUG_ON(!new_device); |
505 | memcpy(new_device, device, sizeof(*new_device)); | 525 | memcpy(new_device, device, sizeof(*new_device)); |
@@ -508,6 +528,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
508 | new_device->bdev = NULL; | 528 | new_device->bdev = NULL; |
509 | new_device->writeable = 0; | 529 | new_device->writeable = 0; |
510 | new_device->in_fs_metadata = 0; | 530 | new_device->in_fs_metadata = 0; |
531 | new_device->can_discard = 0; | ||
511 | list_replace_rcu(&device->dev_list, &new_device->dev_list); | 532 | list_replace_rcu(&device->dev_list, &new_device->dev_list); |
512 | 533 | ||
513 | call_rcu(&device->rcu, free_device); | 534 | call_rcu(&device->rcu, free_device); |
@@ -547,6 +568,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
547 | static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | 568 | static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, |
548 | fmode_t flags, void *holder) | 569 | fmode_t flags, void *holder) |
549 | { | 570 | { |
571 | struct request_queue *q; | ||
550 | struct block_device *bdev; | 572 | struct block_device *bdev; |
551 | struct list_head *head = &fs_devices->devices; | 573 | struct list_head *head = &fs_devices->devices; |
552 | struct btrfs_device *device; | 574 | struct btrfs_device *device; |
@@ -603,6 +625,12 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
603 | seeding = 0; | 625 | seeding = 0; |
604 | } | 626 | } |
605 | 627 | ||
628 | q = bdev_get_queue(bdev); | ||
629 | if (blk_queue_discard(q)) { | ||
630 | device->can_discard = 1; | ||
631 | fs_devices->num_can_discard++; | ||
632 | } | ||
633 | |||
606 | device->bdev = bdev; | 634 | device->bdev = bdev; |
607 | device->in_fs_metadata = 0; | 635 | device->in_fs_metadata = 0; |
608 | device->mode = flags; | 636 | device->mode = flags; |
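When opening each device, the volumes.c changes probe the block layer for discard support and keep a per-filesystem count of discard-capable devices, which the extent-tree.c hunk above uses to skip stripes that cannot honour a discard. A minimal fragment of that capability probe built on the real bdev_get_queue()/blk_queue_discard() helpers; the two output parameters mirror the patch's device and fs_devices fields.

#include <linux/blkdev.h>

/* Sketch: mark a device as discard-capable while opening it. */
static void note_discard_support(struct block_device *bdev,
                                 int *can_discard, u64 *num_can_discard)
{
    struct request_queue *q = bdev_get_queue(bdev);

    if (blk_queue_discard(q)) {      /* queue advertises discard support */
        *can_discard = 1;
        (*num_can_discard)++;
    }
}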
@@ -835,6 +863,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, | |||
835 | 863 | ||
836 | max_hole_start = search_start; | 864 | max_hole_start = search_start; |
837 | max_hole_size = 0; | 865 | max_hole_size = 0; |
866 | hole_size = 0; | ||
838 | 867 | ||
839 | if (search_start >= search_end) { | 868 | if (search_start >= search_end) { |
840 | ret = -ENOSPC; | 869 | ret = -ENOSPC; |
@@ -917,7 +946,14 @@ next: | |||
917 | cond_resched(); | 946 | cond_resched(); |
918 | } | 947 | } |
919 | 948 | ||
920 | hole_size = search_end- search_start; | 949 | /* |
950 | * At this point, search_start should be the end of | ||
951 | * allocated dev extents, and when shrinking the device, | ||
952 | * search_end may be smaller than search_start. | ||
953 | */ | ||
954 | if (search_end > search_start) | ||
955 | hole_size = search_end - search_start; | ||
956 | |||
921 | if (hole_size > max_hole_size) { | 957 | if (hole_size > max_hole_size) { |
922 | max_hole_start = search_start; | 958 | max_hole_start = search_start; |
923 | max_hole_size = hole_size; | 959 | max_hole_size = hole_size; |
@@ -1543,6 +1579,7 @@ error: | |||
1543 | 1579 | ||
1544 | int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | 1580 | int btrfs_init_new_device(struct btrfs_root *root, char *device_path) |
1545 | { | 1581 | { |
1582 | struct request_queue *q; | ||
1546 | struct btrfs_trans_handle *trans; | 1583 | struct btrfs_trans_handle *trans; |
1547 | struct btrfs_device *device; | 1584 | struct btrfs_device *device; |
1548 | struct block_device *bdev; | 1585 | struct block_device *bdev; |
@@ -1612,6 +1649,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1612 | 1649 | ||
1613 | lock_chunks(root); | 1650 | lock_chunks(root); |
1614 | 1651 | ||
1652 | q = bdev_get_queue(bdev); | ||
1653 | if (blk_queue_discard(q)) | ||
1654 | device->can_discard = 1; | ||
1615 | device->writeable = 1; | 1655 | device->writeable = 1; |
1616 | device->work.func = pending_bios_fn; | 1656 | device->work.func = pending_bios_fn; |
1617 | generate_random_uuid(device->uuid); | 1657 | generate_random_uuid(device->uuid); |
@@ -1647,6 +1687,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1647 | root->fs_info->fs_devices->num_devices++; | 1687 | root->fs_info->fs_devices->num_devices++; |
1648 | root->fs_info->fs_devices->open_devices++; | 1688 | root->fs_info->fs_devices->open_devices++; |
1649 | root->fs_info->fs_devices->rw_devices++; | 1689 | root->fs_info->fs_devices->rw_devices++; |
1690 | if (device->can_discard) | ||
1691 | root->fs_info->fs_devices->num_can_discard++; | ||
1650 | root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; | 1692 | root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; |
1651 | 1693 | ||
1652 | if (!blk_queue_nonrot(bdev_get_queue(bdev))) | 1694 | if (!blk_queue_nonrot(bdev_get_queue(bdev))) |
@@ -2413,9 +2455,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2413 | total_avail = device->total_bytes - device->bytes_used; | 2455 | total_avail = device->total_bytes - device->bytes_used; |
2414 | else | 2456 | else |
2415 | total_avail = 0; | 2457 | total_avail = 0; |
2416 | /* avail is off by max(alloc_start, 1MB), but that is the same | 2458 | |
2417 | * for all devices, so it doesn't hurt the sorting later on | 2459 | /* If there is no space on this device, skip it. */ |
2418 | */ | 2460 | if (total_avail == 0) |
2461 | continue; | ||
2419 | 2462 | ||
2420 | ret = find_free_dev_extent(trans, device, | 2463 | ret = find_free_dev_extent(trans, device, |
2421 | max_stripe_size * dev_stripes, | 2464 | max_stripe_size * dev_stripes, |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7c12d61ae7ae..6d866db4e177 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -48,6 +48,7 @@ struct btrfs_device { | |||
48 | int writeable; | 48 | int writeable; |
49 | int in_fs_metadata; | 49 | int in_fs_metadata; |
50 | int missing; | 50 | int missing; |
51 | int can_discard; | ||
51 | 52 | ||
52 | spinlock_t io_lock; | 53 | spinlock_t io_lock; |
53 | 54 | ||
@@ -104,6 +105,7 @@ struct btrfs_fs_devices { | |||
104 | u64 rw_devices; | 105 | u64 rw_devices; |
105 | u64 missing_devices; | 106 | u64 missing_devices; |
106 | u64 total_rw_bytes; | 107 | u64 total_rw_bytes; |
108 | u64 num_can_discard; | ||
107 | struct block_device *latest_bdev; | 109 | struct block_device *latest_bdev; |
108 | 110 | ||
109 | /* all of the devices in the FS, protected by a mutex | 111 | /* all of the devices in the FS, protected by a mutex |
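The two btrfs hunks above follow one pattern: probe a per-device capability with blk_queue_discard() on the request queue, record it in a per-device flag (can_discard), and keep an aggregate counter (num_can_discard) on the fs_devices structure. Below is a minimal userspace sketch of that bookkeeping only — not kernel code — and every demo_* name is invented for illustration.

#include <stdio.h>
#include <stdbool.h>

struct demo_device { bool can_discard; };
struct demo_fs_devices { unsigned long num_can_discard; };

/* Record the capability once per device and keep a filesystem-wide tally. */
static void demo_open_device(struct demo_fs_devices *fs, struct demo_device *dev,
                             bool queue_supports_discard)
{
    if (queue_supports_discard) {
        dev->can_discard = true;
        fs->num_can_discard++;
    }
}

int main(void)
{
    struct demo_fs_devices fs = { 0 };
    struct demo_device ssd = { 0 }, hdd = { 0 };

    demo_open_device(&fs, &ssd, true);   /* queue advertises discard (e.g. an SSD) */
    demo_open_device(&fs, &hdd, false);  /* rotational disk without discard */
    printf("devices that can discard: %lu\n", fs.num_can_discard);
    return 0;
}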
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 2fe3cf13b2e9..6d40656e1e29 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -176,7 +176,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
176 | 176 | ||
177 | #ifdef CONFIG_CIFS_STATS2 | 177 | #ifdef CONFIG_CIFS_STATS2 |
178 | seq_printf(m, " In Send: %d In MaxReq Wait: %d", | 178 | seq_printf(m, " In Send: %d In MaxReq Wait: %d", |
179 | atomic_read(&server->inSend), | 179 | atomic_read(&server->in_send), |
180 | atomic_read(&server->num_waiters)); | 180 | atomic_read(&server->num_waiters)); |
181 | #endif | 181 | #endif |
182 | 182 | ||
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 21de1d6d5849..d0f59faefb78 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -991,24 +991,6 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, | |||
991 | return pntsd; | 991 | return pntsd; |
992 | } | 992 | } |
993 | 993 | ||
994 | static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid, | ||
995 | struct cifs_ntsd *pnntsd, u32 acllen) | ||
996 | { | ||
997 | int xid, rc; | ||
998 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | ||
999 | |||
1000 | if (IS_ERR(tlink)) | ||
1001 | return PTR_ERR(tlink); | ||
1002 | |||
1003 | xid = GetXid(); | ||
1004 | rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen); | ||
1005 | FreeXid(xid); | ||
1006 | cifs_put_tlink(tlink); | ||
1007 | |||
1008 | cFYI(DBG2, "SetCIFSACL rc = %d", rc); | ||
1009 | return rc; | ||
1010 | } | ||
1011 | |||
1012 | static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, | 994 | static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, |
1013 | struct cifs_ntsd *pnntsd, u32 acllen) | 995 | struct cifs_ntsd *pnntsd, u32 acllen) |
1014 | { | 996 | { |
@@ -1047,18 +1029,10 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, | |||
1047 | struct inode *inode, const char *path) | 1029 | struct inode *inode, const char *path) |
1048 | { | 1030 | { |
1049 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1031 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1050 | struct cifsFileInfo *open_file; | ||
1051 | int rc; | ||
1052 | 1032 | ||
1053 | cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); | 1033 | cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); |
1054 | 1034 | ||
1055 | open_file = find_readable_file(CIFS_I(inode), true); | 1035 | return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen); |
1056 | if (!open_file) | ||
1057 | return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen); | ||
1058 | |||
1059 | rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen); | ||
1060 | cifsFileInfo_put(open_file); | ||
1061 | return rc; | ||
1062 | } | 1036 | } |
1063 | 1037 | ||
1064 | /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ | 1038 |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index cb71dc1f94d1..95da8027983d 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
125 | extern const struct export_operations cifs_export_ops; | 125 | extern const struct export_operations cifs_export_ops; |
126 | #endif /* CIFS_NFSD_EXPORT */ | 126 | #endif /* CIFS_NFSD_EXPORT */ |
127 | 127 | ||
128 | #define CIFS_VERSION "1.74" | 128 | #define CIFS_VERSION "1.75" |
129 | #endif /* _CIFSFS_H */ | 129 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 38ce6d44b145..95dad9d14cf1 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -291,7 +291,7 @@ struct TCP_Server_Info { | |||
291 | struct fscache_cookie *fscache; /* client index cache cookie */ | 291 | struct fscache_cookie *fscache; /* client index cache cookie */ |
292 | #endif | 292 | #endif |
293 | #ifdef CONFIG_CIFS_STATS2 | 293 | #ifdef CONFIG_CIFS_STATS2 |
294 | atomic_t inSend; /* requests trying to send */ | 294 | atomic_t in_send; /* requests trying to send */ |
295 | atomic_t num_waiters; /* blocked waiting to get in sendrecv */ | 295 | atomic_t num_waiters; /* blocked waiting to get in sendrecv */ |
296 | #endif | 296 | #endif |
297 | }; | 297 | }; |
@@ -672,12 +672,54 @@ struct mid_q_entry { | |||
672 | bool multiEnd:1; /* both received */ | 672 | bool multiEnd:1; /* both received */ |
673 | }; | 673 | }; |
674 | 674 | ||
675 | struct oplock_q_entry { | 675 | /* Make code in transport.c a little cleaner by moving |
676 | struct list_head qhead; | 676 | update of optional stats into function below */ |
677 | struct inode *pinode; | 677 | #ifdef CONFIG_CIFS_STATS2 |
678 | struct cifs_tcon *tcon; | 678 | |
679 | __u16 netfid; | 679 | static inline void cifs_in_send_inc(struct TCP_Server_Info *server) |
680 | }; | 680 | { |
681 | atomic_inc(&server->in_send); | ||
682 | } | ||
683 | |||
684 | static inline void cifs_in_send_dec(struct TCP_Server_Info *server) | ||
685 | { | ||
686 | atomic_dec(&server->in_send); | ||
687 | } | ||
688 | |||
689 | static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server) | ||
690 | { | ||
691 | atomic_inc(&server->num_waiters); | ||
692 | } | ||
693 | |||
694 | static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server) | ||
695 | { | ||
696 | atomic_dec(&server->num_waiters); | ||
697 | } | ||
698 | |||
699 | static inline void cifs_save_when_sent(struct mid_q_entry *mid) | ||
700 | { | ||
701 | mid->when_sent = jiffies; | ||
702 | } | ||
703 | #else | ||
704 | static inline void cifs_in_send_inc(struct TCP_Server_Info *server) | ||
705 | { | ||
706 | } | ||
707 | static inline void cifs_in_send_dec(struct TCP_Server_Info *server) | ||
708 | { | ||
709 | } | ||
710 | |||
711 | static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server) | ||
712 | { | ||
713 | } | ||
714 | |||
715 | static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server) | ||
716 | { | ||
717 | } | ||
718 | |||
719 | static inline void cifs_save_when_sent(struct mid_q_entry *mid) | ||
720 | { | ||
721 | } | ||
722 | #endif | ||
681 | 723 | ||
682 | /* for pending dnotify requests */ | 724 | /* for pending dnotify requests */ |
683 | struct dir_notify_req { | 725 | struct dir_notify_req { |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 80c2e3add3a2..633c246b6775 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2878,7 +2878,8 @@ cleanup_volume_info_contents(struct smb_vol *volume_info) | |||
2878 | kfree(volume_info->username); | 2878 | kfree(volume_info->username); |
2879 | kzfree(volume_info->password); | 2879 | kzfree(volume_info->password); |
2880 | kfree(volume_info->UNC); | 2880 | kfree(volume_info->UNC); |
2881 | kfree(volume_info->UNCip); | 2881 | if (volume_info->UNCip != volume_info->UNC + 2) |
2882 | kfree(volume_info->UNCip); | ||
2882 | kfree(volume_info->domainname); | 2883 | kfree(volume_info->domainname); |
2883 | kfree(volume_info->iocharset); | 2884 | kfree(volume_info->iocharset); |
2884 | kfree(volume_info->prepath); | 2885 | kfree(volume_info->prepath); |
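The connect.c hunk above makes the kfree() of UNCip conditional because, on some mount paths, UNCip is not a separate allocation but a pointer two characters into the UNC string, so freeing both unconditionally would be an invalid free. A small userspace sketch of that aliasing hazard follows; the variable names and string are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *unc = malloc(32);
    char *ip;

    if (!unc)
        return 1;
    strcpy(unc, "\\\\server\\share");
    ip = unc + 2;                 /* aliases into unc, not a separate allocation */

    /* teardown: free ip only when it is an independent allocation */
    if (ip != unc + 2)
        free(ip);
    free(unc);
    printf("freed safely\n");
    return 0;
}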
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ae576fbb5142..72d448bf96ce 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -105,8 +105,8 @@ cifs_bp_rename_retry: | |||
105 | } | 105 | } |
106 | rcu_read_unlock(); | 106 | rcu_read_unlock(); |
107 | if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { | 107 | if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { |
108 | cERROR(1, "did not end path lookup where expected namelen is %d", | 108 | cFYI(1, "did not end path lookup where expected. namelen=%d " |
109 | namelen); | 109 | "dfsplen=%d", namelen, dfsplen); |
110 | /* presumably this is only possible if racing with a rename | 110 | /* presumably this is only possible if racing with a rename |
111 | of one of the parent directories (we can not lock the dentries | 111 | of one of the parent directories (we can not lock the dentries |
112 | above us to prevent this, but retrying should be harmless) */ | 112 | above us to prevent this, but retrying should be harmless) */ |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c1b9c4b10739..10ca6b2c26b7 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -266,15 +266,11 @@ static int wait_for_free_request(struct TCP_Server_Info *server, | |||
266 | while (1) { | 266 | while (1) { |
267 | if (atomic_read(&server->inFlight) >= cifs_max_pending) { | 267 | if (atomic_read(&server->inFlight) >= cifs_max_pending) { |
268 | spin_unlock(&GlobalMid_Lock); | 268 | spin_unlock(&GlobalMid_Lock); |
269 | #ifdef CONFIG_CIFS_STATS2 | 269 | cifs_num_waiters_inc(server); |
270 | atomic_inc(&server->num_waiters); | ||
271 | #endif | ||
272 | wait_event(server->request_q, | 270 | wait_event(server->request_q, |
273 | atomic_read(&server->inFlight) | 271 | atomic_read(&server->inFlight) |
274 | < cifs_max_pending); | 272 | < cifs_max_pending); |
275 | #ifdef CONFIG_CIFS_STATS2 | 273 | cifs_num_waiters_dec(server); |
276 | atomic_dec(&server->num_waiters); | ||
277 | #endif | ||
278 | spin_lock(&GlobalMid_Lock); | 274 | spin_lock(&GlobalMid_Lock); |
279 | } else { | 275 | } else { |
280 | if (server->tcpStatus == CifsExiting) { | 276 | if (server->tcpStatus == CifsExiting) { |
@@ -381,15 +377,13 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, | |||
381 | mid->callback = callback; | 377 | mid->callback = callback; |
382 | mid->callback_data = cbdata; | 378 | mid->callback_data = cbdata; |
383 | mid->midState = MID_REQUEST_SUBMITTED; | 379 | mid->midState = MID_REQUEST_SUBMITTED; |
384 | #ifdef CONFIG_CIFS_STATS2 | 380 | |
385 | atomic_inc(&server->inSend); | 381 | cifs_in_send_inc(server); |
386 | #endif | ||
387 | rc = smb_sendv(server, iov, nvec); | 382 | rc = smb_sendv(server, iov, nvec); |
388 | #ifdef CONFIG_CIFS_STATS2 | 383 | cifs_in_send_dec(server); |
389 | atomic_dec(&server->inSend); | 384 | cifs_save_when_sent(mid); |
390 | mid->when_sent = jiffies; | ||
391 | #endif | ||
392 | mutex_unlock(&server->srv_mutex); | 385 | mutex_unlock(&server->srv_mutex); |
386 | |||
393 | if (rc) | 387 | if (rc) |
394 | goto out_err; | 388 | goto out_err; |
395 | 389 | ||
@@ -575,14 +569,10 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
575 | } | 569 | } |
576 | 570 | ||
577 | midQ->midState = MID_REQUEST_SUBMITTED; | 571 | midQ->midState = MID_REQUEST_SUBMITTED; |
578 | #ifdef CONFIG_CIFS_STATS2 | 572 | cifs_in_send_inc(ses->server); |
579 | atomic_inc(&ses->server->inSend); | ||
580 | #endif | ||
581 | rc = smb_sendv(ses->server, iov, n_vec); | 573 | rc = smb_sendv(ses->server, iov, n_vec); |
582 | #ifdef CONFIG_CIFS_STATS2 | 574 | cifs_in_send_dec(ses->server); |
583 | atomic_dec(&ses->server->inSend); | 575 | cifs_save_when_sent(midQ); |
584 | midQ->when_sent = jiffies; | ||
585 | #endif | ||
586 | 576 | ||
587 | mutex_unlock(&ses->server->srv_mutex); | 577 | mutex_unlock(&ses->server->srv_mutex); |
588 | 578 | ||
@@ -703,14 +693,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, | |||
703 | } | 693 | } |
704 | 694 | ||
705 | midQ->midState = MID_REQUEST_SUBMITTED; | 695 | midQ->midState = MID_REQUEST_SUBMITTED; |
706 | #ifdef CONFIG_CIFS_STATS2 | 696 | |
707 | atomic_inc(&ses->server->inSend); | 697 | cifs_in_send_inc(ses->server); |
708 | #endif | ||
709 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); | 698 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); |
710 | #ifdef CONFIG_CIFS_STATS2 | 699 | cifs_in_send_dec(ses->server); |
711 | atomic_dec(&ses->server->inSend); | 700 | cifs_save_when_sent(midQ); |
712 | midQ->when_sent = jiffies; | ||
713 | #endif | ||
714 | mutex_unlock(&ses->server->srv_mutex); | 701 | mutex_unlock(&ses->server->srv_mutex); |
715 | 702 | ||
716 | if (rc < 0) | 703 | if (rc < 0) |
@@ -843,14 +830,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
843 | } | 830 | } |
844 | 831 | ||
845 | midQ->midState = MID_REQUEST_SUBMITTED; | 832 | midQ->midState = MID_REQUEST_SUBMITTED; |
846 | #ifdef CONFIG_CIFS_STATS2 | 833 | cifs_in_send_inc(ses->server); |
847 | atomic_inc(&ses->server->inSend); | ||
848 | #endif | ||
849 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); | 834 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); |
850 | #ifdef CONFIG_CIFS_STATS2 | 835 | cifs_in_send_dec(ses->server); |
851 | atomic_dec(&ses->server->inSend); | 836 | cifs_save_when_sent(midQ); |
852 | midQ->when_sent = jiffies; | ||
853 | #endif | ||
854 | mutex_unlock(&ses->server->srv_mutex); | 837 | mutex_unlock(&ses->server->srv_mutex); |
855 | 838 | ||
856 | if (rc < 0) { | 839 | if (rc < 0) { |
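The cifsglob.h and transport.c hunks above replace the repeated #ifdef CONFIG_CIFS_STATS2 blocks at every send site with inline helpers that compile to no-ops when the statistics are disabled. The self-contained sketch below shows the same pattern with invented demo_* names and a DEMO_STATS macro standing in for the real config option; build it with and without -DDEMO_STATS to see the call sites stay identical either way.

#include <stdio.h>

struct demo_server { int in_send; };

#ifdef DEMO_STATS
static inline void demo_in_send_inc(struct demo_server *s) { s->in_send++; }
static inline void demo_in_send_dec(struct demo_server *s) { s->in_send--; }
#else
/* Stats disabled: the helpers vanish, callers need no #ifdefs. */
static inline void demo_in_send_inc(struct demo_server *s) { (void)s; }
static inline void demo_in_send_dec(struct demo_server *s) { (void)s; }
#endif

static void demo_send(struct demo_server *s)
{
    demo_in_send_inc(s);
    printf("in flight during send: %d\n", s->in_send);  /* 1 with -DDEMO_STATS, else 0 */
    demo_in_send_dec(s);
}

int main(void)
{
    struct demo_server srv = { 0 };

    demo_send(&srv);
    return 0;
}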
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 8be086e9abe4..51352de88ef1 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -1003,6 +1003,7 @@ COMPATIBLE_IOCTL(PPPIOCCONNECT) | |||
1003 | COMPATIBLE_IOCTL(PPPIOCDISCONN) | 1003 | COMPATIBLE_IOCTL(PPPIOCDISCONN) |
1004 | COMPATIBLE_IOCTL(PPPIOCATTCHAN) | 1004 | COMPATIBLE_IOCTL(PPPIOCATTCHAN) |
1005 | COMPATIBLE_IOCTL(PPPIOCGCHAN) | 1005 | COMPATIBLE_IOCTL(PPPIOCGCHAN) |
1006 | COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS) | ||
1006 | /* PPPOX */ | 1007 | /* PPPOX */ |
1007 | COMPATIBLE_IOCTL(PPPOEIOCSFWD) | 1008 | COMPATIBLE_IOCTL(PPPOEIOCSFWD) |
1008 | COMPATIBLE_IOCTL(PPPOEIOCDFWD) | 1009 | COMPATIBLE_IOCTL(PPPOEIOCDFWD) |
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig index 1cd6d9d3e29a..cc16562654de 100644 --- a/fs/ecryptfs/Kconfig +++ b/fs/ecryptfs/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config ECRYPT_FS | 1 | config ECRYPT_FS |
2 | tristate "eCrypt filesystem layer support (EXPERIMENTAL)" | 2 | tristate "eCrypt filesystem layer support (EXPERIMENTAL)" |
3 | depends on EXPERIMENTAL && KEYS && CRYPTO | 3 | depends on EXPERIMENTAL && KEYS && CRYPTO && (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n) |
4 | select CRYPTO_ECB | 4 | select CRYPTO_ECB |
5 | select CRYPTO_CBC | 5 | select CRYPTO_CBC |
6 | select CRYPTO_MD5 | 6 | select CRYPTO_MD5 |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 08a2b52bf565..ac1ad48c2376 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -1973,7 +1973,7 @@ pki_encrypt_session_key(struct key *auth_tok_key, | |||
1973 | { | 1973 | { |
1974 | struct ecryptfs_msg_ctx *msg_ctx = NULL; | 1974 | struct ecryptfs_msg_ctx *msg_ctx = NULL; |
1975 | char *payload = NULL; | 1975 | char *payload = NULL; |
1976 | size_t payload_len; | 1976 | size_t payload_len = 0; |
1977 | struct ecryptfs_message *msg; | 1977 | struct ecryptfs_message *msg; |
1978 | int rc; | 1978 | int rc; |
1979 | 1979 | ||
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 9f1bb747d77d..b4a6befb1216 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -175,6 +175,7 @@ enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, | |||
175 | ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, | 175 | ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, |
176 | ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, | 176 | ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, |
177 | ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, | 177 | ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, |
178 | ecryptfs_opt_check_dev_ruid, | ||
178 | ecryptfs_opt_err }; | 179 | ecryptfs_opt_err }; |
179 | 180 | ||
180 | static const match_table_t tokens = { | 181 | static const match_table_t tokens = { |
@@ -191,6 +192,7 @@ static const match_table_t tokens = { | |||
191 | {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, | 192 | {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, |
192 | {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, | 193 | {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, |
193 | {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, | 194 | {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, |
195 | {ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"}, | ||
194 | {ecryptfs_opt_err, NULL} | 196 | {ecryptfs_opt_err, NULL} |
195 | }; | 197 | }; |
196 | 198 | ||
@@ -236,6 +238,7 @@ static void ecryptfs_init_mount_crypt_stat( | |||
236 | * ecryptfs_parse_options | 238 | * ecryptfs_parse_options |
237 | * @sb: The ecryptfs super block | 239 | * @sb: The ecryptfs super block |
238 | * @options: The options passed to the kernel | 240 | * @options: The options passed to the kernel |
241 | * @check_ruid: set to 1 if device uid should be checked against the ruid | ||
239 | * | 242 | * |
240 | * Parse mount options: | 243 | * Parse mount options: |
241 | * debug=N - ecryptfs_verbosity level for debug output | 244 | * debug=N - ecryptfs_verbosity level for debug output |
@@ -251,7 +254,8 @@ static void ecryptfs_init_mount_crypt_stat( | |||
251 | * | 254 | * |
252 | * Returns zero on success; non-zero on error | 255 | * Returns zero on success; non-zero on error |
253 | */ | 256 | */ |
254 | static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) | 257 | static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, |
258 | uid_t *check_ruid) | ||
255 | { | 259 | { |
256 | char *p; | 260 | char *p; |
257 | int rc = 0; | 261 | int rc = 0; |
@@ -276,6 +280,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) | |||
276 | char *cipher_key_bytes_src; | 280 | char *cipher_key_bytes_src; |
277 | char *fn_cipher_key_bytes_src; | 281 | char *fn_cipher_key_bytes_src; |
278 | 282 | ||
283 | *check_ruid = 0; | ||
284 | |||
279 | if (!options) { | 285 | if (!options) { |
280 | rc = -EINVAL; | 286 | rc = -EINVAL; |
281 | goto out; | 287 | goto out; |
@@ -380,6 +386,9 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) | |||
380 | mount_crypt_stat->flags |= | 386 | mount_crypt_stat->flags |= |
381 | ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; | 387 | ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; |
382 | break; | 388 | break; |
389 | case ecryptfs_opt_check_dev_ruid: | ||
390 | *check_ruid = 1; | ||
391 | break; | ||
383 | case ecryptfs_opt_err: | 392 | case ecryptfs_opt_err: |
384 | default: | 393 | default: |
385 | printk(KERN_WARNING | 394 | printk(KERN_WARNING |
@@ -475,6 +484,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags | |||
475 | const char *err = "Getting sb failed"; | 484 | const char *err = "Getting sb failed"; |
476 | struct inode *inode; | 485 | struct inode *inode; |
477 | struct path path; | 486 | struct path path; |
487 | uid_t check_ruid; | ||
478 | int rc; | 488 | int rc; |
479 | 489 | ||
480 | sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); | 490 | sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); |
@@ -483,7 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags | |||
483 | goto out; | 493 | goto out; |
484 | } | 494 | } |
485 | 495 | ||
486 | rc = ecryptfs_parse_options(sbi, raw_data); | 496 | rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); |
487 | if (rc) { | 497 | if (rc) { |
488 | err = "Error parsing options"; | 498 | err = "Error parsing options"; |
489 | goto out; | 499 | goto out; |
@@ -521,6 +531,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags | |||
521 | "known incompatibilities\n"); | 531 | "known incompatibilities\n"); |
522 | goto out_free; | 532 | goto out_free; |
523 | } | 533 | } |
534 | |||
535 | if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { | ||
536 | rc = -EPERM; | ||
537 | printk(KERN_ERR "Mount of device (uid: %d) not owned by " | ||
538 | "requested user (uid: %d)\n", | ||
539 | path.dentry->d_inode->i_uid, current_uid()); | ||
540 | goto out_free; | ||
541 | } | ||
542 | |||
524 | ecryptfs_set_superblock_lower(s, path.dentry->d_sb); | 543 | ecryptfs_set_superblock_lower(s, path.dentry->d_sb); |
525 | s->s_maxbytes = path.dentry->d_sb->s_maxbytes; | 544 | s->s_maxbytes = path.dentry->d_sb->s_maxbytes; |
526 | s->s_blocksize = path.dentry->d_sb->s_blocksize; | 545 | s->s_blocksize = path.dentry->d_sb->s_blocksize; |
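The main.c hunks above add the ecryptfs_check_dev_ruid mount option: when it is given, the mount is refused unless the lower path is owned by the real uid of the mounting process. The following is only a rough userspace analogue of that ownership check, not the mount-time code itself; check_dev_ruid() here is an invented helper.

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Return 0 when the path is owned by the caller's real uid, -1 otherwise. */
static int check_dev_ruid(const char *lower_path)
{
    struct stat st;

    if (stat(lower_path, &st) != 0)
        return -1;
    if (st.st_uid != getuid()) {
        fprintf(stderr, "path uid %u does not match ruid %u\n",
                (unsigned)st.st_uid, (unsigned)getuid());
        return -1;
    }
    return 0;
}

int main(int argc, char **argv)
{
    if (argc < 2)
        return 1;
    return check_dev_ruid(argv[1]) == 0 ? 0 : 1;
}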
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 85d430963116..3745f7c2b9c2 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c | |||
@@ -39,15 +39,16 @@ | |||
39 | int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, | 39 | int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, |
40 | loff_t offset, size_t size) | 40 | loff_t offset, size_t size) |
41 | { | 41 | { |
42 | struct ecryptfs_inode_info *inode_info; | 42 | struct file *lower_file; |
43 | mm_segment_t fs_save; | 43 | mm_segment_t fs_save; |
44 | ssize_t rc; | 44 | ssize_t rc; |
45 | 45 | ||
46 | inode_info = ecryptfs_inode_to_private(ecryptfs_inode); | 46 | lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; |
47 | BUG_ON(!inode_info->lower_file); | 47 | if (!lower_file) |
48 | return -EIO; | ||
48 | fs_save = get_fs(); | 49 | fs_save = get_fs(); |
49 | set_fs(get_ds()); | 50 | set_fs(get_ds()); |
50 | rc = vfs_write(inode_info->lower_file, data, size, &offset); | 51 | rc = vfs_write(lower_file, data, size, &offset); |
51 | set_fs(fs_save); | 52 | set_fs(fs_save); |
52 | mark_inode_dirty_sync(ecryptfs_inode); | 53 | mark_inode_dirty_sync(ecryptfs_inode); |
53 | return rc; | 54 | return rc; |
@@ -225,15 +226,16 @@ out: | |||
225 | int ecryptfs_read_lower(char *data, loff_t offset, size_t size, | 226 | int ecryptfs_read_lower(char *data, loff_t offset, size_t size, |
226 | struct inode *ecryptfs_inode) | 227 | struct inode *ecryptfs_inode) |
227 | { | 228 | { |
228 | struct ecryptfs_inode_info *inode_info = | 229 | struct file *lower_file; |
229 | ecryptfs_inode_to_private(ecryptfs_inode); | ||
230 | mm_segment_t fs_save; | 230 | mm_segment_t fs_save; |
231 | ssize_t rc; | 231 | ssize_t rc; |
232 | 232 | ||
233 | BUG_ON(!inode_info->lower_file); | 233 | lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; |
234 | if (!lower_file) | ||
235 | return -EIO; | ||
234 | fs_save = get_fs(); | 236 | fs_save = get_fs(); |
235 | set_fs(get_ds()); | 237 | set_fs(get_ds()); |
236 | rc = vfs_read(inode_info->lower_file, data, size, &offset); | 238 | rc = vfs_read(lower_file, data, size, &offset); |
237 | set_fs(fs_save); | 239 | set_fs(fs_save); |
238 | return rc; | 240 | return rc; |
239 | } | 241 | } |
diff --git a/fs/exec.c b/fs/exec.c | |||
@@ -1459,6 +1459,23 @@ static int do_execve_common(const char *filename, | |||
1459 | struct files_struct *displaced; | 1459 | struct files_struct *displaced; |
1460 | bool clear_in_exec; | 1460 | bool clear_in_exec; |
1461 | int retval; | 1461 | int retval; |
1462 | const struct cred *cred = current_cred(); | ||
1463 | |||
1464 | /* | ||
1465 | * We move the actual failure in case of RLIMIT_NPROC excess from | ||
1466 | * set*uid() to execve() because too many poorly written programs | ||
1467 | * don't check setuid() return code. Here we additionally recheck | ||
1468 | * whether NPROC limit is still exceeded. | ||
1469 | */ | ||
1470 | if ((current->flags & PF_NPROC_EXCEEDED) && | ||
1471 | atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) { | ||
1472 | retval = -EAGAIN; | ||
1473 | goto out_ret; | ||
1474 | } | ||
1475 | |||
1476 | /* We're below the limit (still or again), so we don't want to make | ||
1477 | * further execve() calls fail. */ | ||
1478 | current->flags &= ~PF_NPROC_EXCEEDED; | ||
1462 | 1479 | ||
1463 | retval = unshare_files(&displaced); | 1480 | retval = unshare_files(&displaced); |
1464 | if (retval) | 1481 | if (retval) |
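The comment in the hunk above explains why the RLIMIT_NPROC failure is deferred from set*uid() to execve(): too many setuid programs ignore the setuid() return value. The sketch below mimics the recheck in userspace; count_user_processes() is a made-up stand-in for the kernel's cred->user->processes counter, and the whole thing is an illustration rather than the kernel's logic.

#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

static long count_user_processes(void)
{
    return 42;  /* placeholder; the kernel tracks this per user credential */
}

/* Fail with -EAGAIN when the user's process count exceeds RLIMIT_NPROC. */
static int recheck_nproc(void)
{
    struct rlimit rl;

    if (getrlimit(RLIMIT_NPROC, &rl) != 0)
        return -errno;
    if (rl.rlim_cur != RLIM_INFINITY &&
        (unsigned long)count_user_processes() > rl.rlim_cur)
        return -EAGAIN;
    return 0;
}

int main(void)
{
    printf("recheck_nproc() = %d\n", recheck_nproc());
    return 0;
}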
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 6e18a0b7750d..5571708b6a58 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
@@ -2209,9 +2209,11 @@ static int ext3_symlink (struct inode * dir, | |||
2209 | /* | 2209 | /* |
2210 | * For non-fast symlinks, we just allocate inode and put it on | 2210 | * For non-fast symlinks, we just allocate inode and put it on |
2211 | * orphan list in the first transaction => we need bitmap, | 2211 | * orphan list in the first transaction => we need bitmap, |
2212 | * group descriptor, sb, inode block, quota blocks. | 2212 | * group descriptor, sb, inode block, quota blocks, and |
2213 | * possibly selinux xattr blocks. | ||
2213 | */ | 2214 | */ |
2214 | credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); | 2215 | credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + |
2216 | EXT3_XATTR_TRANS_BLOCKS; | ||
2215 | } else { | 2217 | } else { |
2216 | /* | 2218 | /* |
2217 | * Fast symlink. We have to add entry to directory | 2219 | * Fast symlink. We have to add entry to directory |
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index bb85757689b6..5802fa1dab18 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h | |||
@@ -289,10 +289,10 @@ static inline int ext4_should_order_data(struct inode *inode) | |||
289 | 289 | ||
290 | static inline int ext4_should_writeback_data(struct inode *inode) | 290 | static inline int ext4_should_writeback_data(struct inode *inode) |
291 | { | 291 | { |
292 | if (!S_ISREG(inode->i_mode)) | ||
293 | return 0; | ||
294 | if (EXT4_JOURNAL(inode) == NULL) | 292 | if (EXT4_JOURNAL(inode) == NULL) |
295 | return 1; | 293 | return 1; |
294 | if (!S_ISREG(inode->i_mode)) | ||
295 | return 0; | ||
296 | if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) | 296 | if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) |
297 | return 0; | 297 | return 0; |
298 | if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) | 298 | if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) |
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b8602cde5b5a..0962642119c0 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c | |||
@@ -800,12 +800,17 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | |||
800 | } | 800 | } |
801 | 801 | ||
802 | retry: | 802 | retry: |
803 | if (rw == READ && ext4_should_dioread_nolock(inode)) | 803 | if (rw == READ && ext4_should_dioread_nolock(inode)) { |
804 | if (unlikely(!list_empty(&ei->i_completed_io_list))) { | ||
805 | mutex_lock(&inode->i_mutex); | ||
806 | ext4_flush_completed_IO(inode); | ||
807 | mutex_unlock(&inode->i_mutex); | ||
808 | } | ||
804 | ret = __blockdev_direct_IO(rw, iocb, inode, | 809 | ret = __blockdev_direct_IO(rw, iocb, inode, |
805 | inode->i_sb->s_bdev, iov, | 810 | inode->i_sb->s_bdev, iov, |
806 | offset, nr_segs, | 811 | offset, nr_segs, |
807 | ext4_get_block, NULL, NULL, 0); | 812 | ext4_get_block, NULL, NULL, 0); |
808 | else { | 813 | } else { |
809 | ret = blockdev_direct_IO(rw, iocb, inode, iov, | 814 | ret = blockdev_direct_IO(rw, iocb, inode, iov, |
810 | offset, nr_segs, ext4_get_block); | 815 | offset, nr_segs, ext4_get_block); |
811 | 816 | ||
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d47264cafee0..c4da98a959ae 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -120,6 +120,12 @@ void ext4_evict_inode(struct inode *inode) | |||
120 | int err; | 120 | int err; |
121 | 121 | ||
122 | trace_ext4_evict_inode(inode); | 122 | trace_ext4_evict_inode(inode); |
123 | |||
124 | mutex_lock(&inode->i_mutex); | ||
125 | ext4_flush_completed_IO(inode); | ||
126 | mutex_unlock(&inode->i_mutex); | ||
127 | ext4_ioend_wait(inode); | ||
128 | |||
123 | if (inode->i_nlink) { | 129 | if (inode->i_nlink) { |
124 | /* | 130 | /* |
125 | * When journalling data dirty buffers are tracked only in the | 131 | * When journalling data dirty buffers are tracked only in the |
@@ -983,6 +989,8 @@ static int ext4_journalled_write_end(struct file *file, | |||
983 | from = pos & (PAGE_CACHE_SIZE - 1); | 989 | from = pos & (PAGE_CACHE_SIZE - 1); |
984 | to = from + len; | 990 | to = from + len; |
985 | 991 | ||
992 | BUG_ON(!ext4_handle_valid(handle)); | ||
993 | |||
986 | if (copied < len) { | 994 | if (copied < len) { |
987 | if (!PageUptodate(page)) | 995 | if (!PageUptodate(page)) |
988 | copied = 0; | 996 | copied = 0; |
@@ -1283,7 +1291,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd, | |||
1283 | else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) | 1291 | else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) |
1284 | err = ext4_bio_write_page(&io_submit, page, | 1292 | err = ext4_bio_write_page(&io_submit, page, |
1285 | len, mpd->wbc); | 1293 | len, mpd->wbc); |
1286 | else | 1294 | else if (buffer_uninit(page_bufs)) { |
1295 | ext4_set_bh_endio(page_bufs, inode); | ||
1296 | err = block_write_full_page_endio(page, | ||
1297 | noalloc_get_block_write, | ||
1298 | mpd->wbc, ext4_end_io_buffer_write); | ||
1299 | } else | ||
1287 | err = block_write_full_page(page, | 1300 | err = block_write_full_page(page, |
1288 | noalloc_get_block_write, mpd->wbc); | 1301 | noalloc_get_block_write, mpd->wbc); |
1289 | 1302 | ||
@@ -1699,6 +1712,8 @@ static int __ext4_journalled_writepage(struct page *page, | |||
1699 | goto out; | 1712 | goto out; |
1700 | } | 1713 | } |
1701 | 1714 | ||
1715 | BUG_ON(!ext4_handle_valid(handle)); | ||
1716 | |||
1702 | ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, | 1717 | ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, |
1703 | do_journal_get_write_access); | 1718 | do_journal_get_write_access); |
1704 | 1719 | ||
@@ -2668,8 +2683,15 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) | |||
2668 | goto out; | 2683 | goto out; |
2669 | } | 2684 | } |
2670 | 2685 | ||
2671 | io_end->flag = EXT4_IO_END_UNWRITTEN; | 2686 | /* |
2687 | * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now, | ||
2688 | * but the extra care is harmless and guards against future changes. | ||
2689 | */ | ||
2672 | inode = io_end->inode; | 2690 | inode = io_end->inode; |
2691 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { | ||
2692 | io_end->flag |= EXT4_IO_END_UNWRITTEN; | ||
2693 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); | ||
2694 | } | ||
2673 | 2695 | ||
2674 | /* Add the io_end to per-inode completed io list*/ | 2696 | /* Add the io_end to per-inode completed io list*/ |
2675 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); | 2697 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 565a154e22d4..f8068c7bae9f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -2253,9 +2253,11 @@ static int ext4_symlink(struct inode *dir, | |||
2253 | /* | 2253 | /* |
2254 | * For non-fast symlinks, we just allocate inode and put it on | 2254 | * For non-fast symlinks, we just allocate inode and put it on |
2255 | * orphan list in the first transaction => we need bitmap, | 2255 | * orphan list in the first transaction => we need bitmap, |
2256 | * group descriptor, sb, inode block, quota blocks. | 2256 | * group descriptor, sb, inode block, quota blocks, and |
2257 | * possibly selinux xattr blocks. | ||
2257 | */ | 2258 | */ |
2258 | credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); | 2259 | credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + |
2260 | EXT4_XATTR_TRANS_BLOCKS; | ||
2259 | } else { | 2261 | } else { |
2260 | /* | 2262 | /* |
2261 | * Fast symlink. We have to add entry to directory | 2263 | * Fast symlink. We have to add entry to directory |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 430c401d0895..78839af7ce29 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -334,8 +334,10 @@ submit_and_retry: | |||
334 | if ((io_end->num_io_pages >= MAX_IO_PAGES) && | 334 | if ((io_end->num_io_pages >= MAX_IO_PAGES) && |
335 | (io_end->pages[io_end->num_io_pages-1] != io_page)) | 335 | (io_end->pages[io_end->num_io_pages-1] != io_page)) |
336 | goto submit_and_retry; | 336 | goto submit_and_retry; |
337 | if (buffer_uninit(bh)) | 337 | if (buffer_uninit(bh) && !(io_end->flag & EXT4_IO_END_UNWRITTEN)) { |
338 | io->io_end->flag |= EXT4_IO_END_UNWRITTEN; | 338 | io_end->flag |= EXT4_IO_END_UNWRITTEN; |
339 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); | ||
340 | } | ||
339 | io->io_end->size += bh->b_size; | 341 | io->io_end->size += bh->b_size; |
340 | io->io_next_block++; | 342 | io->io_next_block++; |
341 | ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); | 343 | ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); |
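The ext4 hunks that touch EXT4_IO_END_UNWRITTEN (in inode.c and page-io.c above) apply one rule: set the flag only on the first transition and bump the i_aiodio_unwritten counter at that same moment, so repeated calls cannot over-count. Here is a compact userspace sketch of that idiom with invented names and a C11 atomic standing in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_IO_END_UNWRITTEN 0x1u

struct demo_io_end { unsigned int flag; };

static atomic_long demo_unwritten;

/* Count the io_end exactly once, on the flag's first transition. */
static void demo_mark_unwritten(struct demo_io_end *io_end)
{
    if (!(io_end->flag & DEMO_IO_END_UNWRITTEN)) {
        io_end->flag |= DEMO_IO_END_UNWRITTEN;
        atomic_fetch_add(&demo_unwritten, 1);
    }
}

int main(void)
{
    struct demo_io_end io = { 0 };

    demo_mark_unwritten(&io);
    demo_mark_unwritten(&io);   /* second call is a no-op */
    printf("unwritten io_ends counted: %ld\n", atomic_load(&demo_unwritten));
    return 0;
}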
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4687fea0c00f..44d0c8db2239 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -919,7 +919,6 @@ static void ext4_i_callback(struct rcu_head *head) | |||
919 | 919 | ||
920 | static void ext4_destroy_inode(struct inode *inode) | 920 | static void ext4_destroy_inode(struct inode *inode) |
921 | { | 921 | { |
922 | ext4_ioend_wait(inode); | ||
923 | if (!list_empty(&(EXT4_I(inode)->i_orphan))) { | 922 | if (!list_empty(&(EXT4_I(inode)->i_orphan))) { |
924 | ext4_msg(inode->i_sb, KERN_ERR, | 923 | ext4_msg(inode->i_sb, KERN_ERR, |
925 | "Inode %lu (%p): orphan list check failed!", | 924 | "Inode %lu (%p): orphan list check failed!", |
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 4ad64732cbce..5efbd5d7701a 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c | |||
@@ -1231,7 +1231,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots, | |||
1231 | struct super_block *sb = dir->i_sb; | 1231 | struct super_block *sb = dir->i_sb; |
1232 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 1232 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
1233 | struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ | 1233 | struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ |
1234 | struct msdos_dir_entry *de; | 1234 | struct msdos_dir_entry *uninitialized_var(de); |
1235 | int err, free_slots, i, nr_bhs; | 1235 | int err, free_slots, i, nr_bhs; |
1236 | loff_t pos, i_pos; | 1236 | loff_t pos, i_pos; |
1237 | 1237 | ||
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 5942fec22c65..1726d7303047 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -1188,9 +1188,9 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, | |||
1188 | out: | 1188 | out: |
1189 | /* UTF-8 doesn't provide FAT semantics */ | 1189 | /* UTF-8 doesn't provide FAT semantics */ |
1190 | if (!strcmp(opts->iocharset, "utf8")) { | 1190 | if (!strcmp(opts->iocharset, "utf8")) { |
1191 | fat_msg(sb, KERN_ERR, "utf8 is not a recommended IO charset" | 1191 | fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset" |
1192 | " for FAT filesystems, filesystem will be " | 1192 | " for FAT filesystems, filesystem will be " |
1193 | "case sensitive!\n"); | 1193 | "case sensitive!"); |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | /* If user doesn't specify allow_utime, it's initialized from dmask. */ | 1196 | /* If user doesn't specify allow_utime, it's initialized from dmask. */ |
@@ -1367,6 +1367,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, | |||
1367 | sbi->free_clusters = -1; /* Don't know yet */ | 1367 | sbi->free_clusters = -1; /* Don't know yet */ |
1368 | sbi->free_clus_valid = 0; | 1368 | sbi->free_clus_valid = 0; |
1369 | sbi->prev_free = FAT_START_ENT; | 1369 | sbi->prev_free = FAT_START_ENT; |
1370 | sb->s_maxbytes = 0xffffffff; | ||
1370 | 1371 | ||
1371 | if (!sbi->fat_length && b->fat32_length) { | 1372 | if (!sbi->fat_length && b->fat32_length) { |
1372 | struct fat_boot_fsinfo *fsinfo; | 1373 | struct fat_boot_fsinfo *fsinfo; |
@@ -1377,8 +1378,6 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, | |||
1377 | sbi->fat_length = le32_to_cpu(b->fat32_length); | 1378 | sbi->fat_length = le32_to_cpu(b->fat32_length); |
1378 | sbi->root_cluster = le32_to_cpu(b->root_cluster); | 1379 | sbi->root_cluster = le32_to_cpu(b->root_cluster); |
1379 | 1380 | ||
1380 | sb->s_maxbytes = 0xffffffff; | ||
1381 | |||
1382 | /* MC - if info_sector is 0, don't multiply by 0 */ | 1381 | /* MC - if info_sector is 0, don't multiply by 0 */ |
1383 | sbi->fsinfo_sector = le16_to_cpu(b->info_sector); | 1382 | sbi->fsinfo_sector = le16_to_cpu(b->info_sector); |
1384 | if (sbi->fsinfo_sector == 0) | 1383 | if (sbi->fsinfo_sector == 0) |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 640fc229df10..168a80f7f12b 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -1358,6 +1358,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, | |||
1358 | if (outarg.namelen > FUSE_NAME_MAX) | 1358 | if (outarg.namelen > FUSE_NAME_MAX) |
1359 | goto err; | 1359 | goto err; |
1360 | 1360 | ||
1361 | err = -EINVAL; | ||
1362 | if (size != sizeof(outarg) + outarg.namelen + 1) | ||
1363 | goto err; | ||
1364 | |||
1361 | name.name = buf; | 1365 | name.name = buf; |
1362 | name.len = outarg.namelen; | 1366 | name.len = outarg.namelen; |
1363 | err = fuse_copy_one(cs, buf, outarg.namelen + 1); | 1367 | err = fuse_copy_one(cs, buf, outarg.namelen + 1); |
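The fuse_notify_inval_entry() hunk above adds a strict length check: the notification is rejected unless its size is exactly the fixed header plus namelen plus the terminating NUL, so the subsequent copy can never read past the supplied buffer. A standalone sketch of that validation follows; the structure layout and the 1024 limit are illustrative stand-ins, not the real fuse ABI definitions.

#include <stdint.h>
#include <stdio.h>

struct demo_inval_entry_out { uint64_t parent; uint32_t namelen; uint32_t padding; };

/* Accept only messages whose size matches header + name + NUL exactly. */
static int demo_validate(size_t size, const struct demo_inval_entry_out *arg)
{
    if (arg->namelen > 1024)                       /* stand-in for the name-length cap */
        return -1;
    if (size != sizeof(*arg) + arg->namelen + 1)   /* header + name + terminating NUL */
        return -1;
    return 0;
}

int main(void)
{
    struct demo_inval_entry_out arg = { .parent = 1, .namelen = 3, .padding = 0 };

    printf("ok=%d\n", demo_validate(sizeof(arg) + 4, &arg) == 0);
    return 0;
}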
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d480d9af46c9..594f07a81c28 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/compat.h> | 16 | #include <linux/compat.h> |
17 | #include <linux/swap.h> | ||
17 | 18 | ||
18 | static const struct file_operations fuse_direct_io_file_operations; | 19 | static const struct file_operations fuse_direct_io_file_operations; |
19 | 20 | ||
@@ -245,6 +246,12 @@ void fuse_release_common(struct file *file, int opcode) | |||
245 | req = ff->reserved_req; | 246 | req = ff->reserved_req; |
246 | fuse_prepare_release(ff, file->f_flags, opcode); | 247 | fuse_prepare_release(ff, file->f_flags, opcode); |
247 | 248 | ||
249 | if (ff->flock) { | ||
250 | struct fuse_release_in *inarg = &req->misc.release.in; | ||
251 | inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; | ||
252 | inarg->lock_owner = fuse_lock_owner_id(ff->fc, | ||
253 | (fl_owner_t) file); | ||
254 | } | ||
248 | /* Hold vfsmount and dentry until release is finished */ | 255 | /* Hold vfsmount and dentry until release is finished */ |
249 | path_get(&file->f_path); | 256 | path_get(&file->f_path); |
250 | req->misc.release.path = file->f_path; | 257 | req->misc.release.path = file->f_path; |
@@ -755,18 +762,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file, | |||
755 | return req->misc.write.out.size; | 762 | return req->misc.write.out.size; |
756 | } | 763 | } |
757 | 764 | ||
758 | static int fuse_write_begin(struct file *file, struct address_space *mapping, | ||
759 | loff_t pos, unsigned len, unsigned flags, | ||
760 | struct page **pagep, void **fsdata) | ||
761 | { | ||
762 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | ||
763 | |||
764 | *pagep = grab_cache_page_write_begin(mapping, index, flags); | ||
765 | if (!*pagep) | ||
766 | return -ENOMEM; | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | void fuse_write_update_size(struct inode *inode, loff_t pos) | 765 | void fuse_write_update_size(struct inode *inode, loff_t pos) |
771 | { | 766 | { |
772 | struct fuse_conn *fc = get_fuse_conn(inode); | 767 | struct fuse_conn *fc = get_fuse_conn(inode); |
@@ -779,62 +774,6 @@ void fuse_write_update_size(struct inode *inode, loff_t pos) | |||
779 | spin_unlock(&fc->lock); | 774 | spin_unlock(&fc->lock); |
780 | } | 775 | } |
781 | 776 | ||
782 | static int fuse_buffered_write(struct file *file, struct inode *inode, | ||
783 | loff_t pos, unsigned count, struct page *page) | ||
784 | { | ||
785 | int err; | ||
786 | size_t nres; | ||
787 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
788 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); | ||
789 | struct fuse_req *req; | ||
790 | |||
791 | if (is_bad_inode(inode)) | ||
792 | return -EIO; | ||
793 | |||
794 | /* | ||
795 | * Make sure writepages on the same page are not mixed up with | ||
796 | * plain writes. | ||
797 | */ | ||
798 | fuse_wait_on_page_writeback(inode, page->index); | ||
799 | |||
800 | req = fuse_get_req(fc); | ||
801 | if (IS_ERR(req)) | ||
802 | return PTR_ERR(req); | ||
803 | |||
804 | req->in.argpages = 1; | ||
805 | req->num_pages = 1; | ||
806 | req->pages[0] = page; | ||
807 | req->page_offset = offset; | ||
808 | nres = fuse_send_write(req, file, pos, count, NULL); | ||
809 | err = req->out.h.error; | ||
810 | fuse_put_request(fc, req); | ||
811 | if (!err && !nres) | ||
812 | err = -EIO; | ||
813 | if (!err) { | ||
814 | pos += nres; | ||
815 | fuse_write_update_size(inode, pos); | ||
816 | if (count == PAGE_CACHE_SIZE) | ||
817 | SetPageUptodate(page); | ||
818 | } | ||
819 | fuse_invalidate_attr(inode); | ||
820 | return err ? err : nres; | ||
821 | } | ||
822 | |||
823 | static int fuse_write_end(struct file *file, struct address_space *mapping, | ||
824 | loff_t pos, unsigned len, unsigned copied, | ||
825 | struct page *page, void *fsdata) | ||
826 | { | ||
827 | struct inode *inode = mapping->host; | ||
828 | int res = 0; | ||
829 | |||
830 | if (copied) | ||
831 | res = fuse_buffered_write(file, inode, pos, copied, page); | ||
832 | |||
833 | unlock_page(page); | ||
834 | page_cache_release(page); | ||
835 | return res; | ||
836 | } | ||
837 | |||
838 | static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, | 777 | static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, |
839 | struct inode *inode, loff_t pos, | 778 | struct inode *inode, loff_t pos, |
840 | size_t count) | 779 | size_t count) |
@@ -908,6 +847,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, | |||
908 | pagefault_enable(); | 847 | pagefault_enable(); |
909 | flush_dcache_page(page); | 848 | flush_dcache_page(page); |
910 | 849 | ||
850 | mark_page_accessed(page); | ||
851 | |||
911 | if (!tmp) { | 852 | if (!tmp) { |
912 | unlock_page(page); | 853 | unlock_page(page); |
913 | page_cache_release(page); | 854 | page_cache_release(page); |
@@ -1559,11 +1500,14 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) | |||
1559 | struct fuse_conn *fc = get_fuse_conn(inode); | 1500 | struct fuse_conn *fc = get_fuse_conn(inode); |
1560 | int err; | 1501 | int err; |
1561 | 1502 | ||
1562 | if (fc->no_lock) { | 1503 | if (fc->no_flock) { |
1563 | err = flock_lock_file_wait(file, fl); | 1504 | err = flock_lock_file_wait(file, fl); |
1564 | } else { | 1505 | } else { |
1506 | struct fuse_file *ff = file->private_data; | ||
1507 | |||
1565 | /* emulate flock with POSIX locks */ | 1508 | /* emulate flock with POSIX locks */ |
1566 | fl->fl_owner = (fl_owner_t) file; | 1509 | fl->fl_owner = (fl_owner_t) file; |
1510 | ff->flock = true; | ||
1567 | err = fuse_setlk(file, fl, 1); | 1511 | err = fuse_setlk(file, fl, 1); |
1568 | } | 1512 | } |
1569 | 1513 | ||
@@ -2201,8 +2145,6 @@ static const struct address_space_operations fuse_file_aops = { | |||
2201 | .readpage = fuse_readpage, | 2145 | .readpage = fuse_readpage, |
2202 | .writepage = fuse_writepage, | 2146 | .writepage = fuse_writepage, |
2203 | .launder_page = fuse_launder_page, | 2147 | .launder_page = fuse_launder_page, |
2204 | .write_begin = fuse_write_begin, | ||
2205 | .write_end = fuse_write_end, | ||
2206 | .readpages = fuse_readpages, | 2148 | .readpages = fuse_readpages, |
2207 | .set_page_dirty = __set_page_dirty_nobuffers, | 2149 | .set_page_dirty = __set_page_dirty_nobuffers, |
2208 | .bmap = fuse_bmap, | 2150 | .bmap = fuse_bmap, |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c6aa2d4b8517..cf6db0a93219 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -135,6 +135,9 @@ struct fuse_file { | |||
135 | 135 | ||
136 | /** Wait queue head for poll */ | 136 | /** Wait queue head for poll */ |
137 | wait_queue_head_t poll_wait; | 137 | wait_queue_head_t poll_wait; |
138 | |||
139 | /** Has flock been performed on this file? */ | ||
140 | bool flock:1; | ||
138 | }; | 141 | }; |
139 | 142 | ||
140 | /** One input argument of a request */ | 143 | /** One input argument of a request */ |
@@ -448,7 +451,7 @@ struct fuse_conn { | |||
448 | /** Is removexattr not implemented by fs? */ | 451 | /** Is removexattr not implemented by fs? */ |
449 | unsigned no_removexattr:1; | 452 | unsigned no_removexattr:1; |
450 | 453 | ||
451 | /** Are file locking primitives not implemented by fs? */ | 454 | /** Are posix file locking primitives not implemented by fs? */ |
452 | unsigned no_lock:1; | 455 | unsigned no_lock:1; |
453 | 456 | ||
454 | /** Is access not implemented by fs? */ | 457 | /** Is access not implemented by fs? */ |
@@ -472,6 +475,9 @@ struct fuse_conn { | |||
472 | /** Don't apply umask to creation modes */ | 475 | /** Don't apply umask to creation modes */ |
473 | unsigned dont_mask:1; | 476 | unsigned dont_mask:1; |
474 | 477 | ||
478 | /** Are BSD file locking primitives not implemented by fs? */ | ||
479 | unsigned no_flock:1; | ||
480 | |||
475 | /** The number of requests waiting for completion */ | 481 | /** The number of requests waiting for completion */ |
476 | atomic_t num_waiting; | 482 | atomic_t num_waiting; |
477 | 483 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 38f84cd48b67..12b502929da9 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -71,7 +71,7 @@ struct fuse_mount_data { | |||
71 | unsigned blksize; | 71 | unsigned blksize; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct fuse_forget_link *fuse_alloc_forget() | 74 | struct fuse_forget_link *fuse_alloc_forget(void) |
75 | { | 75 | { |
76 | return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); | 76 | return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); |
77 | } | 77 | } |
@@ -809,6 +809,10 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
809 | fc->async_read = 1; | 809 | fc->async_read = 1; |
810 | if (!(arg->flags & FUSE_POSIX_LOCKS)) | 810 | if (!(arg->flags & FUSE_POSIX_LOCKS)) |
811 | fc->no_lock = 1; | 811 | fc->no_lock = 1; |
812 | if (arg->minor >= 17) { | ||
813 | if (!(arg->flags & FUSE_FLOCK_LOCKS)) | ||
814 | fc->no_flock = 1; | ||
815 | } | ||
812 | if (arg->flags & FUSE_ATOMIC_O_TRUNC) | 816 | if (arg->flags & FUSE_ATOMIC_O_TRUNC) |
813 | fc->atomic_o_trunc = 1; | 817 | fc->atomic_o_trunc = 1; |
814 | if (arg->minor >= 9) { | 818 | if (arg->minor >= 9) { |
@@ -823,6 +827,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
823 | } else { | 827 | } else { |
824 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; | 828 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; |
825 | fc->no_lock = 1; | 829 | fc->no_lock = 1; |
830 | fc->no_flock = 1; | ||
826 | } | 831 | } |
827 | 832 | ||
828 | fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); | 833 | fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); |
@@ -843,7 +848,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) | |||
843 | arg->minor = FUSE_KERNEL_MINOR_VERSION; | 848 | arg->minor = FUSE_KERNEL_MINOR_VERSION; |
844 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; | 849 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; |
845 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | | 850 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | |
846 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; | 851 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | |
852 | FUSE_FLOCK_LOCKS; | ||
847 | req->in.h.opcode = FUSE_INIT; | 853 | req->in.h.opcode = FUSE_INIT; |
848 | req->in.numargs = 1; | 854 | req->in.numargs = 1; |
849 | req->in.args[0].size = sizeof(*arg); | 855 | req->in.args[0].size = sizeof(*arg); |
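The fuse_i.h and inode.c hunks above negotiate BSD flock support: the kernel offers FUSE_FLOCK_LOCKS in FUSE_INIT, and if a new-enough filesystem (minor >= 17) does not echo the flag back, no_flock is set and flock() falls back to local emulation. The toy sketch below shows only that offer/echo shape; the flag values, names, and struct are invented.

#include <stdio.h>

#define DEMO_POSIX_LOCKS  0x1u
#define DEMO_FLOCK_LOCKS  0x2u

struct demo_conn { unsigned no_lock : 1; unsigned no_flock : 1; };

/* Anything the filesystem does not echo back gets disabled on the connection. */
static void demo_process_init_reply(struct demo_conn *c, unsigned reply_flags)
{
    if (!(reply_flags & DEMO_POSIX_LOCKS))
        c->no_lock = 1;
    if (!(reply_flags & DEMO_FLOCK_LOCKS))
        c->no_flock = 1;   /* fall back to local flock emulation */
}

int main(void)
{
    struct demo_conn c = { 0 };

    demo_process_init_reply(&c, DEMO_POSIX_LOCKS);  /* server lacks flock support */
    printf("no_lock=%u no_flock=%u\n", c.no_lock, c.no_flock);
    return 0;
}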
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 87b6e0421c12..ec889538e5a6 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -491,6 +491,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, | |||
491 | inode->i_op = &page_symlink_inode_operations; | 491 | inode->i_op = &page_symlink_inode_operations; |
492 | break; | 492 | break; |
493 | } | 493 | } |
494 | lockdep_annotate_inode_mutex_key(inode); | ||
494 | } | 495 | } |
495 | return inode; | 496 | return inode; |
496 | } | 497 | } |
diff --git a/fs/inode.c b/fs/inode.c index 73920d555c88..ec7924696a13 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -848,16 +848,9 @@ struct inode *new_inode(struct super_block *sb) | |||
848 | } | 848 | } |
849 | EXPORT_SYMBOL(new_inode); | 849 | EXPORT_SYMBOL(new_inode); |
850 | 850 | ||
851 | /** | ||
852 | * unlock_new_inode - clear the I_NEW state and wake up any waiters | ||
853 | * @inode: new inode to unlock | ||
854 | * | ||
855 | * Called when the inode is fully initialised to clear the new state of the | ||
856 | * inode and wake up anyone waiting for the inode to finish initialisation. | ||
857 | */ | ||
858 | void unlock_new_inode(struct inode *inode) | ||
859 | { | ||
860 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 851 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
852 | void lockdep_annotate_inode_mutex_key(struct inode *inode) | ||
853 | { | ||
861 | if (S_ISDIR(inode->i_mode)) { | 854 | if (S_ISDIR(inode->i_mode)) { |
862 | struct file_system_type *type = inode->i_sb->s_type; | 855 | struct file_system_type *type = inode->i_sb->s_type; |
863 | 856 | ||
@@ -873,7 +866,20 @@ void unlock_new_inode(struct inode *inode) | |||
873 | &type->i_mutex_dir_key); | 866 | &type->i_mutex_dir_key); |
874 | } | 867 | } |
875 | } | 868 | } |
869 | } | ||
870 | EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); | ||
876 | #endif | 871 | #endif |
872 | |||
873 | /** | ||
874 | * unlock_new_inode - clear the I_NEW state and wake up any waiters | ||
875 | * @inode: new inode to unlock | ||
876 | * | ||
877 | * Called when the inode is fully initialised to clear the new state of the | ||
878 | * inode and wake up anyone waiting for the inode to finish initialisation. | ||
879 | */ | ||
880 | void unlock_new_inode(struct inode *inode) | ||
881 | { | ||
882 | lockdep_annotate_inode_mutex_key(inode); | ||
877 | spin_lock(&inode->i_lock); | 883 | spin_lock(&inode->i_lock); |
878 | WARN_ON(!(inode->i_state & I_NEW)); | 884 | WARN_ON(!(inode->i_state & I_NEW)); |
879 | inode->i_state &= ~I_NEW; | 885 | inode->i_state &= ~I_NEW; |
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c index adcf92d3b603..7971f37534a3 100644 --- a/fs/jfs/jfs_umount.c +++ b/fs/jfs/jfs_umount.c | |||
@@ -68,7 +68,7 @@ int jfs_umount(struct super_block *sb) | |||
68 | /* | 68 | /* |
69 | * Wait for outstanding transactions to be written to log: | 69 | * Wait for outstanding transactions to be written to log: |
70 | */ | 70 | */ |
71 | jfs_flush_journal(log, 1); | 71 | jfs_flush_journal(log, 2); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * close fileset inode allocation map (aka fileset inode) | 74 | * close fileset inode allocation map (aka fileset inode) |
@@ -146,7 +146,7 @@ int jfs_umount_rw(struct super_block *sb) | |||
146 | * | 146 | * |
147 | * remove file system from log active file system list. | 147 | * remove file system from log active file system list. |
148 | */ | 148 | */ |
149 | jfs_flush_journal(log, 1); | 149 | jfs_flush_journal(log, 2); |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Make sure all metadata makes it to disk | 152 | * Make sure all metadata makes it to disk |
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index be020771c6b4..dbcd82126aed 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig | |||
@@ -79,12 +79,9 @@ config NFS_V4_1 | |||
79 | depends on NFS_FS && NFS_V4 && EXPERIMENTAL | 79 | depends on NFS_FS && NFS_V4 && EXPERIMENTAL |
80 | select SUNRPC_BACKCHANNEL | 80 | select SUNRPC_BACKCHANNEL |
81 | select PNFS_FILE_LAYOUT | 81 | select PNFS_FILE_LAYOUT |
82 | select PNFS_BLOCK | ||
83 | select MD | ||
84 | select BLK_DEV_DM | ||
85 | help | 82 | help |
86 | This option enables support for minor version 1 of the NFSv4 protocol | 83 | This option enables support for minor version 1 of the NFSv4 protocol |
87 | (RFC 5661 and RFC 5663) in the kernel's NFS client. | 84 | (RFC 5661) in the kernel's NFS client. |
88 | 85 | ||
89 | If unsure, say N. | 86 | If unsure, say N. |
90 | 87 | ||
@@ -93,16 +90,13 @@ config PNFS_FILE_LAYOUT | |||
93 | 90 | ||
94 | config PNFS_BLOCK | 91 | config PNFS_BLOCK |
95 | tristate | 92 | tristate |
93 | depends on NFS_FS && NFS_V4_1 && BLK_DEV_DM | ||
94 | default m | ||
96 | 95 | ||
97 | config PNFS_OBJLAYOUT | 96 | config PNFS_OBJLAYOUT |
98 | tristate "Provide support for the pNFS Objects Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)" | 97 | tristate |
99 | depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD | 98 | depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD |
100 | help | 99 | default m |
101 | Say M here if you want your pNFS client to support the Objects Layout Driver. | ||
102 | Requires the SCSI osd initiator library (SCSI_OSD_INITIATOR) and | ||
103 | upper level driver (SCSI_OSD_ULD). | ||
104 | |||
105 | If unsure, say N. | ||
106 | 100 | ||
107 | config ROOT_NFS | 101 | config ROOT_NFS |
108 | bool "Root file system on NFS" | 102 | bool "Root file system on NFS" |
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index e56564d2ef95..9561c8fc8bdb 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/namei.h> | 36 | #include <linux/namei.h> |
37 | #include <linux/bio.h> /* struct bio */ | 37 | #include <linux/bio.h> /* struct bio */ |
38 | #include <linux/buffer_head.h> /* various write calls */ | 38 | #include <linux/buffer_head.h> /* various write calls */ |
39 | #include <linux/prefetch.h> | ||
39 | 40 | ||
40 | #include "blocklayout.h" | 41 | #include "blocklayout.h" |
41 | 42 | ||
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index b257383bb565..07df5f1d85e5 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h | |||
@@ -38,6 +38,7 @@ enum nfs4_callback_opnum { | |||
38 | struct cb_process_state { | 38 | struct cb_process_state { |
39 | __be32 drc_status; | 39 | __be32 drc_status; |
40 | struct nfs_client *clp; | 40 | struct nfs_client *clp; |
41 | int slotid; | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | struct cb_compound_hdr_arg { | 44 | struct cb_compound_hdr_arg { |
@@ -166,7 +167,6 @@ extern unsigned nfs4_callback_layoutrecall( | |||
166 | void *dummy, struct cb_process_state *cps); | 167 | void *dummy, struct cb_process_state *cps); |
167 | 168 | ||
168 | extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); | 169 | extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); |
169 | extern void nfs4_cb_take_slot(struct nfs_client *clp); | ||
170 | 170 | ||
171 | struct cb_devicenotifyitem { | 171 | struct cb_devicenotifyitem { |
172 | uint32_t cbd_notify_type; | 172 | uint32_t cbd_notify_type; |
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 74780f9f852c..43926add945b 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c | |||
@@ -348,7 +348,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) | |||
348 | /* Normal */ | 348 | /* Normal */ |
349 | if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { | 349 | if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { |
350 | slot->seq_nr++; | 350 | slot->seq_nr++; |
351 | return htonl(NFS4_OK); | 351 | goto out_ok; |
352 | } | 352 | } |
353 | 353 | ||
354 | /* Replay */ | 354 | /* Replay */ |
@@ -367,11 +367,14 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) | |||
367 | /* Wraparound */ | 367 | /* Wraparound */ |
368 | if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { | 368 | if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { |
369 | slot->seq_nr = 1; | 369 | slot->seq_nr = 1; |
370 | return htonl(NFS4_OK); | 370 | goto out_ok; |
371 | } | 371 | } |
372 | 372 | ||
373 | /* Misordered request */ | 373 | /* Misordered request */ |
374 | return htonl(NFS4ERR_SEQ_MISORDERED); | 374 | return htonl(NFS4ERR_SEQ_MISORDERED); |
375 | out_ok: | ||
376 | tbl->highest_used_slotid = args->csa_slotid; | ||
377 | return htonl(NFS4_OK); | ||
375 | } | 378 | } |
376 | 379 | ||
377 | /* | 380 | /* |
@@ -433,26 +436,37 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, | |||
433 | struct cb_sequenceres *res, | 436 | struct cb_sequenceres *res, |
434 | struct cb_process_state *cps) | 437 | struct cb_process_state *cps) |
435 | { | 438 | { |
439 | struct nfs4_slot_table *tbl; | ||
436 | struct nfs_client *clp; | 440 | struct nfs_client *clp; |
437 | int i; | 441 | int i; |
438 | __be32 status = htonl(NFS4ERR_BADSESSION); | 442 | __be32 status = htonl(NFS4ERR_BADSESSION); |
439 | 443 | ||
440 | cps->clp = NULL; | ||
441 | |||
442 | clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); | 444 | clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); |
443 | if (clp == NULL) | 445 | if (clp == NULL) |
444 | goto out; | 446 | goto out; |
445 | 447 | ||
448 | tbl = &clp->cl_session->bc_slot_table; | ||
449 | |||
450 | spin_lock(&tbl->slot_tbl_lock); | ||
446 | /* state manager is resetting the session */ | 451 | /* state manager is resetting the session */ |
447 | if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { | 452 | if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { |
448 | status = NFS4ERR_DELAY; | 453 | spin_unlock(&tbl->slot_tbl_lock); |
454 | status = htonl(NFS4ERR_DELAY); | ||
455 | /* Return NFS4ERR_BADSESSION if we're draining the session | ||
456 | * in order to reset it. | ||
457 | */ | ||
458 | if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) | ||
459 | status = htonl(NFS4ERR_BADSESSION); | ||
449 | goto out; | 460 | goto out; |
450 | } | 461 | } |
451 | 462 | ||
452 | status = validate_seqid(&clp->cl_session->bc_slot_table, args); | 463 | status = validate_seqid(&clp->cl_session->bc_slot_table, args); |
464 | spin_unlock(&tbl->slot_tbl_lock); | ||
453 | if (status) | 465 | if (status) |
454 | goto out; | 466 | goto out; |
455 | 467 | ||
468 | cps->slotid = args->csa_slotid; | ||
469 | |||
456 | /* | 470 | /* |
457 | * Check for pending referring calls. If a match is found, a | 471 | * Check for pending referring calls. If a match is found, a |
458 | * related callback was received before the response to the original | 472 | * related callback was received before the response to the original |
@@ -469,7 +483,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, | |||
469 | res->csr_slotid = args->csa_slotid; | 483 | res->csr_slotid = args->csa_slotid; |
470 | res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; | 484 | res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; |
471 | res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; | 485 | res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; |
472 | nfs4_cb_take_slot(clp); | ||
473 | 486 | ||
474 | out: | 487 | out: |
475 | cps->clp = clp; /* put in nfs4_callback_compound */ | 488 | cps->clp = clp; /* put in nfs4_callback_compound */ |
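The nfs4_callback_sequence() hunk above also changes what the backchannel reports while the forechannel is draining the session: an ordinary drain now yields NFS4ERR_DELAY, but a drain that is part of a session reset yields NFS4ERR_BADSESSION so the server gives up on the dying session instead of retrying. A condensed sketch of that decision, simplified from the hunk above (surrounding code and error paths omitted):

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
		spin_unlock(&tbl->slot_tbl_lock);
		status = htonl(NFS4ERR_DELAY);
		/* a drain driven by a session reset is fatal to the session */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out;
	}
	status = validate_seqid(tbl, args);
	spin_unlock(&tbl->slot_tbl_lock);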
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index c6c86a77e043..918ad647afea 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
@@ -754,26 +754,15 @@ static void nfs4_callback_free_slot(struct nfs4_session *session) | |||
754 | * Let the state manager know callback processing done. | 754 | * Let the state manager know callback processing done. |
755 | * A single slot, so highest used slotid is either 0 or -1 | 755 | * A single slot, so highest used slotid is either 0 or -1 |
756 | */ | 756 | */ |
757 | tbl->highest_used_slotid--; | 757 | tbl->highest_used_slotid = -1; |
758 | nfs4_check_drain_bc_complete(session); | 758 | nfs4_check_drain_bc_complete(session); |
759 | spin_unlock(&tbl->slot_tbl_lock); | 759 | spin_unlock(&tbl->slot_tbl_lock); |
760 | } | 760 | } |
761 | 761 | ||
762 | static void nfs4_cb_free_slot(struct nfs_client *clp) | 762 | static void nfs4_cb_free_slot(struct cb_process_state *cps) |
763 | { | 763 | { |
764 | if (clp && clp->cl_session) | 764 | if (cps->slotid != -1) |
765 | nfs4_callback_free_slot(clp->cl_session); | 765 | nfs4_callback_free_slot(cps->clp->cl_session); |
766 | } | ||
767 | |||
768 | /* A single slot, so highest used slotid is either 0 or -1 */ | ||
769 | void nfs4_cb_take_slot(struct nfs_client *clp) | ||
770 | { | ||
771 | struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table; | ||
772 | |||
773 | spin_lock(&tbl->slot_tbl_lock); | ||
774 | tbl->highest_used_slotid++; | ||
775 | BUG_ON(tbl->highest_used_slotid != 0); | ||
776 | spin_unlock(&tbl->slot_tbl_lock); | ||
777 | } | 766 | } |
778 | 767 | ||
779 | #else /* CONFIG_NFS_V4_1 */ | 768 | #else /* CONFIG_NFS_V4_1 */ |
@@ -784,7 +773,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) | |||
784 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); | 773 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); |
785 | } | 774 | } |
786 | 775 | ||
787 | static void nfs4_cb_free_slot(struct nfs_client *clp) | 776 | static void nfs4_cb_free_slot(struct cb_process_state *cps) |
788 | { | 777 | { |
789 | } | 778 | } |
790 | #endif /* CONFIG_NFS_V4_1 */ | 779 | #endif /* CONFIG_NFS_V4_1 */ |
@@ -866,6 +855,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
866 | struct cb_process_state cps = { | 855 | struct cb_process_state cps = { |
867 | .drc_status = 0, | 856 | .drc_status = 0, |
868 | .clp = NULL, | 857 | .clp = NULL, |
858 | .slotid = -1, | ||
869 | }; | 859 | }; |
870 | unsigned int nops = 0; | 860 | unsigned int nops = 0; |
871 | 861 | ||
@@ -906,7 +896,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
906 | 896 | ||
907 | *hdr_res.status = status; | 897 | *hdr_res.status = status; |
908 | *hdr_res.nops = htonl(nops); | 898 | *hdr_res.nops = htonl(nops); |
909 | nfs4_cb_free_slot(cps.clp); | 899 | nfs4_cb_free_slot(&cps); |
910 | nfs_put_client(cps.clp); | 900 | nfs_put_client(cps.clp); |
911 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); | 901 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); |
912 | return rpc_success; | 902 | return rpc_success; |
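Taken together, the callback.h, callback_proc.c and callback_xdr.c hunks move backchannel slot ownership into struct cb_process_state: validate_seqid() marks the single slot used while it still holds tbl->slot_tbl_lock, nfs4_callback_sequence() records the slot id in cps->slotid, and nfs4_cb_free_slot() releases the slot only if one was actually taken. A minimal sketch of the resulting lifecycle, condensed from the hunks above (not the kernel code verbatim):

	struct cb_process_state cps = {
		.clp	= NULL,
		.slotid	= -1,		/* no backchannel slot held yet */
	};

	/* validate_seqid(), under tbl->slot_tbl_lock, on success: */
	tbl->highest_used_slotid = args->csa_slotid;

	/* nfs4_callback_sequence(), once validate_seqid() has succeeded: */
	cps->slotid = args->csa_slotid;

	/* nfs4_cb_free_slot(), at the end of the compound: */
	if (cps->slotid != -1)
		nfs4_callback_free_slot(cps->clp->cl_session);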
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9383ca7245bc..d0cda12fddc3 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c | |||
@@ -479,7 +479,6 @@ static int _io_check(struct objio_state *ios, bool is_write) | |||
479 | for (i = 0; i < ios->numdevs; i++) { | 479 | for (i = 0; i < ios->numdevs; i++) { |
480 | struct osd_sense_info osi; | 480 | struct osd_sense_info osi; |
481 | struct osd_request *or = ios->per_dev[i].or; | 481 | struct osd_request *or = ios->per_dev[i].or; |
482 | unsigned dev; | ||
483 | int ret; | 482 | int ret; |
484 | 483 | ||
485 | if (!or) | 484 | if (!or) |
@@ -500,9 +499,8 @@ static int _io_check(struct objio_state *ios, bool is_write) | |||
500 | 499 | ||
501 | continue; /* we recovered */ | 500 | continue; /* we recovered */ |
502 | } | 501 | } |
503 | dev = ios->per_dev[i].dev; | 502 | objlayout_io_set_result(&ios->ol_state, i, |
504 | objlayout_io_set_result(&ios->ol_state, dev, | 503 | &ios->layout->comps[i].oc_object_id, |
505 | &ios->layout->comps[dev].oc_object_id, | ||
506 | osd_pri_2_pnfs_err(osi.osd_err_pri), | 504 | osd_pri_2_pnfs_err(osi.osd_err_pri), |
507 | ios->per_dev[i].offset, | 505 | ios->per_dev[i].offset, |
508 | ios->per_dev[i].length, | 506 | ios->per_dev[i].length, |
@@ -589,22 +587,19 @@ static void _calc_stripe_info(struct objio_state *ios, u64 file_offset, | |||
589 | } | 587 | } |
590 | 588 | ||
591 | static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, | 589 | static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, |
592 | unsigned pgbase, struct _objio_per_comp *per_dev, int cur_len, | 590 | unsigned pgbase, struct _objio_per_comp *per_dev, int len, |
593 | gfp_t gfp_flags) | 591 | gfp_t gfp_flags) |
594 | { | 592 | { |
595 | unsigned pg = *cur_pg; | 593 | unsigned pg = *cur_pg; |
594 | int cur_len = len; | ||
596 | struct request_queue *q = | 595 | struct request_queue *q = |
597 | osd_request_queue(_io_od(ios, per_dev->dev)); | 596 | osd_request_queue(_io_od(ios, per_dev->dev)); |
598 | 597 | ||
599 | per_dev->length += cur_len; | ||
600 | |||
601 | if (per_dev->bio == NULL) { | 598 | if (per_dev->bio == NULL) { |
602 | unsigned stripes = ios->layout->num_comps / | 599 | unsigned pages_in_stripe = ios->layout->group_width * |
603 | ios->layout->mirrors_p1; | ||
604 | unsigned pages_in_stripe = stripes * | ||
605 | (ios->layout->stripe_unit / PAGE_SIZE); | 600 | (ios->layout->stripe_unit / PAGE_SIZE); |
606 | unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) / | 601 | unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) / |
607 | stripes; | 602 | ios->layout->group_width; |
608 | 603 | ||
609 | if (BIO_MAX_PAGES_KMALLOC < bio_size) | 604 | if (BIO_MAX_PAGES_KMALLOC < bio_size) |
610 | bio_size = BIO_MAX_PAGES_KMALLOC; | 605 | bio_size = BIO_MAX_PAGES_KMALLOC; |
@@ -632,6 +627,7 @@ static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, | |||
632 | } | 627 | } |
633 | BUG_ON(cur_len); | 628 | BUG_ON(cur_len); |
634 | 629 | ||
630 | per_dev->length += len; | ||
635 | *cur_pg = pg; | 631 | *cur_pg = pg; |
636 | return 0; | 632 | return 0; |
637 | } | 633 | } |
@@ -650,7 +646,7 @@ static int _prepare_one_group(struct objio_state *ios, u64 length, | |||
650 | int ret = 0; | 646 | int ret = 0; |
651 | 647 | ||
652 | while (length) { | 648 | while (length) { |
653 | struct _objio_per_comp *per_dev = &ios->per_dev[dev]; | 649 | struct _objio_per_comp *per_dev = &ios->per_dev[dev - first_dev]; |
654 | unsigned cur_len, page_off = 0; | 650 | unsigned cur_len, page_off = 0; |
655 | 651 | ||
656 | if (!per_dev->length) { | 652 | if (!per_dev->length) { |
@@ -670,8 +666,8 @@ static int _prepare_one_group(struct objio_state *ios, u64 length, | |||
670 | cur_len = stripe_unit; | 666 | cur_len = stripe_unit; |
671 | } | 667 | } |
672 | 668 | ||
673 | if (max_comp < dev) | 669 | if (max_comp < dev - first_dev) |
674 | max_comp = dev; | 670 | max_comp = dev - first_dev; |
675 | } else { | 671 | } else { |
676 | cur_len = stripe_unit; | 672 | cur_len = stripe_unit; |
677 | } | 673 | } |
@@ -806,7 +802,7 @@ static int _read_mirrors(struct objio_state *ios, unsigned cur_comp) | |||
806 | struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp]; | 802 | struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp]; |
807 | unsigned dev = per_dev->dev; | 803 | unsigned dev = per_dev->dev; |
808 | struct pnfs_osd_object_cred *cred = | 804 | struct pnfs_osd_object_cred *cred = |
809 | &ios->layout->comps[dev]; | 805 | &ios->layout->comps[cur_comp]; |
810 | struct osd_obj_id obj = { | 806 | struct osd_obj_id obj = { |
811 | .partition = cred->oc_object_id.oid_partition_id, | 807 | .partition = cred->oc_object_id.oid_partition_id, |
812 | .id = cred->oc_object_id.oid_object_id, | 808 | .id = cred->oc_object_id.oid_object_id, |
@@ -904,7 +900,7 @@ static int _write_mirrors(struct objio_state *ios, unsigned cur_comp) | |||
904 | for (; cur_comp < last_comp; ++cur_comp, ++dev) { | 900 | for (; cur_comp < last_comp; ++cur_comp, ++dev) { |
905 | struct osd_request *or = NULL; | 901 | struct osd_request *or = NULL; |
906 | struct pnfs_osd_object_cred *cred = | 902 | struct pnfs_osd_object_cred *cred = |
907 | &ios->layout->comps[dev]; | 903 | &ios->layout->comps[cur_comp]; |
908 | struct osd_obj_id obj = { | 904 | struct osd_obj_id obj = { |
909 | .partition = cred->oc_object_id.oid_partition_id, | 905 | .partition = cred->oc_object_id.oid_partition_id, |
910 | .id = cred->oc_object_id.oid_object_id, | 906 | .id = cred->oc_object_id.oid_object_id, |
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c index 16fc758e9123..b3918f7ac34d 100644 --- a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c +++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c | |||
@@ -170,6 +170,9 @@ int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, | |||
170 | p = _osd_xdr_decode_data_map(p, &layout->olo_map); | 170 | p = _osd_xdr_decode_data_map(p, &layout->olo_map); |
171 | layout->olo_comps_index = be32_to_cpup(p++); | 171 | layout->olo_comps_index = be32_to_cpup(p++); |
172 | layout->olo_num_comps = be32_to_cpup(p++); | 172 | layout->olo_num_comps = be32_to_cpup(p++); |
173 | dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__, | ||
174 | layout->olo_comps_index, layout->olo_num_comps); | ||
175 | |||
173 | iter->total_comps = layout->olo_num_comps; | 176 | iter->total_comps = layout->olo_num_comps; |
174 | return 0; | 177 | return 0; |
175 | } | 178 | } |
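The objio_osd.c hunks above share one indexing convention: ios->per_dev[] and ios->layout->comps[] are addressed by a component's position within the current group (dev - first_dev) or by the running component counter (cur_comp), not by the absolute device number dev. A two-line reminder lifted from the hunks above, shown purely for illustration and assuming dev, first_dev and cur_comp are already computed as in the surrounding code:

	struct _objio_per_comp *per_dev = &ios->per_dev[dev - first_dev];
	struct pnfs_osd_object_cred *cred = &ios->layout->comps[cur_comp];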
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 75bb316529dd..427a4e82a588 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile | |||
@@ -16,44 +16,53 @@ | |||
16 | # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | # | 17 | # |
18 | 18 | ||
19 | ccflags-y := -I$(src) -I$(src)/linux-2.6 | 19 | ccflags-y += -I$(src) # needed for trace events |
20 | ccflags-$(CONFIG_XFS_DEBUG) += -g | ||
21 | 20 | ||
22 | XFS_LINUX := linux-2.6 | 21 | ccflags-$(CONFIG_XFS_DEBUG) += -g |
23 | 22 | ||
24 | obj-$(CONFIG_XFS_FS) += xfs.o | 23 | obj-$(CONFIG_XFS_FS) += xfs.o |
25 | 24 | ||
26 | xfs-y += linux-2.6/xfs_trace.o | 25 | # this one should be compiled first, as the tracing macros can easily blow up |
27 | 26 | xfs-y += xfs_trace.o | |
28 | xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \ | ||
29 | xfs_dquot.o \ | ||
30 | xfs_dquot_item.o \ | ||
31 | xfs_trans_dquot.o \ | ||
32 | xfs_qm_syscalls.o \ | ||
33 | xfs_qm_bhv.o \ | ||
34 | xfs_qm.o) | ||
35 | xfs-$(CONFIG_XFS_QUOTA) += linux-2.6/xfs_quotaops.o | ||
36 | |||
37 | ifeq ($(CONFIG_XFS_QUOTA),y) | ||
38 | xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o | ||
39 | endif | ||
40 | |||
41 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o | ||
42 | xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o | ||
43 | xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o | ||
44 | xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o | ||
45 | xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o | ||
46 | 27 | ||
28 | # highlevel code | ||
29 | xfs-y += xfs_aops.o \ | ||
30 | xfs_bit.o \ | ||
31 | xfs_buf.o \ | ||
32 | xfs_dfrag.o \ | ||
33 | xfs_discard.o \ | ||
34 | xfs_error.o \ | ||
35 | xfs_export.o \ | ||
36 | xfs_file.o \ | ||
37 | xfs_filestream.o \ | ||
38 | xfs_fsops.o \ | ||
39 | xfs_fs_subr.o \ | ||
40 | xfs_globals.o \ | ||
41 | xfs_iget.o \ | ||
42 | xfs_ioctl.o \ | ||
43 | xfs_iomap.o \ | ||
44 | xfs_iops.o \ | ||
45 | xfs_itable.o \ | ||
46 | xfs_message.o \ | ||
47 | xfs_mru_cache.o \ | ||
48 | xfs_super.o \ | ||
49 | xfs_sync.o \ | ||
50 | xfs_xattr.o \ | ||
51 | xfs_rename.o \ | ||
52 | xfs_rw.o \ | ||
53 | xfs_utils.o \ | ||
54 | xfs_vnodeops.o \ | ||
55 | kmem.o \ | ||
56 | uuid.o | ||
47 | 57 | ||
58 | # code shared with libxfs | ||
48 | xfs-y += xfs_alloc.o \ | 59 | xfs-y += xfs_alloc.o \ |
49 | xfs_alloc_btree.o \ | 60 | xfs_alloc_btree.o \ |
50 | xfs_attr.o \ | 61 | xfs_attr.o \ |
51 | xfs_attr_leaf.o \ | 62 | xfs_attr_leaf.o \ |
52 | xfs_bit.o \ | ||
53 | xfs_bmap.o \ | 63 | xfs_bmap.o \ |
54 | xfs_bmap_btree.o \ | 64 | xfs_bmap_btree.o \ |
55 | xfs_btree.o \ | 65 | xfs_btree.o \ |
56 | xfs_buf_item.o \ | ||
57 | xfs_da_btree.o \ | 66 | xfs_da_btree.o \ |
58 | xfs_dir2.o \ | 67 | xfs_dir2.o \ |
59 | xfs_dir2_block.o \ | 68 | xfs_dir2_block.o \ |
@@ -61,49 +70,37 @@ xfs-y += xfs_alloc.o \ | |||
61 | xfs_dir2_leaf.o \ | 70 | xfs_dir2_leaf.o \ |
62 | xfs_dir2_node.o \ | 71 | xfs_dir2_node.o \ |
63 | xfs_dir2_sf.o \ | 72 | xfs_dir2_sf.o \ |
64 | xfs_error.o \ | ||
65 | xfs_extfree_item.o \ | ||
66 | xfs_filestream.o \ | ||
67 | xfs_fsops.o \ | ||
68 | xfs_ialloc.o \ | 73 | xfs_ialloc.o \ |
69 | xfs_ialloc_btree.o \ | 74 | xfs_ialloc_btree.o \ |
70 | xfs_iget.o \ | ||
71 | xfs_inode.o \ | 75 | xfs_inode.o \ |
72 | xfs_inode_item.o \ | ||
73 | xfs_iomap.o \ | ||
74 | xfs_itable.o \ | ||
75 | xfs_dfrag.o \ | ||
76 | xfs_log.o \ | ||
77 | xfs_log_cil.o \ | ||
78 | xfs_log_recover.o \ | 76 | xfs_log_recover.o \ |
79 | xfs_mount.o \ | 77 | xfs_mount.o \ |
80 | xfs_mru_cache.o \ | 78 | xfs_trans.o |
81 | xfs_rename.o \ | 79 | |
82 | xfs_trans.o \ | 80 | # low-level transaction/log code |
81 | xfs-y += xfs_log.o \ | ||
82 | xfs_log_cil.o \ | ||
83 | xfs_buf_item.o \ | ||
84 | xfs_extfree_item.o \ | ||
85 | xfs_inode_item.o \ | ||
83 | xfs_trans_ail.o \ | 86 | xfs_trans_ail.o \ |
84 | xfs_trans_buf.o \ | 87 | xfs_trans_buf.o \ |
85 | xfs_trans_extfree.o \ | 88 | xfs_trans_extfree.o \ |
86 | xfs_trans_inode.o \ | 89 | xfs_trans_inode.o \ |
87 | xfs_utils.o \ | ||
88 | xfs_vnodeops.o \ | ||
89 | xfs_rw.o | ||
90 | |||
91 | # Objects in linux/ | ||
92 | xfs-y += $(addprefix $(XFS_LINUX)/, \ | ||
93 | kmem.o \ | ||
94 | xfs_aops.o \ | ||
95 | xfs_buf.o \ | ||
96 | xfs_discard.o \ | ||
97 | xfs_export.o \ | ||
98 | xfs_file.o \ | ||
99 | xfs_fs_subr.o \ | ||
100 | xfs_globals.o \ | ||
101 | xfs_ioctl.o \ | ||
102 | xfs_iops.o \ | ||
103 | xfs_message.o \ | ||
104 | xfs_super.o \ | ||
105 | xfs_sync.o \ | ||
106 | xfs_xattr.o) | ||
107 | 90 | ||
108 | # Objects in support/ | 91 | # optional features |
109 | xfs-y += support/uuid.o | 92 | xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \ |
93 | xfs_dquot_item.o \ | ||
94 | xfs_trans_dquot.o \ | ||
95 | xfs_qm_syscalls.o \ | ||
96 | xfs_qm_bhv.o \ | ||
97 | xfs_qm.o \ | ||
98 | xfs_quotaops.o | ||
99 | ifeq ($(CONFIG_XFS_QUOTA),y) | ||
100 | xfs-$(CONFIG_PROC_FS) += xfs_qm_stats.o | ||
101 | endif | ||
102 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o | ||
103 | xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o | ||
104 | xfs-$(CONFIG_PROC_FS) += xfs_stats.o | ||
105 | xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o | ||
106 | xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o | ||
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/kmem.c index a907de565db3..a907de565db3 100644 --- a/fs/xfs/linux-2.6/kmem.c +++ b/fs/xfs/kmem.c | |||
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/kmem.h index f7c8f7a9ea6d..f7c8f7a9ea6d 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/kmem.h | |||
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/mrlock.h index ff6a19873e5c..ff6a19873e5c 100644 --- a/fs/xfs/linux-2.6/mrlock.h +++ b/fs/xfs/mrlock.h | |||
diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/time.h index 387e695a184c..387e695a184c 100644 --- a/fs/xfs/linux-2.6/time.h +++ b/fs/xfs/time.h | |||
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/uuid.c index b83f76b6d410..b83f76b6d410 100644 --- a/fs/xfs/support/uuid.c +++ b/fs/xfs/uuid.c | |||
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/uuid.h index 4732d71262cc..4732d71262cc 100644 --- a/fs/xfs/support/uuid.h +++ b/fs/xfs/uuid.h | |||
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 53ec3ea9a625..d8b11b7f94aa 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h | |||
@@ -24,5 +24,6 @@ | |||
24 | #define XFS_BUF_LOCK_TRACKING 1 | 24 | #define XFS_BUF_LOCK_TRACKING 1 |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #include <linux-2.6/xfs_linux.h> | 27 | #include "xfs_linux.h" |
28 | |||
28 | #endif /* __XFS_H__ */ | 29 | #endif /* __XFS_H__ */ |
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/xfs_acl.c index b6c4b3795c4a..b6c4b3795c4a 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 6530769a999b..4805f009f923 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
@@ -103,7 +103,7 @@ typedef struct xfs_agf { | |||
103 | /* disk block (xfs_daddr_t) in the AG */ | 103 | /* disk block (xfs_daddr_t) in the AG */ |
104 | #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) | 104 | #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) |
105 | #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) | 105 | #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) |
106 | #define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp)) | 106 | #define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr)) |
107 | 107 | ||
108 | extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp, | 108 | extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp, |
109 | xfs_agnumber_t agno, int flags, struct xfs_buf **bpp); | 109 | xfs_agnumber_t agno, int flags, struct xfs_buf **bpp); |
@@ -156,7 +156,7 @@ typedef struct xfs_agi { | |||
156 | /* disk block (xfs_daddr_t) in the AG */ | 156 | /* disk block (xfs_daddr_t) in the AG */ |
157 | #define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) | 157 | #define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) |
158 | #define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) | 158 | #define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) |
159 | #define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp)) | 159 | #define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr)) |
160 | 160 | ||
161 | extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, | 161 | extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, |
162 | xfs_agnumber_t agno, struct xfs_buf **bpp); | 162 | xfs_agnumber_t agno, struct xfs_buf **bpp); |
@@ -168,7 +168,7 @@ extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, | |||
168 | #define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) | 168 | #define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) |
169 | #define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) | 169 | #define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) |
170 | #define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) | 170 | #define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) |
171 | #define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp)) | 171 | #define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr)) |
172 | 172 | ||
173 | typedef struct xfs_agfl { | 173 | typedef struct xfs_agfl { |
174 | __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */ | 174 | __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */ |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 1e00b3ef6274..bdd9cb54d63b 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -451,8 +451,7 @@ xfs_alloc_read_agfl( | |||
451 | XFS_FSS_TO_BB(mp, 1), 0, &bp); | 451 | XFS_FSS_TO_BB(mp, 1), 0, &bp); |
452 | if (error) | 452 | if (error) |
453 | return error; | 453 | return error; |
454 | ASSERT(bp); | 454 | ASSERT(!xfs_buf_geterror(bp)); |
455 | ASSERT(!XFS_BUF_GETERROR(bp)); | ||
456 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF); | 455 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF); |
457 | *bpp = bp; | 456 | *bpp = bp; |
458 | return 0; | 457 | return 0; |
@@ -2116,7 +2115,7 @@ xfs_read_agf( | |||
2116 | if (!*bpp) | 2115 | if (!*bpp) |
2117 | return 0; | 2116 | return 0; |
2118 | 2117 | ||
2119 | ASSERT(!XFS_BUF_GETERROR(*bpp)); | 2118 | ASSERT(!(*bpp)->b_error); |
2120 | agf = XFS_BUF_TO_AGF(*bpp); | 2119 | agf = XFS_BUF_TO_AGF(*bpp); |
2121 | 2120 | ||
2122 | /* | 2121 | /* |
@@ -2168,7 +2167,7 @@ xfs_alloc_read_agf( | |||
2168 | return error; | 2167 | return error; |
2169 | if (!*bpp) | 2168 | if (!*bpp) |
2170 | return 0; | 2169 | return 0; |
2171 | ASSERT(!XFS_BUF_GETERROR(*bpp)); | 2170 | ASSERT(!(*bpp)->b_error); |
2172 | 2171 | ||
2173 | agf = XFS_BUF_TO_AGF(*bpp); | 2172 | agf = XFS_BUF_TO_AGF(*bpp); |
2174 | pag = xfs_perag_get(mp, agno); | 2173 | pag = xfs_perag_get(mp, agno); |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/xfs_aops.c index 63e971e2b837..63e971e2b837 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/xfs_aops.h index 71f721e1a71f..71f721e1a71f 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/xfs_aops.h | |||
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index cbae424fe1ba..160bcdc34a6e 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -2121,8 +2121,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2121 | 2121 | ||
2122 | bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, | 2122 | bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, |
2123 | XBF_LOCK | XBF_DONT_BLOCK); | 2123 | XBF_LOCK | XBF_DONT_BLOCK); |
2124 | ASSERT(bp); | 2124 | ASSERT(!xfs_buf_geterror(bp)); |
2125 | ASSERT(!XFS_BUF_GETERROR(bp)); | ||
2126 | 2125 | ||
2127 | tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : | 2126 | tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : |
2128 | XFS_BUF_SIZE(bp); | 2127 | XFS_BUF_SIZE(bp); |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index ab3e5c6c4642..452a291383ab 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -3383,8 +3383,7 @@ xfs_bmap_local_to_extents( | |||
3383 | ASSERT(args.len == 1); | 3383 | ASSERT(args.len == 1); |
3384 | *firstblock = args.fsbno; | 3384 | *firstblock = args.fsbno; |
3385 | bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); | 3385 | bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); |
3386 | memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data, | 3386 | memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); |
3387 | ifp->if_bytes); | ||
3388 | xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); | 3387 | xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); |
3389 | xfs_bmap_forkoff_reset(args.mp, ip, whichfork); | 3388 | xfs_bmap_forkoff_reset(args.mp, ip, whichfork); |
3390 | xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); | 3389 | xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); |
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index cabf4b5604aa..2b9fd385e27d 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -275,8 +275,7 @@ xfs_btree_dup_cursor( | |||
275 | return error; | 275 | return error; |
276 | } | 276 | } |
277 | new->bc_bufs[i] = bp; | 277 | new->bc_bufs[i] = bp; |
278 | ASSERT(bp); | 278 | ASSERT(!xfs_buf_geterror(bp)); |
279 | ASSERT(!XFS_BUF_GETERROR(bp)); | ||
280 | } else | 279 | } else |
281 | new->bc_bufs[i] = NULL; | 280 | new->bc_bufs[i] = NULL; |
282 | } | 281 | } |
@@ -467,8 +466,7 @@ xfs_btree_get_bufl( | |||
467 | ASSERT(fsbno != NULLFSBLOCK); | 466 | ASSERT(fsbno != NULLFSBLOCK); |
468 | d = XFS_FSB_TO_DADDR(mp, fsbno); | 467 | d = XFS_FSB_TO_DADDR(mp, fsbno); |
469 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); | 468 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); |
470 | ASSERT(bp); | 469 | ASSERT(!xfs_buf_geterror(bp)); |
471 | ASSERT(!XFS_BUF_GETERROR(bp)); | ||
472 | return bp; | 470 | return bp; |
473 | } | 471 | } |
474 | 472 | ||
@@ -491,8 +489,7 @@ xfs_btree_get_bufs( | |||
491 | ASSERT(agbno != NULLAGBLOCK); | 489 | ASSERT(agbno != NULLAGBLOCK); |
492 | d = XFS_AGB_TO_DADDR(mp, agno, agbno); | 490 | d = XFS_AGB_TO_DADDR(mp, agno, agbno); |
493 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); | 491 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); |
494 | ASSERT(bp); | 492 | ASSERT(!xfs_buf_geterror(bp)); |
495 | ASSERT(!XFS_BUF_GETERROR(bp)); | ||
496 | return bp; | 493 | return bp; |
497 | } | 494 | } |
498 | 495 | ||
@@ -632,7 +629,7 @@ xfs_btree_read_bufl( | |||
632 | mp->m_bsize, lock, &bp))) { | 629 | mp->m_bsize, lock, &bp))) { |
633 | return error; | 630 | return error; |
634 | } | 631 | } |
635 | ASSERT(!bp || !XFS_BUF_GETERROR(bp)); | 632 | ASSERT(!xfs_buf_geterror(bp)); |
636 | if (bp) | 633 | if (bp) |
637 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); | 634 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); |
638 | *bpp = bp; | 635 | *bpp = bp; |
@@ -973,8 +970,7 @@ xfs_btree_get_buf_block( | |||
973 | *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, | 970 | *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, |
974 | mp->m_bsize, flags); | 971 | mp->m_bsize, flags); |
975 | 972 | ||
976 | ASSERT(*bpp); | 973 | ASSERT(!xfs_buf_geterror(*bpp)); |
977 | ASSERT(!XFS_BUF_GETERROR(*bpp)); | ||
978 | 974 | ||
979 | *block = XFS_BUF_TO_BLOCK(*bpp); | 975 | *block = XFS_BUF_TO_BLOCK(*bpp); |
980 | return 0; | 976 | return 0; |
@@ -1006,8 +1002,7 @@ xfs_btree_read_buf_block( | |||
1006 | if (error) | 1002 | if (error) |
1007 | return error; | 1003 | return error; |
1008 | 1004 | ||
1009 | ASSERT(*bpp != NULL); | 1005 | ASSERT(!xfs_buf_geterror(*bpp)); |
1010 | ASSERT(!XFS_BUF_GETERROR(*bpp)); | ||
1011 | 1006 | ||
1012 | xfs_btree_set_refs(cur, *bpp); | 1007 | xfs_btree_set_refs(cur, *bpp); |
1013 | *block = XFS_BUF_TO_BLOCK(*bpp); | 1008 | *block = XFS_BUF_TO_BLOCK(*bpp); |
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 8d05a6a46ce3..5b240de104c0 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h | |||
@@ -262,7 +262,7 @@ typedef struct xfs_btree_cur | |||
262 | /* | 262 | /* |
263 | * Convert from buffer to btree block header. | 263 | * Convert from buffer to btree block header. |
264 | */ | 264 | */ |
265 | #define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)XFS_BUF_PTR(bp)) | 265 | #define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr)) |
266 | 266 | ||
267 | 267 | ||
268 | /* | 268 | /* |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/xfs_buf.c index d1fe74506c4c..c57836dc778f 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -596,7 +596,7 @@ _xfs_buf_read( | |||
596 | bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); | 596 | bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); |
597 | 597 | ||
598 | status = xfs_buf_iorequest(bp); | 598 | status = xfs_buf_iorequest(bp); |
599 | if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC)) | 599 | if (status || bp->b_error || (flags & XBF_ASYNC)) |
600 | return status; | 600 | return status; |
601 | return xfs_buf_iowait(bp); | 601 | return xfs_buf_iowait(bp); |
602 | } | 602 | } |
@@ -679,7 +679,6 @@ xfs_buf_read_uncached( | |||
679 | /* set up the buffer for a read IO */ | 679 | /* set up the buffer for a read IO */ |
680 | XFS_BUF_SET_ADDR(bp, daddr); | 680 | XFS_BUF_SET_ADDR(bp, daddr); |
681 | XFS_BUF_READ(bp); | 681 | XFS_BUF_READ(bp); |
682 | XFS_BUF_BUSY(bp); | ||
683 | 682 | ||
684 | xfsbdstrat(mp, bp); | 683 | xfsbdstrat(mp, bp); |
685 | error = xfs_buf_iowait(bp); | 684 | error = xfs_buf_iowait(bp); |
@@ -1069,7 +1068,7 @@ xfs_bioerror( | |||
1069 | /* | 1068 | /* |
1070 | * No need to wait until the buffer is unpinned, we aren't flushing it. | 1069 | * No need to wait until the buffer is unpinned, we aren't flushing it. |
1071 | */ | 1070 | */ |
1072 | XFS_BUF_ERROR(bp, EIO); | 1071 | xfs_buf_ioerror(bp, EIO); |
1073 | 1072 | ||
1074 | /* | 1073 | /* |
1075 | * We're calling xfs_buf_ioend, so delete XBF_DONE flag. | 1074 | * We're calling xfs_buf_ioend, so delete XBF_DONE flag. |
@@ -1094,7 +1093,7 @@ STATIC int | |||
1094 | xfs_bioerror_relse( | 1093 | xfs_bioerror_relse( |
1095 | struct xfs_buf *bp) | 1094 | struct xfs_buf *bp) |
1096 | { | 1095 | { |
1097 | int64_t fl = XFS_BUF_BFLAGS(bp); | 1096 | int64_t fl = bp->b_flags; |
1098 | /* | 1097 | /* |
1099 | * No need to wait until the buffer is unpinned. | 1098 | * No need to wait until the buffer is unpinned. |
1100 | * We aren't flushing it. | 1099 | * We aren't flushing it. |
@@ -1115,7 +1114,7 @@ xfs_bioerror_relse( | |||
1115 | * There's no reason to mark error for | 1114 | * There's no reason to mark error for |
1116 | * ASYNC buffers. | 1115 | * ASYNC buffers. |
1117 | */ | 1116 | */ |
1118 | XFS_BUF_ERROR(bp, EIO); | 1117 | xfs_buf_ioerror(bp, EIO); |
1119 | XFS_BUF_FINISH_IOWAIT(bp); | 1118 | XFS_BUF_FINISH_IOWAIT(bp); |
1120 | } else { | 1119 | } else { |
1121 | xfs_buf_relse(bp); | 1120 | xfs_buf_relse(bp); |
@@ -1324,7 +1323,7 @@ xfs_buf_offset( | |||
1324 | struct page *page; | 1323 | struct page *page; |
1325 | 1324 | ||
1326 | if (bp->b_flags & XBF_MAPPED) | 1325 | if (bp->b_flags & XBF_MAPPED) |
1327 | return XFS_BUF_PTR(bp) + offset; | 1326 | return bp->b_addr + offset; |
1328 | 1327 | ||
1329 | offset += bp->b_offset; | 1328 | offset += bp->b_offset; |
1330 | page = bp->b_pages[offset >> PAGE_SHIFT]; | 1329 | page = bp->b_pages[offset >> PAGE_SHIFT]; |
@@ -1484,7 +1483,7 @@ xfs_setsize_buftarg_flags( | |||
1484 | if (set_blocksize(btp->bt_bdev, sectorsize)) { | 1483 | if (set_blocksize(btp->bt_bdev, sectorsize)) { |
1485 | xfs_warn(btp->bt_mount, | 1484 | xfs_warn(btp->bt_mount, |
1486 | "Cannot set_blocksize to %u on device %s\n", | 1485 | "Cannot set_blocksize to %u on device %s\n", |
1487 | sectorsize, XFS_BUFTARG_NAME(btp)); | 1486 | sectorsize, xfs_buf_target_name(btp)); |
1488 | return EINVAL; | 1487 | return EINVAL; |
1489 | } | 1488 | } |
1490 | 1489 | ||
@@ -1681,7 +1680,7 @@ xfs_buf_delwri_split( | |||
1681 | list_for_each_entry_safe(bp, n, dwq, b_list) { | 1680 | list_for_each_entry_safe(bp, n, dwq, b_list) { |
1682 | ASSERT(bp->b_flags & XBF_DELWRI); | 1681 | ASSERT(bp->b_flags & XBF_DELWRI); |
1683 | 1682 | ||
1684 | if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) { | 1683 | if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) { |
1685 | if (!force && | 1684 | if (!force && |
1686 | time_before(jiffies, bp->b_queuetime + age)) { | 1685 | time_before(jiffies, bp->b_queuetime + age)) { |
1687 | xfs_buf_unlock(bp); | 1686 | xfs_buf_unlock(bp); |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/xfs_buf.h index 6a83b46b4bcf..620972b8094d 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
@@ -228,11 +228,15 @@ extern void xfs_buf_delwri_promote(xfs_buf_t *); | |||
228 | extern int xfs_buf_init(void); | 228 | extern int xfs_buf_init(void); |
229 | extern void xfs_buf_terminate(void); | 229 | extern void xfs_buf_terminate(void); |
230 | 230 | ||
231 | #define xfs_buf_target_name(target) \ | 231 | static inline const char * |
232 | ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) | 232 | xfs_buf_target_name(struct xfs_buftarg *target) |
233 | { | ||
234 | static char __b[BDEVNAME_SIZE]; | ||
235 | |||
236 | return bdevname(target->bt_bdev, __b); | ||
237 | } | ||
233 | 238 | ||
234 | 239 | ||
235 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) | ||
236 | #define XFS_BUF_ZEROFLAGS(bp) \ | 240 | #define XFS_BUF_ZEROFLAGS(bp) \ |
237 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ | 241 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ |
238 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) | 242 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) |
@@ -251,23 +255,14 @@ void xfs_buf_stale(struct xfs_buf *bp); | |||
251 | #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) | 255 | #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) |
252 | #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) | 256 | #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) |
253 | 257 | ||
254 | #define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no) | ||
255 | #define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp) | ||
256 | #define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0) | ||
257 | |||
258 | #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) | 258 | #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) |
259 | #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) | 259 | #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) |
260 | #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) | 260 | #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) |
261 | 261 | ||
262 | #define XFS_BUF_BUSY(bp) do { } while (0) | ||
263 | #define XFS_BUF_UNBUSY(bp) do { } while (0) | ||
264 | #define XFS_BUF_ISBUSY(bp) (1) | ||
265 | |||
266 | #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) | 262 | #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) |
267 | #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) | 263 | #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) |
268 | #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) | 264 | #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) |
269 | 265 | ||
270 | #define XFS_BUF_HOLD(bp) xfs_buf_hold(bp) | ||
271 | #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) | 266 | #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) |
272 | #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) | 267 | #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) |
273 | #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) | 268 | #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) |
@@ -276,10 +271,6 @@ void xfs_buf_stale(struct xfs_buf *bp); | |||
276 | #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) | 271 | #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) |
277 | #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) | 272 | #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) |
278 | 273 | ||
279 | #define XFS_BUF_SET_START(bp) do { } while (0) | ||
280 | |||
281 | #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) | ||
282 | #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) | ||
283 | #define XFS_BUF_ADDR(bp) ((bp)->b_bn) | 274 | #define XFS_BUF_ADDR(bp) ((bp)->b_bn) |
284 | #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) | 275 | #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) |
285 | #define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) | 276 | #define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) |
@@ -299,14 +290,13 @@ xfs_buf_set_ref( | |||
299 | #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) | 290 | #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) |
300 | #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) | 291 | #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) |
301 | 292 | ||
302 | #define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) | 293 | static inline int xfs_buf_ispinned(struct xfs_buf *bp) |
294 | { | ||
295 | return atomic_read(&bp->b_pin_count); | ||
296 | } | ||
303 | 297 | ||
304 | #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); | 298 | #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); |
305 | 299 | ||
306 | #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) | ||
307 | #define XFS_BUF_TARGET(bp) ((bp)->b_target) | ||
308 | #define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) | ||
309 | |||
310 | static inline void xfs_buf_relse(xfs_buf_t *bp) | 300 | static inline void xfs_buf_relse(xfs_buf_t *bp) |
311 | { | 301 | { |
312 | xfs_buf_unlock(bp); | 302 | xfs_buf_unlock(bp); |
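The xfs_buf.h hunk above, and the callers touched throughout the rest of this series, retire a layer of XFS_BUF_* wrapper macros in favour of small inline helpers and direct field access: xfs_buf_geterror(bp) replaces XFS_BUF_GETERROR/XFS_BUF_ISERROR, xfs_buf_ispinned(bp) replaces XFS_BUF_ISPINNED, bp->b_addr replaces XFS_BUF_PTR, bp->b_target replaces XFS_BUF_TARGET, and the no-op BUSY macros simply disappear. A before/after sketch of a typical caller, condensed from the hunks above (illustrative only; dst is a placeholder destination buffer):

	/* before */
	ASSERT(bp);
	ASSERT(!XFS_BUF_GETERROR(bp));
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);
	memcpy(dst, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));

	/* after */
	ASSERT(!xfs_buf_geterror(bp));
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	memcpy(dst, bp->b_addr, XFS_BUF_COUNT(bp));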
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 88492916c3dc..cac2ecfa6746 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -124,9 +124,9 @@ xfs_buf_item_log_check( | |||
124 | 124 | ||
125 | bp = bip->bli_buf; | 125 | bp = bip->bli_buf; |
126 | ASSERT(XFS_BUF_COUNT(bp) > 0); | 126 | ASSERT(XFS_BUF_COUNT(bp) > 0); |
127 | ASSERT(XFS_BUF_PTR(bp) != NULL); | 127 | ASSERT(bp->b_addr != NULL); |
128 | orig = bip->bli_orig; | 128 | orig = bip->bli_orig; |
129 | buffer = XFS_BUF_PTR(bp); | 129 | buffer = bp->b_addr; |
130 | for (x = 0; x < XFS_BUF_COUNT(bp); x++) { | 130 | for (x = 0; x < XFS_BUF_COUNT(bp); x++) { |
131 | if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) { | 131 | if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) { |
132 | xfs_emerg(bp->b_mount, | 132 | xfs_emerg(bp->b_mount, |
@@ -371,7 +371,6 @@ xfs_buf_item_pin( | |||
371 | { | 371 | { |
372 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | 372 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
373 | 373 | ||
374 | ASSERT(XFS_BUF_ISBUSY(bip->bli_buf)); | ||
375 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 374 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
376 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || | 375 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || |
377 | (bip->bli_flags & XFS_BLI_STALE)); | 376 | (bip->bli_flags & XFS_BLI_STALE)); |
@@ -479,13 +478,13 @@ xfs_buf_item_trylock( | |||
479 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | 478 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
480 | struct xfs_buf *bp = bip->bli_buf; | 479 | struct xfs_buf *bp = bip->bli_buf; |
481 | 480 | ||
482 | if (XFS_BUF_ISPINNED(bp)) | 481 | if (xfs_buf_ispinned(bp)) |
483 | return XFS_ITEM_PINNED; | 482 | return XFS_ITEM_PINNED; |
484 | if (!xfs_buf_trylock(bp)) | 483 | if (!xfs_buf_trylock(bp)) |
485 | return XFS_ITEM_LOCKED; | 484 | return XFS_ITEM_LOCKED; |
486 | 485 | ||
487 | /* take a reference to the buffer. */ | 486 | /* take a reference to the buffer. */ |
488 | XFS_BUF_HOLD(bp); | 487 | xfs_buf_hold(bp); |
489 | 488 | ||
490 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); | 489 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); |
491 | trace_xfs_buf_item_trylock(bip); | 490 | trace_xfs_buf_item_trylock(bip); |
@@ -726,7 +725,7 @@ xfs_buf_item_init( | |||
726 | * to have logged. | 725 | * to have logged. |
727 | */ | 726 | */ |
728 | bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); | 727 | bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); |
729 | memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp)); | 728 | memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp)); |
730 | bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); | 729 | bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); |
731 | #endif | 730 | #endif |
732 | 731 | ||
@@ -895,7 +894,6 @@ xfs_buf_attach_iodone( | |||
895 | { | 894 | { |
896 | xfs_log_item_t *head_lip; | 895 | xfs_log_item_t *head_lip; |
897 | 896 | ||
898 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
899 | ASSERT(xfs_buf_islocked(bp)); | 897 | ASSERT(xfs_buf_islocked(bp)); |
900 | 898 | ||
901 | lip->li_cb = cb; | 899 | lip->li_cb = cb; |
@@ -960,7 +958,7 @@ xfs_buf_iodone_callbacks( | |||
960 | static ulong lasttime; | 958 | static ulong lasttime; |
961 | static xfs_buftarg_t *lasttarg; | 959 | static xfs_buftarg_t *lasttarg; |
962 | 960 | ||
963 | if (likely(!XFS_BUF_GETERROR(bp))) | 961 | if (likely(!xfs_buf_geterror(bp))) |
964 | goto do_callbacks; | 962 | goto do_callbacks; |
965 | 963 | ||
966 | /* | 964 | /* |
@@ -973,14 +971,14 @@ xfs_buf_iodone_callbacks( | |||
973 | goto do_callbacks; | 971 | goto do_callbacks; |
974 | } | 972 | } |
975 | 973 | ||
976 | if (XFS_BUF_TARGET(bp) != lasttarg || | 974 | if (bp->b_target != lasttarg || |
977 | time_after(jiffies, (lasttime + 5*HZ))) { | 975 | time_after(jiffies, (lasttime + 5*HZ))) { |
978 | lasttime = jiffies; | 976 | lasttime = jiffies; |
979 | xfs_alert(mp, "Device %s: metadata write error block 0x%llx", | 977 | xfs_alert(mp, "Device %s: metadata write error block 0x%llx", |
980 | XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), | 978 | xfs_buf_target_name(bp->b_target), |
981 | (__uint64_t)XFS_BUF_ADDR(bp)); | 979 | (__uint64_t)XFS_BUF_ADDR(bp)); |
982 | } | 980 | } |
983 | lasttarg = XFS_BUF_TARGET(bp); | 981 | lasttarg = bp->b_target; |
984 | 982 | ||
985 | /* | 983 | /* |
986 | * If the write was asynchronous then no one will be looking for the | 984 | * If the write was asynchronous then no one will be looking for the |
@@ -991,12 +989,11 @@ xfs_buf_iodone_callbacks( | |||
991 | * around. | 989 | * around. |
992 | */ | 990 | */ |
993 | if (XFS_BUF_ISASYNC(bp)) { | 991 | if (XFS_BUF_ISASYNC(bp)) { |
994 | XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */ | 992 | xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ |
995 | 993 | ||
996 | if (!XFS_BUF_ISSTALE(bp)) { | 994 | if (!XFS_BUF_ISSTALE(bp)) { |
997 | XFS_BUF_DELAYWRITE(bp); | 995 | XFS_BUF_DELAYWRITE(bp); |
998 | XFS_BUF_DONE(bp); | 996 | XFS_BUF_DONE(bp); |
999 | XFS_BUF_SET_START(bp); | ||
1000 | } | 997 | } |
1001 | ASSERT(bp->b_iodone != NULL); | 998 | ASSERT(bp->b_iodone != NULL); |
1002 | trace_xfs_buf_item_iodone_async(bp, _RET_IP_); | 999 | trace_xfs_buf_item_iodone_async(bp, _RET_IP_); |
@@ -1013,7 +1010,6 @@ xfs_buf_iodone_callbacks( | |||
1013 | XFS_BUF_UNDELAYWRITE(bp); | 1010 | XFS_BUF_UNDELAYWRITE(bp); |
1014 | 1011 | ||
1015 | trace_xfs_buf_error_relse(bp, _RET_IP_); | 1012 | trace_xfs_buf_error_relse(bp, _RET_IP_); |
1016 | xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); | ||
1017 | 1013 | ||
1018 | do_callbacks: | 1014 | do_callbacks: |
1019 | xfs_buf_do_callbacks(bp); | 1015 | xfs_buf_do_callbacks(bp); |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 5bfcb8779f9f..ee9d5427fcd4 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
@@ -2050,7 +2050,7 @@ xfs_da_do_buf( | |||
2050 | case 0: | 2050 | case 0: |
2051 | bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, | 2051 | bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, |
2052 | mappedbno, nmapped, 0); | 2052 | mappedbno, nmapped, 0); |
2053 | error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO); | 2053 | error = bp ? bp->b_error : XFS_ERROR(EIO); |
2054 | break; | 2054 | break; |
2055 | case 1: | 2055 | case 1: |
2056 | case 2: | 2056 | case 2: |
@@ -2268,7 +2268,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps) | |||
2268 | dabuf->nbuf = 1; | 2268 | dabuf->nbuf = 1; |
2269 | bp = bps[0]; | 2269 | bp = bps[0]; |
2270 | dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); | 2270 | dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); |
2271 | dabuf->data = XFS_BUF_PTR(bp); | 2271 | dabuf->data = bp->b_addr; |
2272 | dabuf->bps[0] = bp; | 2272 | dabuf->bps[0] = bp; |
2273 | } else { | 2273 | } else { |
2274 | dabuf->nbuf = nbuf; | 2274 | dabuf->nbuf = nbuf; |
@@ -2279,7 +2279,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps) | |||
2279 | dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); | 2279 | dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); |
2280 | for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { | 2280 | for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { |
2281 | bp = bps[i]; | 2281 | bp = bps[i]; |
2282 | memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp), | 2282 | memcpy((char *)dabuf->data + off, bp->b_addr, |
2283 | XFS_BUF_COUNT(bp)); | 2283 | XFS_BUF_COUNT(bp)); |
2284 | } | 2284 | } |
2285 | } | 2285 | } |
@@ -2302,8 +2302,8 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf) | |||
2302 | for (i = off = 0; i < dabuf->nbuf; | 2302 | for (i = off = 0; i < dabuf->nbuf; |
2303 | i++, off += XFS_BUF_COUNT(bp)) { | 2303 | i++, off += XFS_BUF_COUNT(bp)) { |
2304 | bp = dabuf->bps[i]; | 2304 | bp = dabuf->bps[i]; |
2305 | memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off, | 2305 | memcpy(bp->b_addr, dabuf->data + off, |
2306 | XFS_BUF_COUNT(bp)); | 2306 | XFS_BUF_COUNT(bp)); |
2307 | } | 2307 | } |
2308 | } | 2308 | } |
2309 | } | 2309 | } |
@@ -2340,7 +2340,7 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last) | |||
2340 | 2340 | ||
2341 | ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); | 2341 | ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); |
2342 | if (dabuf->nbuf == 1) { | 2342 | if (dabuf->nbuf == 1) { |
2343 | ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0])); | 2343 | ASSERT(dabuf->data == dabuf->bps[0]->b_addr); |
2344 | xfs_trans_log_buf(tp, dabuf->bps[0], first, last); | 2344 | xfs_trans_log_buf(tp, dabuf->bps[0], first, last); |
2345 | return; | 2345 | return; |
2346 | } | 2346 | } |
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index dffba9ba0db6..a3721633abc8 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h | |||
@@ -148,7 +148,7 @@ typedef enum xfs_dinode_fmt { | |||
148 | be32_to_cpu((dip)->di_nextents) : \ | 148 | be32_to_cpu((dip)->di_nextents) : \ |
149 | be16_to_cpu((dip)->di_anextents)) | 149 | be16_to_cpu((dip)->di_anextents)) |
150 | 150 | ||
151 | #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) | 151 | #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)((bp)->b_addr)) |
152 | 152 | ||
153 | /* | 153 | /* |
154 | * For block and character special files the 32bit dev_t is stored at the | 154 | * For block and character special files the 32bit dev_t is stored at the |
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/xfs_discard.c index 244e797dae32..244e797dae32 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/xfs_discard.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/xfs_discard.h index 344879aea646..344879aea646 100644 --- a/fs/xfs/linux-2.6/xfs_discard.h +++ b/fs/xfs/xfs_discard.h | |||
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 837f31158d43..db62959bed13 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -318,10 +318,9 @@ xfs_qm_init_dquot_blk( | |||
318 | int curid, i; | 318 | int curid, i; |
319 | 319 | ||
320 | ASSERT(tp); | 320 | ASSERT(tp); |
321 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
322 | ASSERT(xfs_buf_islocked(bp)); | 321 | ASSERT(xfs_buf_islocked(bp)); |
323 | 322 | ||
324 | d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); | 323 | d = bp->b_addr; |
325 | 324 | ||
326 | /* | 325 | /* |
327 | * ID of the first dquot in the block - id's are zero based. | 326 | * ID of the first dquot in the block - id's are zero based. |
@@ -403,7 +402,7 @@ xfs_qm_dqalloc( | |||
403 | dqp->q_blkno, | 402 | dqp->q_blkno, |
404 | mp->m_quotainfo->qi_dqchunklen, | 403 | mp->m_quotainfo->qi_dqchunklen, |
405 | 0); | 404 | 0); |
406 | if (!bp || (error = XFS_BUF_GETERROR(bp))) | 405 | if (!bp || (error = xfs_buf_geterror(bp))) |
407 | goto error1; | 406 | goto error1; |
408 | /* | 407 | /* |
409 | * Make a chunk of dquots out of this buffer and log | 408 | * Make a chunk of dquots out of this buffer and log |
@@ -534,13 +533,12 @@ xfs_qm_dqtobp( | |||
534 | return XFS_ERROR(error); | 533 | return XFS_ERROR(error); |
535 | } | 534 | } |
536 | 535 | ||
537 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
538 | ASSERT(xfs_buf_islocked(bp)); | 536 | ASSERT(xfs_buf_islocked(bp)); |
539 | 537 | ||
540 | /* | 538 | /* |
541 | * calculate the location of the dquot inside the buffer. | 539 | * calculate the location of the dquot inside the buffer. |
542 | */ | 540 | */ |
543 | ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); | 541 | ddq = bp->b_addr + dqp->q_bufoffset; |
544 | 542 | ||
545 | /* | 543 | /* |
546 | * A simple sanity check in case we got a corrupted dquot... | 544 | * A simple sanity check in case we got a corrupted dquot... |
@@ -553,7 +551,6 @@ xfs_qm_dqtobp( | |||
553 | xfs_trans_brelse(tp, bp); | 551 | xfs_trans_brelse(tp, bp); |
554 | return XFS_ERROR(EIO); | 552 | return XFS_ERROR(EIO); |
555 | } | 553 | } |
556 | XFS_BUF_BUSY(bp); /* We dirtied this */ | ||
557 | } | 554 | } |
558 | 555 | ||
559 | *O_bpp = bp; | 556 | *O_bpp = bp; |
@@ -622,7 +619,6 @@ xfs_qm_dqread( | |||
622 | * this particular dquot was repaired. We still aren't afraid to | 619 | * this particular dquot was repaired. We still aren't afraid to |
623 | * brelse it because we have the changes incore. | 620 | * brelse it because we have the changes incore. |
624 | */ | 621 | */ |
625 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
626 | ASSERT(xfs_buf_islocked(bp)); | 622 | ASSERT(xfs_buf_islocked(bp)); |
627 | xfs_trans_brelse(tp, bp); | 623 | xfs_trans_brelse(tp, bp); |
628 | 624 | ||
@@ -1204,7 +1200,7 @@ xfs_qm_dqflush( | |||
1204 | /* | 1200 | /* |
1205 | * Calculate the location of the dquot inside the buffer. | 1201 | * Calculate the location of the dquot inside the buffer. |
1206 | */ | 1202 | */ |
1207 | ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); | 1203 | ddqp = bp->b_addr + dqp->q_bufoffset; |
1208 | 1204 | ||
1209 | /* | 1205 | /* |
1210 | * A simple sanity check in case we got a corrupted dquot.. | 1206 | * A simple sanity check in case we got a corrupted dquot.. |
@@ -1240,7 +1236,7 @@ xfs_qm_dqflush( | |||
1240 | * If the buffer is pinned then push on the log so we won't | 1236 | * If the buffer is pinned then push on the log so we won't |
1241 | * get stuck waiting in the write for too long. | 1237 | * get stuck waiting in the write for too long. |
1242 | */ | 1238 | */ |
1243 | if (XFS_BUF_ISPINNED(bp)) { | 1239 | if (xfs_buf_ispinned(bp)) { |
1244 | trace_xfs_dqflush_force(dqp); | 1240 | trace_xfs_dqflush_force(dqp); |
1245 | xfs_log_force(mp, 0); | 1241 | xfs_log_force(mp, 0); |
1246 | } | 1242 | } |
@@ -1447,7 +1443,7 @@ xfs_qm_dqflock_pushbuf_wait( | |||
1447 | goto out_lock; | 1443 | goto out_lock; |
1448 | 1444 | ||
1449 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 1445 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
1450 | if (XFS_BUF_ISPINNED(bp)) | 1446 | if (xfs_buf_ispinned(bp)) |
1451 | xfs_log_force(mp, 0); | 1447 | xfs_log_force(mp, 0); |
1452 | xfs_buf_delwri_promote(bp); | 1448 | xfs_buf_delwri_promote(bp); |
1453 | wake_up_process(bp->b_target->bt_task); | 1449 | wake_up_process(bp->b_target->bt_task); |
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 34b7e945dbfa..34b7e945dbfa 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h | |||
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 9e0e2fa3f2c8..9e0e2fa3f2c8 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c | |||
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h index 5acae2ada70b..5acae2ada70b 100644 --- a/fs/xfs/quota/xfs_dquot_item.h +++ b/fs/xfs/xfs_dquot_item.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/xfs_export.c index 75e5d322e48f..75e5d322e48f 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/xfs_export.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/xfs_export.h index 3272b6ae7a35..3272b6ae7a35 100644 --- a/fs/xfs/linux-2.6/xfs_export.h +++ b/fs/xfs/xfs_export.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/xfs_file.c index 7f7b42469ea7..7f7b42469ea7 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/xfs_fs_subr.c index ed88ed16811c..ed88ed16811c 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/xfs_fs_subr.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/xfs_globals.c index 76e81cff70b9..76e81cff70b9 100644 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ b/fs/xfs/xfs_globals.c | |||
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index dd5628bd8d0b..9f24ec28283b 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -202,8 +202,7 @@ xfs_ialloc_inode_init( | |||
202 | fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | 202 | fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, |
203 | mp->m_bsize * blks_per_cluster, | 203 | mp->m_bsize * blks_per_cluster, |
204 | XBF_LOCK); | 204 | XBF_LOCK); |
205 | ASSERT(fbuf); | 205 | ASSERT(!xfs_buf_geterror(fbuf)); |
206 | ASSERT(!XFS_BUF_GETERROR(fbuf)); | ||
207 | 206 | ||
208 | /* | 207 | /* |
209 | * Initialize all inodes in this buffer and then log them. | 208 | * Initialize all inodes in this buffer and then log them. |
@@ -1486,7 +1485,7 @@ xfs_read_agi( | |||
1486 | if (error) | 1485 | if (error) |
1487 | return error; | 1486 | return error; |
1488 | 1487 | ||
1489 | ASSERT(*bpp && !XFS_BUF_GETERROR(*bpp)); | 1488 | ASSERT(!xfs_buf_geterror(*bpp)); |
1490 | agi = XFS_BUF_TO_AGI(*bpp); | 1489 | agi = XFS_BUF_TO_AGI(*bpp); |
1491 | 1490 | ||
1492 | /* | 1491 | /* |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 2fcca4b03ed3..0239a7c7c886 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -2473,7 +2473,7 @@ cluster_corrupt_out: | |||
2473 | if (bp->b_iodone) { | 2473 | if (bp->b_iodone) { |
2474 | XFS_BUF_UNDONE(bp); | 2474 | XFS_BUF_UNDONE(bp); |
2475 | XFS_BUF_STALE(bp); | 2475 | XFS_BUF_STALE(bp); |
2476 | XFS_BUF_ERROR(bp,EIO); | 2476 | xfs_buf_ioerror(bp, EIO); |
2477 | xfs_buf_ioend(bp, 0); | 2477 | xfs_buf_ioend(bp, 0); |
2478 | } else { | 2478 | } else { |
2479 | XFS_BUF_STALE(bp); | 2479 | XFS_BUF_STALE(bp); |
@@ -2585,7 +2585,7 @@ xfs_iflush( | |||
2585 | * If the buffer is pinned then push on the log now so we won't | 2585 | * If the buffer is pinned then push on the log now so we won't |
2586 | * get stuck waiting in the write for too long. | 2586 | * get stuck waiting in the write for too long. |
2587 | */ | 2587 | */ |
2588 | if (XFS_BUF_ISPINNED(bp)) | 2588 | if (xfs_buf_ispinned(bp)) |
2589 | xfs_log_force(mp, 0); | 2589 | xfs_log_force(mp, 0); |
2590 | 2590 | ||
2591 | /* | 2591 | /* |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index f7ce7debe14c..f7ce7debe14c 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h index d56173b34a2a..d56173b34a2a 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/xfs_ioctl.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index 54e623bfbb85..54e623bfbb85 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h index 80f4060e8970..80f4060e8970 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/xfs_ioctl32.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/xfs_iops.c index b9c172b3fbbe..b9c172b3fbbe 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/xfs_iops.h index ef41c92ce66e..ef41c92ce66e 100644 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ b/fs/xfs/xfs_iops.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/xfs_linux.h index d42f814e4d35..1e8a45e74c3e 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/xfs_linux.h | |||
@@ -32,13 +32,12 @@ | |||
32 | # define XFS_BIG_INUMS 0 | 32 | # define XFS_BIG_INUMS 0 |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | #include <xfs_types.h> | 35 | #include "xfs_types.h" |
36 | 36 | ||
37 | #include <kmem.h> | 37 | #include "kmem.h" |
38 | #include <mrlock.h> | 38 | #include "mrlock.h" |
39 | #include <time.h> | 39 | #include "time.h" |
40 | 40 | #include "uuid.h" | |
41 | #include <support/uuid.h> | ||
42 | 41 | ||
43 | #include <linux/semaphore.h> | 42 | #include <linux/semaphore.h> |
44 | #include <linux/mm.h> | 43 | #include <linux/mm.h> |
@@ -78,14 +77,14 @@ | |||
78 | #include <asm/byteorder.h> | 77 | #include <asm/byteorder.h> |
79 | #include <asm/unaligned.h> | 78 | #include <asm/unaligned.h> |
80 | 79 | ||
81 | #include <xfs_vnode.h> | 80 | #include "xfs_vnode.h" |
82 | #include <xfs_stats.h> | 81 | #include "xfs_stats.h" |
83 | #include <xfs_sysctl.h> | 82 | #include "xfs_sysctl.h" |
84 | #include <xfs_iops.h> | 83 | #include "xfs_iops.h" |
85 | #include <xfs_aops.h> | 84 | #include "xfs_aops.h" |
86 | #include <xfs_super.h> | 85 | #include "xfs_super.h" |
87 | #include <xfs_buf.h> | 86 | #include "xfs_buf.h" |
88 | #include <xfs_message.h> | 87 | #include "xfs_message.h" |
89 | 88 | ||
90 | #ifdef __BIG_ENDIAN | 89 | #ifdef __BIG_ENDIAN |
91 | #define XFS_NATIVE_HOST 1 | 90 | #define XFS_NATIVE_HOST 1 |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 06ff8437ed8e..3a8d4f66d702 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -878,7 +878,7 @@ xlog_iodone(xfs_buf_t *bp) | |||
878 | /* | 878 | /* |
879 | * Race to shutdown the filesystem if we see an error. | 879 | * Race to shutdown the filesystem if we see an error. |
880 | */ | 880 | */ |
881 | if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp, | 881 | if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp, |
882 | XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) { | 882 | XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) { |
883 | xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp)); | 883 | xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp)); |
884 | XFS_BUF_STALE(bp); | 884 | XFS_BUF_STALE(bp); |
@@ -1051,7 +1051,6 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1051 | if (!bp) | 1051 | if (!bp) |
1052 | goto out_free_log; | 1052 | goto out_free_log; |
1053 | bp->b_iodone = xlog_iodone; | 1053 | bp->b_iodone = xlog_iodone; |
1054 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
1055 | ASSERT(xfs_buf_islocked(bp)); | 1054 | ASSERT(xfs_buf_islocked(bp)); |
1056 | log->l_xbuf = bp; | 1055 | log->l_xbuf = bp; |
1057 | 1056 | ||
@@ -1108,7 +1107,6 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1108 | iclog->ic_callback_tail = &(iclog->ic_callback); | 1107 | iclog->ic_callback_tail = &(iclog->ic_callback); |
1109 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; | 1108 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; |
1110 | 1109 | ||
1111 | ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); | ||
1112 | ASSERT(xfs_buf_islocked(iclog->ic_bp)); | 1110 | ASSERT(xfs_buf_islocked(iclog->ic_bp)); |
1113 | init_waitqueue_head(&iclog->ic_force_wait); | 1111 | init_waitqueue_head(&iclog->ic_force_wait); |
1114 | init_waitqueue_head(&iclog->ic_write_wait); | 1112 | init_waitqueue_head(&iclog->ic_write_wait); |
@@ -1248,7 +1246,7 @@ xlog_bdstrat( | |||
1248 | struct xlog_in_core *iclog = bp->b_fspriv; | 1246 | struct xlog_in_core *iclog = bp->b_fspriv; |
1249 | 1247 | ||
1250 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 1248 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
1251 | XFS_BUF_ERROR(bp, EIO); | 1249 | xfs_buf_ioerror(bp, EIO); |
1252 | XFS_BUF_STALE(bp); | 1250 | XFS_BUF_STALE(bp); |
1253 | xfs_buf_ioend(bp, 0); | 1251 | xfs_buf_ioend(bp, 0); |
1254 | /* | 1252 | /* |
@@ -1355,7 +1353,6 @@ xlog_sync(xlog_t *log, | |||
1355 | XFS_BUF_SET_COUNT(bp, count); | 1353 | XFS_BUF_SET_COUNT(bp, count); |
1356 | bp->b_fspriv = iclog; | 1354 | bp->b_fspriv = iclog; |
1357 | XFS_BUF_ZEROFLAGS(bp); | 1355 | XFS_BUF_ZEROFLAGS(bp); |
1358 | XFS_BUF_BUSY(bp); | ||
1359 | XFS_BUF_ASYNC(bp); | 1356 | XFS_BUF_ASYNC(bp); |
1360 | bp->b_flags |= XBF_SYNCIO; | 1357 | bp->b_flags |= XBF_SYNCIO; |
1361 | 1358 | ||
@@ -1398,16 +1395,15 @@ xlog_sync(xlog_t *log, | |||
1398 | if (split) { | 1395 | if (split) { |
1399 | bp = iclog->ic_log->l_xbuf; | 1396 | bp = iclog->ic_log->l_xbuf; |
1400 | XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ | 1397 | XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ |
1401 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ | 1398 | xfs_buf_associate_memory(bp, |
1402 | (__psint_t)count), split); | 1399 | (char *)&iclog->ic_header + count, split); |
1403 | bp->b_fspriv = iclog; | 1400 | bp->b_fspriv = iclog; |
1404 | XFS_BUF_ZEROFLAGS(bp); | 1401 | XFS_BUF_ZEROFLAGS(bp); |
1405 | XFS_BUF_BUSY(bp); | ||
1406 | XFS_BUF_ASYNC(bp); | 1402 | XFS_BUF_ASYNC(bp); |
1407 | bp->b_flags |= XBF_SYNCIO; | 1403 | bp->b_flags |= XBF_SYNCIO; |
1408 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1404 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
1409 | bp->b_flags |= XBF_FUA; | 1405 | bp->b_flags |= XBF_FUA; |
1410 | dptr = XFS_BUF_PTR(bp); | 1406 | dptr = bp->b_addr; |
1411 | /* | 1407 | /* |
1412 | * Bump the cycle numbers at the start of each block | 1408 | * Bump the cycle numbers at the start of each block |
1413 | * since this part of the buffer is at the start of | 1409 | * since this part of the buffer is at the start of |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 052a2c0ec5fb..a199dbcee7d8 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -147,7 +147,7 @@ xlog_align( | |||
147 | xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1); | 147 | xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1); |
148 | 148 | ||
149 | ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp)); | 149 | ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp)); |
150 | return XFS_BUF_PTR(bp) + BBTOB(offset); | 150 | return bp->b_addr + BBTOB(offset); |
151 | } | 151 | } |
152 | 152 | ||
153 | 153 | ||
@@ -178,9 +178,7 @@ xlog_bread_noalign( | |||
178 | 178 | ||
179 | XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); | 179 | XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); |
180 | XFS_BUF_READ(bp); | 180 | XFS_BUF_READ(bp); |
181 | XFS_BUF_BUSY(bp); | ||
182 | XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); | 181 | XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); |
183 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); | ||
184 | 182 | ||
185 | xfsbdstrat(log->l_mp, bp); | 183 | xfsbdstrat(log->l_mp, bp); |
186 | error = xfs_buf_iowait(bp); | 184 | error = xfs_buf_iowait(bp); |
@@ -220,18 +218,18 @@ xlog_bread_offset( | |||
220 | xfs_buf_t *bp, | 218 | xfs_buf_t *bp, |
221 | xfs_caddr_t offset) | 219 | xfs_caddr_t offset) |
222 | { | 220 | { |
223 | xfs_caddr_t orig_offset = XFS_BUF_PTR(bp); | 221 | xfs_caddr_t orig_offset = bp->b_addr; |
224 | int orig_len = bp->b_buffer_length; | 222 | int orig_len = bp->b_buffer_length; |
225 | int error, error2; | 223 | int error, error2; |
226 | 224 | ||
227 | error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks)); | 225 | error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks)); |
228 | if (error) | 226 | if (error) |
229 | return error; | 227 | return error; |
230 | 228 | ||
231 | error = xlog_bread_noalign(log, blk_no, nbblks, bp); | 229 | error = xlog_bread_noalign(log, blk_no, nbblks, bp); |
232 | 230 | ||
233 | /* must reset buffer pointer even on error */ | 231 | /* must reset buffer pointer even on error */ |
234 | error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len); | 232 | error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len); |
235 | if (error) | 233 | if (error) |
236 | return error; | 234 | return error; |
237 | return error2; | 235 | return error2; |
@@ -266,11 +264,9 @@ xlog_bwrite( | |||
266 | 264 | ||
267 | XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); | 265 | XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); |
268 | XFS_BUF_ZEROFLAGS(bp); | 266 | XFS_BUF_ZEROFLAGS(bp); |
269 | XFS_BUF_BUSY(bp); | 267 | xfs_buf_hold(bp); |
270 | XFS_BUF_HOLD(bp); | ||
271 | xfs_buf_lock(bp); | 268 | xfs_buf_lock(bp); |
272 | XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); | 269 | XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); |
273 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); | ||
274 | 270 | ||
275 | if ((error = xfs_bwrite(log->l_mp, bp))) | 271 | if ((error = xfs_bwrite(log->l_mp, bp))) |
276 | xfs_ioerror_alert("xlog_bwrite", log->l_mp, | 272 | xfs_ioerror_alert("xlog_bwrite", log->l_mp, |
@@ -360,7 +356,7 @@ STATIC void | |||
360 | xlog_recover_iodone( | 356 | xlog_recover_iodone( |
361 | struct xfs_buf *bp) | 357 | struct xfs_buf *bp) |
362 | { | 358 | { |
363 | if (XFS_BUF_GETERROR(bp)) { | 359 | if (bp->b_error) { |
364 | /* | 360 | /* |
365 | * We're not going to bother about retrying | 361 | * We're not going to bother about retrying |
366 | * this during recovery. One strike! | 362 | * this during recovery. One strike! |
@@ -1262,7 +1258,7 @@ xlog_write_log_records( | |||
1262 | */ | 1258 | */ |
1263 | ealign = round_down(end_block, sectbb); | 1259 | ealign = round_down(end_block, sectbb); |
1264 | if (j == 0 && (start_block + endcount > ealign)) { | 1260 | if (j == 0 && (start_block + endcount > ealign)) { |
1265 | offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block); | 1261 | offset = bp->b_addr + BBTOB(ealign - start_block); |
1266 | error = xlog_bread_offset(log, ealign, sectbb, | 1262 | error = xlog_bread_offset(log, ealign, sectbb, |
1267 | bp, offset); | 1263 | bp, offset); |
1268 | if (error) | 1264 | if (error) |
@@ -2135,15 +2131,16 @@ xlog_recover_buffer_pass2( | |||
2135 | 2131 | ||
2136 | bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, | 2132 | bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, |
2137 | buf_flags); | 2133 | buf_flags); |
2138 | if (XFS_BUF_ISERROR(bp)) { | 2134 | if (!bp) |
2135 | return XFS_ERROR(ENOMEM); | ||
2136 | error = bp->b_error; | ||
2137 | if (error) { | ||
2139 | xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, | 2138 | xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, |
2140 | bp, buf_f->blf_blkno); | 2139 | bp, buf_f->blf_blkno); |
2141 | error = XFS_BUF_GETERROR(bp); | ||
2142 | xfs_buf_relse(bp); | 2140 | xfs_buf_relse(bp); |
2143 | return error; | 2141 | return error; |
2144 | } | 2142 | } |
2145 | 2143 | ||
2146 | error = 0; | ||
2147 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { | 2144 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { |
2148 | error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); | 2145 | error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); |
2149 | } else if (buf_f->blf_flags & | 2146 | } else if (buf_f->blf_flags & |
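Editorial note: the hunk above (and the matching conversions later in this diff) settles on one error-handling idiom for buffer reads — xfs_buf_read() may return NULL on allocation failure, and a returned buffer now reports I/O errors through bp->b_error rather than the removed XFS_BUF_GETERROR() macro. The fragment below is a sketch of that idiom only; example_read() is a hypothetical caller, not part of the patch, and assumes XFS kernel context.

/* Editorial sketch, assuming XFS kernel context; not part of the patch. */
static int
example_read(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	int			len,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	int			error;

	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, XBF_LOCK);
	if (!bp)
		return XFS_ERROR(ENOMEM);	/* allocation failure: no buffer at all */

	error = bp->b_error;			/* I/O errors now live in the buffer */
	if (error) {
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}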
@@ -2227,14 +2224,17 @@ xlog_recover_inode_pass2( | |||
2227 | 2224 | ||
2228 | bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, | 2225 | bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, |
2229 | XBF_LOCK); | 2226 | XBF_LOCK); |
2230 | if (XFS_BUF_ISERROR(bp)) { | 2227 | if (!bp) { |
2228 | error = ENOMEM; | ||
2229 | goto error; | ||
2230 | } | ||
2231 | error = bp->b_error; | ||
2232 | if (error) { | ||
2231 | xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, | 2233 | xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, |
2232 | bp, in_f->ilf_blkno); | 2234 | bp, in_f->ilf_blkno); |
2233 | error = XFS_BUF_GETERROR(bp); | ||
2234 | xfs_buf_relse(bp); | 2235 | xfs_buf_relse(bp); |
2235 | goto error; | 2236 | goto error; |
2236 | } | 2237 | } |
2237 | error = 0; | ||
2238 | ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); | 2238 | ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); |
2239 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); | 2239 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); |
2240 | 2240 | ||
@@ -3437,7 +3437,7 @@ xlog_do_recovery_pass( | |||
3437 | /* | 3437 | /* |
3438 | * Check for header wrapping around physical end-of-log | 3438 | * Check for header wrapping around physical end-of-log |
3439 | */ | 3439 | */ |
3440 | offset = XFS_BUF_PTR(hbp); | 3440 | offset = hbp->b_addr; |
3441 | split_hblks = 0; | 3441 | split_hblks = 0; |
3442 | wrapped_hblks = 0; | 3442 | wrapped_hblks = 0; |
3443 | if (blk_no + hblks <= log->l_logBBsize) { | 3443 | if (blk_no + hblks <= log->l_logBBsize) { |
@@ -3497,7 +3497,7 @@ xlog_do_recovery_pass( | |||
3497 | } else { | 3497 | } else { |
3498 | /* This log record is split across the | 3498 | /* This log record is split across the |
3499 | * physical end of log */ | 3499 | * physical end of log */ |
3500 | offset = XFS_BUF_PTR(dbp); | 3500 | offset = dbp->b_addr; |
3501 | split_bblks = 0; | 3501 | split_bblks = 0; |
3502 | if (blk_no != log->l_logBBsize) { | 3502 | if (blk_no != log->l_logBBsize) { |
3503 | /* some data is before the physical | 3503 | /* some data is before the physical |
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/xfs_message.c index bd672def95ac..bd672def95ac 100644 --- a/fs/xfs/linux-2.6/xfs_message.c +++ b/fs/xfs/xfs_message.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/xfs_message.h index 7fb7ea007672..7fb7ea007672 100644 --- a/fs/xfs/linux-2.6/xfs_message.h +++ b/fs/xfs/xfs_message.h | |||
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 092e16ae4d9d..0081657ad985 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -1615,7 +1615,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) | |||
1615 | XFS_BUF_UNDELAYWRITE(sbp); | 1615 | XFS_BUF_UNDELAYWRITE(sbp); |
1616 | XFS_BUF_WRITE(sbp); | 1616 | XFS_BUF_WRITE(sbp); |
1617 | XFS_BUF_UNASYNC(sbp); | 1617 | XFS_BUF_UNASYNC(sbp); |
1618 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); | 1618 | ASSERT(sbp->b_target == mp->m_ddev_targp); |
1619 | xfsbdstrat(mp, sbp); | 1619 | xfsbdstrat(mp, sbp); |
1620 | error = xfs_buf_iowait(sbp); | 1620 | error = xfs_buf_iowait(sbp); |
1621 | if (error) | 1621 | if (error) |
@@ -1938,7 +1938,7 @@ xfs_getsb( | |||
1938 | xfs_buf_lock(bp); | 1938 | xfs_buf_lock(bp); |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | XFS_BUF_HOLD(bp); | 1941 | xfs_buf_hold(bp); |
1942 | ASSERT(XFS_BUF_ISDONE(bp)); | 1942 | ASSERT(XFS_BUF_ISDONE(bp)); |
1943 | return bp; | 1943 | return bp; |
1944 | } | 1944 | } |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/xfs_qm.c index 46e54ad9a2dc..9a0aa76facdf 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -1240,7 +1240,7 @@ xfs_qm_reset_dqcounts( | |||
1240 | do_div(j, sizeof(xfs_dqblk_t)); | 1240 | do_div(j, sizeof(xfs_dqblk_t)); |
1241 | ASSERT(mp->m_quotainfo->qi_dqperchunk == j); | 1241 | ASSERT(mp->m_quotainfo->qi_dqperchunk == j); |
1242 | #endif | 1242 | #endif |
1243 | ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp); | 1243 | ddq = bp->b_addr; |
1244 | for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { | 1244 | for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { |
1245 | /* | 1245 | /* |
1246 | * Do a sanity check, and if needed, repair the dqblk. Don't | 1246 | * Do a sanity check, and if needed, repair the dqblk. Don't |
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/xfs_qm.h index 43b9abe1052c..43b9abe1052c 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index a0a829addca9..a0a829addca9 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c | |||
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c index 8671a0b32644..8671a0b32644 100644 --- a/fs/xfs/quota/xfs_qm_stats.c +++ b/fs/xfs/xfs_qm_stats.c | |||
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/xfs_qm_stats.h index 5b964fc0dc09..5b964fc0dc09 100644 --- a/fs/xfs/quota/xfs_qm_stats.h +++ b/fs/xfs/xfs_qm_stats.h | |||
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 609246f42e6c..609246f42e6c 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h index 94a3d927d716..94a3d927d716 100644 --- a/fs/xfs/quota/xfs_quota_priv.h +++ b/fs/xfs/xfs_quota_priv.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 29b9d642e93d..7e76f537abb7 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
26 | #include "xfs_bmap_btree.h" | 26 | #include "xfs_bmap_btree.h" |
27 | #include "xfs_inode.h" | 27 | #include "xfs_inode.h" |
28 | #include "quota/xfs_qm.h" | 28 | #include "xfs_qm.h" |
29 | #include <linux/quota.h> | 29 | #include <linux/quota.h> |
30 | 30 | ||
31 | 31 | ||
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 8f76fdff4f46..35561a511b57 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -168,7 +168,7 @@ error_cancel: | |||
168 | xfs_trans_cancel(tp, cancelflags); | 168 | xfs_trans_cancel(tp, cancelflags); |
169 | goto error; | 169 | goto error; |
170 | } | 170 | } |
171 | memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); | 171 | memset(bp->b_addr, 0, mp->m_sb.sb_blocksize); |
172 | xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); | 172 | xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); |
173 | /* | 173 | /* |
174 | * Commit the transaction. | 174 | * Commit the transaction. |
@@ -883,7 +883,7 @@ xfs_rtbuf_get( | |||
883 | if (error) { | 883 | if (error) { |
884 | return error; | 884 | return error; |
885 | } | 885 | } |
886 | ASSERT(bp && !XFS_BUF_GETERROR(bp)); | 886 | ASSERT(!xfs_buf_geterror(bp)); |
887 | *bpp = bp; | 887 | *bpp = bp; |
888 | return 0; | 888 | return 0; |
889 | } | 889 | } |
@@ -943,7 +943,7 @@ xfs_rtcheck_range( | |||
943 | if (error) { | 943 | if (error) { |
944 | return error; | 944 | return error; |
945 | } | 945 | } |
946 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 946 | bufp = bp->b_addr; |
947 | /* | 947 | /* |
948 | * Compute the starting word's address, and starting bit. | 948 | * Compute the starting word's address, and starting bit. |
949 | */ | 949 | */ |
@@ -994,7 +994,7 @@ xfs_rtcheck_range( | |||
994 | if (error) { | 994 | if (error) { |
995 | return error; | 995 | return error; |
996 | } | 996 | } |
997 | b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 997 | b = bufp = bp->b_addr; |
998 | word = 0; | 998 | word = 0; |
999 | } else { | 999 | } else { |
1000 | /* | 1000 | /* |
@@ -1040,7 +1040,7 @@ xfs_rtcheck_range( | |||
1040 | if (error) { | 1040 | if (error) { |
1041 | return error; | 1041 | return error; |
1042 | } | 1042 | } |
1043 | b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1043 | b = bufp = bp->b_addr; |
1044 | word = 0; | 1044 | word = 0; |
1045 | } else { | 1045 | } else { |
1046 | /* | 1046 | /* |
@@ -1158,7 +1158,7 @@ xfs_rtfind_back( | |||
1158 | if (error) { | 1158 | if (error) { |
1159 | return error; | 1159 | return error; |
1160 | } | 1160 | } |
1161 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1161 | bufp = bp->b_addr; |
1162 | /* | 1162 | /* |
1163 | * Get the first word's index & point to it. | 1163 | * Get the first word's index & point to it. |
1164 | */ | 1164 | */ |
@@ -1210,7 +1210,7 @@ xfs_rtfind_back( | |||
1210 | if (error) { | 1210 | if (error) { |
1211 | return error; | 1211 | return error; |
1212 | } | 1212 | } |
1213 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1213 | bufp = bp->b_addr; |
1214 | word = XFS_BLOCKWMASK(mp); | 1214 | word = XFS_BLOCKWMASK(mp); |
1215 | b = &bufp[word]; | 1215 | b = &bufp[word]; |
1216 | } else { | 1216 | } else { |
@@ -1256,7 +1256,7 @@ xfs_rtfind_back( | |||
1256 | if (error) { | 1256 | if (error) { |
1257 | return error; | 1257 | return error; |
1258 | } | 1258 | } |
1259 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1259 | bufp = bp->b_addr; |
1260 | word = XFS_BLOCKWMASK(mp); | 1260 | word = XFS_BLOCKWMASK(mp); |
1261 | b = &bufp[word]; | 1261 | b = &bufp[word]; |
1262 | } else { | 1262 | } else { |
@@ -1333,7 +1333,7 @@ xfs_rtfind_forw( | |||
1333 | if (error) { | 1333 | if (error) { |
1334 | return error; | 1334 | return error; |
1335 | } | 1335 | } |
1336 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1336 | bufp = bp->b_addr; |
1337 | /* | 1337 | /* |
1338 | * Get the first word's index & point to it. | 1338 | * Get the first word's index & point to it. |
1339 | */ | 1339 | */ |
@@ -1384,7 +1384,7 @@ xfs_rtfind_forw( | |||
1384 | if (error) { | 1384 | if (error) { |
1385 | return error; | 1385 | return error; |
1386 | } | 1386 | } |
1387 | b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1387 | b = bufp = bp->b_addr; |
1388 | word = 0; | 1388 | word = 0; |
1389 | } else { | 1389 | } else { |
1390 | /* | 1390 | /* |
@@ -1429,7 +1429,7 @@ xfs_rtfind_forw( | |||
1429 | if (error) { | 1429 | if (error) { |
1430 | return error; | 1430 | return error; |
1431 | } | 1431 | } |
1432 | b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1432 | b = bufp = bp->b_addr; |
1433 | word = 0; | 1433 | word = 0; |
1434 | } else { | 1434 | } else { |
1435 | /* | 1435 | /* |
@@ -1649,7 +1649,7 @@ xfs_rtmodify_range( | |||
1649 | if (error) { | 1649 | if (error) { |
1650 | return error; | 1650 | return error; |
1651 | } | 1651 | } |
1652 | bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1652 | bufp = bp->b_addr; |
1653 | /* | 1653 | /* |
1654 | * Compute the starting word's address, and starting bit. | 1654 | * Compute the starting word's address, and starting bit. |
1655 | */ | 1655 | */ |
@@ -1694,7 +1694,7 @@ xfs_rtmodify_range( | |||
1694 | if (error) { | 1694 | if (error) { |
1695 | return error; | 1695 | return error; |
1696 | } | 1696 | } |
1697 | first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1697 | first = b = bufp = bp->b_addr; |
1698 | word = 0; | 1698 | word = 0; |
1699 | } else { | 1699 | } else { |
1700 | /* | 1700 | /* |
@@ -1734,7 +1734,7 @@ xfs_rtmodify_range( | |||
1734 | if (error) { | 1734 | if (error) { |
1735 | return error; | 1735 | return error; |
1736 | } | 1736 | } |
1737 | first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); | 1737 | first = b = bufp = bp->b_addr; |
1738 | word = 0; | 1738 | word = 0; |
1739 | } else { | 1739 | } else { |
1740 | /* | 1740 | /* |
@@ -1832,8 +1832,8 @@ xfs_rtmodify_summary( | |||
1832 | */ | 1832 | */ |
1833 | sp = XFS_SUMPTR(mp, bp, so); | 1833 | sp = XFS_SUMPTR(mp, bp, so); |
1834 | *sp += delta; | 1834 | *sp += delta; |
1835 | xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)), | 1835 | xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr), |
1836 | (uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1)); | 1836 | (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1)); |
1837 | return 0; | 1837 | return 0; |
1838 | } | 1838 | } |
1839 | 1839 | ||
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h index 09e1f4f35e97..f7f3a359c1c5 100644 --- a/fs/xfs/xfs_rtalloc.h +++ b/fs/xfs/xfs_rtalloc.h | |||
@@ -47,7 +47,7 @@ struct xfs_trans; | |||
47 | #define XFS_SUMOFFSTOBLOCK(mp,s) \ | 47 | #define XFS_SUMOFFSTOBLOCK(mp,s) \ |
48 | (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) | 48 | (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) |
49 | #define XFS_SUMPTR(mp,bp,so) \ | 49 | #define XFS_SUMPTR(mp,bp,so) \ |
50 | ((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \ | 50 | ((xfs_suminfo_t *)((bp)->b_addr + \ |
51 | (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) | 51 | (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) |
52 | 52 | ||
53 | #define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) | 53 | #define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) |
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index d6d6fdfe9422..c96a8a05ac03 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c | |||
@@ -104,9 +104,9 @@ xfs_ioerror_alert( | |||
104 | xfs_alert(mp, | 104 | xfs_alert(mp, |
105 | "I/O error occurred: meta-data dev %s block 0x%llx" | 105 | "I/O error occurred: meta-data dev %s block 0x%llx" |
106 | " (\"%s\") error %d buf count %zd", | 106 | " (\"%s\") error %d buf count %zd", |
107 | XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), | 107 | xfs_buf_target_name(bp->b_target), |
108 | (__uint64_t)blkno, func, | 108 | (__uint64_t)blkno, func, |
109 | XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp)); | 109 | bp->b_error, XFS_BUF_COUNT(bp)); |
110 | } | 110 | } |
111 | 111 | ||
112 | /* | 112 | /* |
@@ -137,8 +137,8 @@ xfs_read_buf( | |||
137 | bp = xfs_buf_read(target, blkno, len, flags); | 137 | bp = xfs_buf_read(target, blkno, len, flags); |
138 | if (!bp) | 138 | if (!bp) |
139 | return XFS_ERROR(EIO); | 139 | return XFS_ERROR(EIO); |
140 | error = XFS_BUF_GETERROR(bp); | 140 | error = bp->b_error; |
141 | if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) { | 141 | if (!error && !XFS_FORCED_SHUTDOWN(mp)) { |
142 | *bpp = bp; | 142 | *bpp = bp; |
143 | } else { | 143 | } else { |
144 | *bpp = NULL; | 144 | *bpp = NULL; |
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 1eb2ba586814..cb6ae715814a 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h | |||
@@ -509,7 +509,7 @@ static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp) | |||
509 | 509 | ||
510 | #define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ | 510 | #define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ |
511 | #define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) | 511 | #define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) |
512 | #define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)XFS_BUF_PTR(bp)) | 512 | #define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr)) |
513 | 513 | ||
514 | #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) | 514 | #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) |
515 | #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ | 515 | #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ |
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/xfs_stats.c index 76fdc5861932..76fdc5861932 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/xfs_stats.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/xfs_stats.h index 736854b1ca1a..736854b1ca1a 100644 --- a/fs/xfs/linux-2.6/xfs_stats.h +++ b/fs/xfs/xfs_stats.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/xfs_super.c index 9a72dda58bd0..9a72dda58bd0 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/xfs_super.h index 50a3266c999e..50a3266c999e 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/xfs_super.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/xfs_sync.c index e4c938afb910..4604f90f86a3 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/xfs_sync.c | |||
@@ -332,7 +332,7 @@ xfs_sync_fsdata( | |||
332 | * between there and here. | 332 | * between there and here. |
333 | */ | 333 | */ |
334 | bp = xfs_getsb(mp, 0); | 334 | bp = xfs_getsb(mp, 0); |
335 | if (XFS_BUF_ISPINNED(bp)) | 335 | if (xfs_buf_ispinned(bp)) |
336 | xfs_log_force(mp, 0); | 336 | xfs_log_force(mp, 0); |
337 | 337 | ||
338 | return xfs_bwrite(mp, bp); | 338 | return xfs_bwrite(mp, bp); |
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/xfs_sync.h index 941202e7ac6e..941202e7ac6e 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/xfs_sync.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index ee2d2adaa438..ee2d2adaa438 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h index b9937d450f8e..b9937d450f8e 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.h +++ b/fs/xfs/xfs_sysctl.h | |||
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/xfs_trace.c index 88d25d4aa56e..9010ce885e6a 100644 --- a/fs/xfs/linux-2.6/xfs_trace.c +++ b/fs/xfs/xfs_trace.c | |||
@@ -43,8 +43,8 @@ | |||
43 | #include "xfs_quota.h" | 43 | #include "xfs_quota.h" |
44 | #include "xfs_iomap.h" | 44 | #include "xfs_iomap.h" |
45 | #include "xfs_aops.h" | 45 | #include "xfs_aops.h" |
46 | #include "quota/xfs_dquot_item.h" | 46 | #include "xfs_dquot_item.h" |
47 | #include "quota/xfs_dquot.h" | 47 | #include "xfs_dquot.h" |
48 | #include "xfs_log_recover.h" | 48 | #include "xfs_log_recover.h" |
49 | #include "xfs_inode_item.h" | 49 | #include "xfs_inode_item.h" |
50 | 50 | ||
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/xfs_trace.h index 690fc7a7bd72..690fc7a7bd72 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 43233e92f0f6..c15aa29fa169 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -299,7 +299,7 @@ xfs_trans_ail_cursor_last( | |||
299 | * Splice the log item list into the AIL at the given LSN. We splice to the | 299 | * Splice the log item list into the AIL at the given LSN. We splice to the |
300 | * tail of the given LSN to maintain insert order for push traversals. The | 300 | * tail of the given LSN to maintain insert order for push traversals. The |
301 | * cursor is optional, allowing repeated updates to the same LSN to avoid | 301 | * cursor is optional, allowing repeated updates to the same LSN to avoid |
302 | * repeated traversals. | 302 | * repeated traversals. This should not be called with an empty list. |
303 | */ | 303 | */ |
304 | static void | 304 | static void |
305 | xfs_ail_splice( | 305 | xfs_ail_splice( |
@@ -308,50 +308,39 @@ xfs_ail_splice( | |||
308 | struct list_head *list, | 308 | struct list_head *list, |
309 | xfs_lsn_t lsn) | 309 | xfs_lsn_t lsn) |
310 | { | 310 | { |
311 | struct xfs_log_item *lip = cur ? cur->item : NULL; | 311 | struct xfs_log_item *lip; |
312 | struct xfs_log_item *next_lip; | 312 | |
313 | ASSERT(!list_empty(list)); | ||
313 | 314 | ||
314 | /* | 315 | /* |
315 | * Get a new cursor if we don't have a placeholder or the existing one | 316 | * Use the cursor to determine the insertion point if one is |
316 | * has been invalidated. | 317 | * provided. If not, or if the one we got is not valid, |
318 | * find the place in the AIL where the items belong. | ||
317 | */ | 319 | */ |
318 | if (!lip || (__psint_t)lip & 1) { | 320 | lip = cur ? cur->item : NULL; |
321 | if (!lip || (__psint_t) lip & 1) | ||
319 | lip = __xfs_trans_ail_cursor_last(ailp, lsn); | 322 | lip = __xfs_trans_ail_cursor_last(ailp, lsn); |
320 | 323 | ||
321 | if (!lip) { | 324 | /* |
322 | /* The list is empty, so just splice and return. */ | 325 | * If a cursor is provided, we know we're processing the AIL |
323 | if (cur) | 326 | * in lsn order, and future items to be spliced in will |
324 | cur->item = NULL; | 327 | * follow the last one being inserted now. Update the |
325 | list_splice(list, &ailp->xa_ail); | 328 | * cursor to point to that last item, now while we have a |
326 | return; | 329 | * reliable pointer to it. |
327 | } | 330 | */ |
328 | } | 331 | if (cur) |
332 | cur->item = list_entry(list->prev, struct xfs_log_item, li_ail); | ||
329 | 333 | ||
330 | /* | 334 | /* |
331 | * Our cursor points to the item we want to insert _after_, so we have | 335 | * Finally perform the splice. Unless the AIL was empty, |
332 | * to update the cursor to point to the end of the list we are splicing | 336 | * lip points to the item in the AIL _after_ which the new |
333 | * in so that it points to the correct location for the next splice. | 337 | * items should go. If lip is null the AIL was empty, so |
334 | * i.e. before the splice | 338 | * the new items go at the head of the AIL. |
335 | * | ||
336 | * lsn -> lsn -> lsn + x -> lsn + x ... | ||
337 | * ^ | ||
338 | * | cursor points here | ||
339 | * | ||
340 | * After the splice we have: | ||
341 | * | ||
342 | * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ... | ||
343 | * ^ ^ | ||
344 | * | cursor points here | needs to move here | ||
345 | * | ||
346 | * So we set the cursor to the last item in the list to be spliced | ||
347 | * before we execute the splice, resulting in the cursor pointing to | ||
348 | * the correct item after the splice occurs. | ||
349 | */ | 339 | */ |
350 | if (cur) { | 340 | if (lip) |
351 | next_lip = list_entry(list->prev, struct xfs_log_item, li_ail); | 341 | list_splice(list, &lip->li_ail); |
352 | cur->item = next_lip; | 342 | else |
353 | } | 343 | list_splice(list, &ailp->xa_ail); |
354 | list_splice(list, &lip->li_ail); | ||
355 | } | 344 | } |
356 | 345 | ||
357 | /* | 346 | /* |
@@ -682,6 +671,7 @@ xfs_trans_ail_update_bulk( | |||
682 | int i; | 671 | int i; |
683 | LIST_HEAD(tmp); | 672 | LIST_HEAD(tmp); |
684 | 673 | ||
674 | ASSERT(nr_items > 0); /* Not required, but true. */ | ||
685 | mlip = xfs_ail_min(ailp); | 675 | mlip = xfs_ail_min(ailp); |
686 | 676 | ||
687 | for (i = 0; i < nr_items; i++) { | 677 | for (i = 0; i < nr_items; i++) { |
@@ -701,7 +691,8 @@ xfs_trans_ail_update_bulk( | |||
701 | list_add(&lip->li_ail, &tmp); | 691 | list_add(&lip->li_ail, &tmp); |
702 | } | 692 | } |
703 | 693 | ||
704 | xfs_ail_splice(ailp, cur, &tmp, lsn); | 694 | if (!list_empty(&tmp)) |
695 | xfs_ail_splice(ailp, cur, &tmp, lsn); | ||
705 | 696 | ||
706 | if (!mlip_changed) { | 697 | if (!mlip_changed) { |
707 | spin_unlock(&ailp->xa_lock); | 698 | spin_unlock(&ailp->xa_lock); |
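Editorial note: the rewritten xfs_ail_splice() is the core of this file's change; the follow-up hunk simply guarantees it is never handed an empty list. As a reading aid, here is a sketch of the same splice decision using plain <linux/list.h> lists — ail_splice_sketch() and its argument names are invented, and the cursor is reduced to a bare pointer rather than a struct xfs_ail_cursor.

#include <linux/list.h>

/* Editorial sketch, not part of the patch: "pos" plays the role of lip,
 * i.e. the AIL entry the new items should follow, or NULL if the AIL
 * is empty. */
static void
ail_splice_sketch(
	struct list_head	*ail,		/* the AIL itself */
	struct list_head	**cursor,	/* optional, may be NULL */
	struct list_head	*items,		/* non-empty list to insert */
	struct list_head	*pos)		/* insert-after point, may be NULL */
{
	/*
	 * Record the last incoming item while "items" is still intact, so
	 * the caller's cursor keeps tracking insert order after the splice.
	 */
	if (cursor)
		*cursor = items->prev;

	/* Non-empty AIL: splice after pos; empty AIL: splice at the head. */
	if (pos)
		list_splice(items, pos);
	else
		list_splice(items, ail);
}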
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 15584fc3ed7d..137e2b9e2948 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -54,7 +54,7 @@ xfs_trans_buf_item_match( | |||
54 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { | 54 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
55 | blip = (struct xfs_buf_log_item *)lidp->lid_item; | 55 | blip = (struct xfs_buf_log_item *)lidp->lid_item; |
56 | if (blip->bli_item.li_type == XFS_LI_BUF && | 56 | if (blip->bli_item.li_type == XFS_LI_BUF && |
57 | XFS_BUF_TARGET(blip->bli_buf) == target && | 57 | blip->bli_buf->b_target == target && |
58 | XFS_BUF_ADDR(blip->bli_buf) == blkno && | 58 | XFS_BUF_ADDR(blip->bli_buf) == blkno && |
59 | XFS_BUF_COUNT(blip->bli_buf) == len) | 59 | XFS_BUF_COUNT(blip->bli_buf) == len) |
60 | return blip->bli_buf; | 60 | return blip->bli_buf; |
@@ -80,7 +80,6 @@ _xfs_trans_bjoin( | |||
80 | { | 80 | { |
81 | struct xfs_buf_log_item *bip; | 81 | struct xfs_buf_log_item *bip; |
82 | 82 | ||
83 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
84 | ASSERT(bp->b_transp == NULL); | 83 | ASSERT(bp->b_transp == NULL); |
85 | 84 | ||
86 | /* | 85 | /* |
@@ -194,7 +193,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, | |||
194 | return NULL; | 193 | return NULL; |
195 | } | 194 | } |
196 | 195 | ||
197 | ASSERT(!XFS_BUF_GETERROR(bp)); | 196 | ASSERT(!bp->b_error); |
198 | 197 | ||
199 | _xfs_trans_bjoin(tp, bp, 1); | 198 | _xfs_trans_bjoin(tp, bp, 1); |
200 | trace_xfs_trans_get_buf(bp->b_fspriv); | 199 | trace_xfs_trans_get_buf(bp->b_fspriv); |
@@ -293,10 +292,10 @@ xfs_trans_read_buf( | |||
293 | return (flags & XBF_TRYLOCK) ? | 292 | return (flags & XBF_TRYLOCK) ? |
294 | EAGAIN : XFS_ERROR(ENOMEM); | 293 | EAGAIN : XFS_ERROR(ENOMEM); |
295 | 294 | ||
296 | if (XFS_BUF_GETERROR(bp) != 0) { | 295 | if (bp->b_error) { |
296 | error = bp->b_error; | ||
297 | xfs_ioerror_alert("xfs_trans_read_buf", mp, | 297 | xfs_ioerror_alert("xfs_trans_read_buf", mp, |
298 | bp, blkno); | 298 | bp, blkno); |
299 | error = XFS_BUF_GETERROR(bp); | ||
300 | xfs_buf_relse(bp); | 299 | xfs_buf_relse(bp); |
301 | return error; | 300 | return error; |
302 | } | 301 | } |
@@ -330,7 +329,7 @@ xfs_trans_read_buf( | |||
330 | ASSERT(xfs_buf_islocked(bp)); | 329 | ASSERT(xfs_buf_islocked(bp)); |
331 | ASSERT(bp->b_transp == tp); | 330 | ASSERT(bp->b_transp == tp); |
332 | ASSERT(bp->b_fspriv != NULL); | 331 | ASSERT(bp->b_fspriv != NULL); |
333 | ASSERT((XFS_BUF_ISERROR(bp)) == 0); | 332 | ASSERT(!bp->b_error); |
334 | if (!(XFS_BUF_ISDONE(bp))) { | 333 | if (!(XFS_BUF_ISDONE(bp))) { |
335 | trace_xfs_trans_read_buf_io(bp, _RET_IP_); | 334 | trace_xfs_trans_read_buf_io(bp, _RET_IP_); |
336 | ASSERT(!XFS_BUF_ISASYNC(bp)); | 335 | ASSERT(!XFS_BUF_ISASYNC(bp)); |
@@ -386,10 +385,9 @@ xfs_trans_read_buf( | |||
386 | return (flags & XBF_TRYLOCK) ? | 385 | return (flags & XBF_TRYLOCK) ? |
387 | 0 : XFS_ERROR(ENOMEM); | 386 | 0 : XFS_ERROR(ENOMEM); |
388 | } | 387 | } |
389 | if (XFS_BUF_GETERROR(bp) != 0) { | 388 | if (bp->b_error) { |
390 | XFS_BUF_SUPER_STALE(bp); | 389 | error = bp->b_error; |
391 | error = XFS_BUF_GETERROR(bp); | 390 | XFS_BUF_SUPER_STALE(bp); |
392 | |||
393 | xfs_ioerror_alert("xfs_trans_read_buf", mp, | 391 | xfs_ioerror_alert("xfs_trans_read_buf", mp, |
394 | bp, blkno); | 392 | bp, blkno); |
395 | if (tp->t_flags & XFS_TRANS_DIRTY) | 393 | if (tp->t_flags & XFS_TRANS_DIRTY) |
@@ -430,7 +428,7 @@ shutdown_abort: | |||
430 | if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) | 428 | if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) |
431 | xfs_notice(mp, "about to pop assert, bp == 0x%p", bp); | 429 | xfs_notice(mp, "about to pop assert, bp == 0x%p", bp); |
432 | #endif | 430 | #endif |
433 | ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) != | 431 | ASSERT((bp->b_flags & (XBF_STALE|XBF_DELWRI)) != |
434 | (XBF_STALE|XBF_DELWRI)); | 432 | (XBF_STALE|XBF_DELWRI)); |
435 | 433 | ||
436 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); | 434 | trace_xfs_trans_read_buf_shut(bp, _RET_IP_); |
@@ -581,7 +579,6 @@ xfs_trans_bhold(xfs_trans_t *tp, | |||
581 | { | 579 | { |
582 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 580 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
583 | 581 | ||
584 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
585 | ASSERT(bp->b_transp == tp); | 582 | ASSERT(bp->b_transp == tp); |
586 | ASSERT(bip != NULL); | 583 | ASSERT(bip != NULL); |
587 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); | 584 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); |
@@ -602,7 +599,6 @@ xfs_trans_bhold_release(xfs_trans_t *tp, | |||
602 | { | 599 | { |
603 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 600 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
604 | 601 | ||
605 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
606 | ASSERT(bp->b_transp == tp); | 602 | ASSERT(bp->b_transp == tp); |
607 | ASSERT(bip != NULL); | 603 | ASSERT(bip != NULL); |
608 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); | 604 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); |
@@ -631,7 +627,6 @@ xfs_trans_log_buf(xfs_trans_t *tp, | |||
631 | { | 627 | { |
632 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 628 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
633 | 629 | ||
634 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
635 | ASSERT(bp->b_transp == tp); | 630 | ASSERT(bp->b_transp == tp); |
636 | ASSERT(bip != NULL); | 631 | ASSERT(bip != NULL); |
637 | ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); | 632 | ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); |
@@ -702,7 +697,6 @@ xfs_trans_binval( | |||
702 | { | 697 | { |
703 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 698 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
704 | 699 | ||
705 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
706 | ASSERT(bp->b_transp == tp); | 700 | ASSERT(bp->b_transp == tp); |
707 | ASSERT(bip != NULL); | 701 | ASSERT(bip != NULL); |
708 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 702 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
@@ -774,7 +768,6 @@ xfs_trans_inode_buf( | |||
774 | { | 768 | { |
775 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 769 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
776 | 770 | ||
777 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
778 | ASSERT(bp->b_transp == tp); | 771 | ASSERT(bp->b_transp == tp); |
779 | ASSERT(bip != NULL); | 772 | ASSERT(bip != NULL); |
780 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 773 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
@@ -798,7 +791,6 @@ xfs_trans_stale_inode_buf( | |||
798 | { | 791 | { |
799 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 792 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
800 | 793 | ||
801 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
802 | ASSERT(bp->b_transp == tp); | 794 | ASSERT(bp->b_transp == tp); |
803 | ASSERT(bip != NULL); | 795 | ASSERT(bip != NULL); |
804 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 796 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
@@ -823,7 +815,6 @@ xfs_trans_inode_alloc_buf( | |||
823 | { | 815 | { |
824 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 816 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
825 | 817 | ||
826 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
827 | ASSERT(bp->b_transp == tp); | 818 | ASSERT(bp->b_transp == tp); |
828 | ASSERT(bip != NULL); | 819 | ASSERT(bip != NULL); |
829 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 820 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
@@ -851,7 +842,6 @@ xfs_trans_dquot_buf( | |||
851 | { | 842 | { |
852 | xfs_buf_log_item_t *bip = bp->b_fspriv; | 843 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
853 | 844 | ||
854 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
855 | ASSERT(bp->b_transp == tp); | 845 | ASSERT(bp->b_transp == tp); |
856 | ASSERT(bip != NULL); | 846 | ASSERT(bip != NULL); |
857 | ASSERT(type == XFS_BLF_UDQUOT_BUF || | 847 | ASSERT(type == XFS_BLF_UDQUOT_BUF || |
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 4d00ee67792d..4d00ee67792d 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/xfs_vnode.h index 7c220b4227bc..7c220b4227bc 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/xfs_vnode.h | |||
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 9322e13f0c63..51fc429527bc 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -83,7 +83,9 @@ xfs_readlink_bmap( | |||
83 | 83 | ||
84 | bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), | 84 | bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), |
85 | XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); | 85 | XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); |
86 | error = XFS_BUF_GETERROR(bp); | 86 | if (!bp) |
87 | return XFS_ERROR(ENOMEM); | ||
88 | error = bp->b_error; | ||
87 | if (error) { | 89 | if (error) { |
88 | xfs_ioerror_alert("xfs_readlink", | 90 | xfs_ioerror_alert("xfs_readlink", |
89 | ip->i_mount, bp, XFS_BUF_ADDR(bp)); | 91 | ip->i_mount, bp, XFS_BUF_ADDR(bp)); |
@@ -94,7 +96,7 @@ xfs_readlink_bmap( | |||
94 | byte_cnt = pathlen; | 96 | byte_cnt = pathlen; |
95 | pathlen -= byte_cnt; | 97 | pathlen -= byte_cnt; |
96 | 98 | ||
97 | memcpy(link, XFS_BUF_PTR(bp), byte_cnt); | 99 | memcpy(link, bp->b_addr, byte_cnt); |
98 | xfs_buf_relse(bp); | 100 | xfs_buf_relse(bp); |
99 | } | 101 | } |
100 | 102 | ||
@@ -1648,13 +1650,13 @@ xfs_symlink( | |||
1648 | byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); | 1650 | byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); |
1649 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | 1651 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, |
1650 | BTOBB(byte_cnt), 0); | 1652 | BTOBB(byte_cnt), 0); |
1651 | ASSERT(bp && !XFS_BUF_GETERROR(bp)); | 1653 | ASSERT(!xfs_buf_geterror(bp)); |
1652 | if (pathlen < byte_cnt) { | 1654 | if (pathlen < byte_cnt) { |
1653 | byte_cnt = pathlen; | 1655 | byte_cnt = pathlen; |
1654 | } | 1656 | } |
1655 | pathlen -= byte_cnt; | 1657 | pathlen -= byte_cnt; |
1656 | 1658 | ||
1657 | memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt); | 1659 | memcpy(bp->b_addr, cur_chunk, byte_cnt); |
1658 | cur_chunk += byte_cnt; | 1660 | cur_chunk += byte_cnt; |
1659 | 1661 | ||
1660 | xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); | 1662 | xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); |
@@ -1999,7 +2001,7 @@ xfs_zero_remaining_bytes( | |||
1999 | mp, bp, XFS_BUF_ADDR(bp)); | 2001 | mp, bp, XFS_BUF_ADDR(bp)); |
2000 | break; | 2002 | break; |
2001 | } | 2003 | } |
2002 | memset(XFS_BUF_PTR(bp) + | 2004 | memset(bp->b_addr + |
2003 | (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), | 2005 | (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), |
2004 | 0, lastoffset - offset + 1); | 2006 | 0, lastoffset - offset + 1); |
2005 | XFS_BUF_UNDONE(bp); | 2007 | XFS_BUF_UNDONE(bp); |
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/xfs_xattr.c index 87d3e03878c8..87d3e03878c8 100644 --- a/fs/xfs/linux-2.6/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c | |||
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index fb2d63f13f4c..aea9e45efce6 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h | |||
@@ -39,7 +39,7 @@ | |||
39 | }) | 39 | }) |
40 | 40 | ||
41 | #define __page_to_pfn(pg) \ | 41 | #define __page_to_pfn(pg) \ |
42 | ({ struct page *__pg = (pg); \ | 42 | ({ const struct page *__pg = (pg); \ |
43 | struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ | 43 | struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ |
44 | (unsigned long)(__pg - __pgdat->node_mem_map) + \ | 44 | (unsigned long)(__pg - __pgdat->node_mem_map) + \ |
45 | __pgdat->node_start_pfn; \ | 45 | __pgdat->node_start_pfn; \ |
@@ -57,7 +57,7 @@ | |||
57 | * section[i].section_mem_map == mem_map's address - start_pfn; | 57 | * section[i].section_mem_map == mem_map's address - start_pfn; |
58 | */ | 58 | */ |
59 | #define __page_to_pfn(pg) \ | 59 | #define __page_to_pfn(pg) \ |
60 | ({ struct page *__pg = (pg); \ | 60 | ({ const struct page *__pg = (pg); \ |
61 | int __sec = page_to_section(__pg); \ | 61 | int __sec = page_to_section(__pg); \ |
62 | (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ | 62 | (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ |
63 | }) | 63 | }) |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 6395692b2e7a..32f0076e844b 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -125,7 +125,11 @@ enum rq_flag_bits { | |||
125 | __REQ_SYNC, /* request is sync (sync write or read) */ | 125 | __REQ_SYNC, /* request is sync (sync write or read) */ |
126 | __REQ_META, /* metadata io request */ | 126 | __REQ_META, /* metadata io request */ |
127 | __REQ_DISCARD, /* request to discard sectors */ | 127 | __REQ_DISCARD, /* request to discard sectors */ |
128 | __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ | ||
129 | |||
128 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ | 130 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ |
131 | __REQ_FUA, /* forced unit access */ | ||
132 | __REQ_FLUSH, /* request for cache flush */ | ||
129 | 133 | ||
130 | /* bio only flags */ | 134 | /* bio only flags */ |
131 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | 135 | __REQ_RAHEAD, /* read ahead, can fail anytime */ |
@@ -135,7 +139,6 @@ enum rq_flag_bits { | |||
135 | /* request only flags */ | 139 | /* request only flags */ |
136 | __REQ_SORTED, /* elevator knows about this request */ | 140 | __REQ_SORTED, /* elevator knows about this request */ |
137 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | 141 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ |
138 | __REQ_FUA, /* forced unit access */ | ||
139 | __REQ_NOMERGE, /* don't touch this for merging */ | 142 | __REQ_NOMERGE, /* don't touch this for merging */ |
140 | __REQ_STARTED, /* drive already may have started this one */ | 143 | __REQ_STARTED, /* drive already may have started this one */ |
141 | __REQ_DONTPREP, /* don't call prep for this one */ | 144 | __REQ_DONTPREP, /* don't call prep for this one */ |
@@ -146,11 +149,9 @@ enum rq_flag_bits { | |||
146 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | 149 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ |
147 | __REQ_ALLOCED, /* request came from our alloc pool */ | 150 | __REQ_ALLOCED, /* request came from our alloc pool */ |
148 | __REQ_COPY_USER, /* contains copies of user pages */ | 151 | __REQ_COPY_USER, /* contains copies of user pages */ |
149 | __REQ_FLUSH, /* request for cache flush */ | ||
150 | __REQ_FLUSH_SEQ, /* request for flush sequence */ | 152 | __REQ_FLUSH_SEQ, /* request for flush sequence */ |
151 | __REQ_IO_STAT, /* account I/O stat */ | 153 | __REQ_IO_STAT, /* account I/O stat */ |
152 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | 154 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ |
153 | __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ | ||
154 | __REQ_NR_BITS, /* stops here */ | 155 | __REQ_NR_BITS, /* stops here */ |
155 | }; | 156 | }; |
156 | 157 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0e67c45b3bc9..84b15d54f8c2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -30,6 +30,7 @@ struct request_pm_state; | |||
30 | struct blk_trace; | 30 | struct blk_trace; |
31 | struct request; | 31 | struct request; |
32 | struct sg_io_hdr; | 32 | struct sg_io_hdr; |
33 | struct bsg_job; | ||
33 | 34 | ||
34 | #define BLKDEV_MIN_RQ 4 | 35 | #define BLKDEV_MIN_RQ 4 |
35 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | 36 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
@@ -117,6 +118,7 @@ struct request { | |||
117 | struct { | 118 | struct { |
118 | unsigned int seq; | 119 | unsigned int seq; |
119 | struct list_head list; | 120 | struct list_head list; |
121 | rq_end_io_fn *saved_end_io; | ||
120 | } flush; | 122 | } flush; |
121 | }; | 123 | }; |
122 | 124 | ||
@@ -209,6 +211,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, | |||
209 | typedef void (softirq_done_fn)(struct request *); | 211 | typedef void (softirq_done_fn)(struct request *); |
210 | typedef int (dma_drain_needed_fn)(struct request *); | 212 | typedef int (dma_drain_needed_fn)(struct request *); |
211 | typedef int (lld_busy_fn) (struct request_queue *q); | 213 | typedef int (lld_busy_fn) (struct request_queue *q); |
214 | typedef int (bsg_job_fn) (struct bsg_job *); | ||
212 | 215 | ||
213 | enum blk_eh_timer_return { | 216 | enum blk_eh_timer_return { |
214 | BLK_EH_NOT_HANDLED, | 217 | BLK_EH_NOT_HANDLED, |
@@ -375,6 +378,8 @@ struct request_queue { | |||
375 | struct mutex sysfs_lock; | 378 | struct mutex sysfs_lock; |
376 | 379 | ||
377 | #if defined(CONFIG_BLK_DEV_BSG) | 380 | #if defined(CONFIG_BLK_DEV_BSG) |
381 | bsg_job_fn *bsg_job_fn; | ||
382 | int bsg_job_size; | ||
378 | struct bsg_class_device bsg_dev; | 383 | struct bsg_class_device bsg_dev; |
379 | #endif | 384 | #endif |
380 | 385 | ||
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 8c7c2de7631a..8e9e4bc6d73b 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -14,7 +14,7 @@ | |||
14 | enum blktrace_cat { | 14 | enum blktrace_cat { |
15 | BLK_TC_READ = 1 << 0, /* reads */ | 15 | BLK_TC_READ = 1 << 0, /* reads */ |
16 | BLK_TC_WRITE = 1 << 1, /* writes */ | 16 | BLK_TC_WRITE = 1 << 1, /* writes */ |
17 | BLK_TC_BARRIER = 1 << 2, /* barrier */ | 17 | BLK_TC_FLUSH = 1 << 2, /* flush */ |
18 | BLK_TC_SYNC = 1 << 3, /* sync IO */ | 18 | BLK_TC_SYNC = 1 << 3, /* sync IO */ |
19 | BLK_TC_SYNCIO = BLK_TC_SYNC, | 19 | BLK_TC_SYNCIO = BLK_TC_SYNC, |
20 | BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ | 20 | BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ |
@@ -28,8 +28,9 @@ enum blktrace_cat { | |||
28 | BLK_TC_META = 1 << 12, /* metadata */ | 28 | BLK_TC_META = 1 << 12, /* metadata */ |
29 | BLK_TC_DISCARD = 1 << 13, /* discard requests */ | 29 | BLK_TC_DISCARD = 1 << 13, /* discard requests */ |
30 | BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */ | 30 | BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */ |
31 | BLK_TC_FUA = 1 << 15, /* fua requests */ | ||
31 | 32 | ||
32 | BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ | 33 | BLK_TC_END = 1 << 15, /* we've run out of bits! */ |
33 | }; | 34 | }; |
34 | 35 | ||
35 | #define BLK_TC_SHIFT (16) | 36 | #define BLK_TC_SHIFT (16) |
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h new file mode 100644 index 000000000000..f55ab8cdc106 --- /dev/null +++ b/include/linux/bsg-lib.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * BSG helper library | ||
3 | * | ||
4 | * Copyright (C) 2008 James Smart, Emulex Corporation | ||
5 | * Copyright (C) 2011 Red Hat, Inc. All rights reserved. | ||
6 | * Copyright (C) 2011 Mike Christie | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | * | ||
22 | */ | ||
23 | #ifndef _BLK_BSG_ | ||
24 | #define _BLK_BSG_ | ||
25 | |||
26 | #include <linux/blkdev.h> | ||
27 | |||
28 | struct request; | ||
29 | struct device; | ||
30 | struct scatterlist; | ||
31 | struct request_queue; | ||
32 | |||
33 | struct bsg_buffer { | ||
34 | unsigned int payload_len; | ||
35 | int sg_cnt; | ||
36 | struct scatterlist *sg_list; | ||
37 | }; | ||
38 | |||
39 | struct bsg_job { | ||
40 | struct device *dev; | ||
41 | struct request *req; | ||
42 | |||
43 | /* Transport/driver specific request/reply structs */ | ||
44 | void *request; | ||
45 | void *reply; | ||
46 | |||
47 | unsigned int request_len; | ||
48 | unsigned int reply_len; | ||
49 | /* | ||
50 | * On entry : reply_len indicates the buffer size allocated for | ||
51 | * the reply. | ||
52 | * | ||
53 | * Upon completion : the message handler must set reply_len | ||
54 | * to indicate the size of the reply to be returned to the | ||
55 | * caller. | ||
56 | */ | ||
57 | |||
58 | /* DMA payloads for the request/response */ | ||
59 | struct bsg_buffer request_payload; | ||
60 | struct bsg_buffer reply_payload; | ||
61 | |||
62 | void *dd_data; /* Used for driver-specific storage */ | ||
63 | }; | ||
64 | |||
65 | void bsg_job_done(struct bsg_job *job, int result, | ||
66 | unsigned int reply_payload_rcv_len); | ||
67 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, | ||
68 | bsg_job_fn *job_fn, int dd_job_size); | ||
69 | void bsg_request_fn(struct request_queue *q); | ||
70 | void bsg_remove_queue(struct request_queue *q); | ||
71 | void bsg_goose_queue(struct request_queue *q); | ||
72 | |||
73 | #endif | ||
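Editorial note: the header above is the whole public surface of the new helper library — a transport driver provides a bsg_job_fn, registers it with bsg_setup_queue(), and completes each job with bsg_job_done(). Below is a sketch of a minimal consumer; the "echo" behaviour and the example_* names are invented, and only the bsg-lib calls and bsg_job fields come from the header.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bsg-lib.h>

/* Hypothetical handler: copy the transport request back as the reply. */
static int example_bsg_job_fn(struct bsg_job *job)
{
	unsigned int len = min(job->request_len, job->reply_len);

	memcpy(job->reply, job->request, len);
	job->reply_len = len;		/* handler must report the final reply size */

	bsg_job_done(job, 0, job->reply_payload.payload_len);
	return 0;
}

static int example_bsg_register(struct device *dev, struct request_queue *q)
{
	/* dd_job_size reserves per-job driver data, reachable via job->dd_data. */
	return bsg_setup_queue(dev, q, "example_bsg", example_bsg_job_fn, 0);
}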
diff --git a/include/linux/connector.h b/include/linux/connector.h index 0c69ad825b39..3c9c54fd5690 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * connector.h | 2 | * connector.h |
3 | * | 3 | * |
4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> |
5 | * All rights reserved. | 5 | * All rights reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 98f46efbe2d2..40308969ed00 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -269,7 +269,7 @@ static inline void put_cred(const struct cred *_cred) | |||
269 | * since nobody else can modify it. | 269 | * since nobody else can modify it. |
270 | */ | 270 | */ |
271 | #define current_cred() \ | 271 | #define current_cred() \ |
272 | (*(__force struct cred **)¤t->cred) | 272 | rcu_dereference_protected(current->cred, 1) |
273 | 273 | ||
274 | /** | 274 | /** |
275 | * __task_cred - Access a task's objective credentials | 275 | * __task_cred - Access a task's objective credentials |
@@ -307,7 +307,7 @@ static inline void put_cred(const struct cred *_cred) | |||
307 | #define get_current_user() \ | 307 | #define get_current_user() \ |
308 | ({ \ | 308 | ({ \ |
309 | struct user_struct *__u; \ | 309 | struct user_struct *__u; \ |
310 | struct cred *__cred; \ | 310 | const struct cred *__cred; \ |
311 | __cred = current_cred(); \ | 311 | __cred = current_cred(); \ |
312 | __u = get_uid(__cred->user); \ | 312 | __u = get_uid(__cred->user); \ |
313 | __u; \ | 313 | __u; \ |
@@ -322,7 +322,7 @@ static inline void put_cred(const struct cred *_cred) | |||
322 | #define get_current_groups() \ | 322 | #define get_current_groups() \ |
323 | ({ \ | 323 | ({ \ |
324 | struct group_info *__groups; \ | 324 | struct group_info *__groups; \ |
325 | struct cred *__cred; \ | 325 | const struct cred *__cred; \ |
326 | __cred = current_cred(); \ | 326 | __cred = current_cred(); \ |
327 | __groups = get_group_info(__cred->group_info); \ | 327 | __groups = get_group_info(__cred->group_info); \ |
328 | __groups; \ | 328 | __groups; \ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 178cdb4f1d4a..c2bd68f2277a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2318,6 +2318,11 @@ extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*te | |||
2318 | extern struct inode * iget_locked(struct super_block *, unsigned long); | 2318 | extern struct inode * iget_locked(struct super_block *, unsigned long); |
2319 | extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); | 2319 | extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); |
2320 | extern int insert_inode_locked(struct inode *); | 2320 | extern int insert_inode_locked(struct inode *); |
2321 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
2322 | extern void lockdep_annotate_inode_mutex_key(struct inode *inode); | ||
2323 | #else | ||
2324 | static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; | ||
2325 | #endif | ||
2321 | extern void unlock_new_inode(struct inode *); | 2326 | extern void unlock_new_inode(struct inode *); |
2322 | extern unsigned int get_next_ino(void); | 2327 | extern unsigned int get_next_ino(void); |
2323 | 2328 | ||
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index d464de53db43..464cff526860 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
@@ -47,6 +47,9 @@ | |||
47 | * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct | 47 | * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct |
48 | * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' | 48 | * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' |
49 | * - add FUSE_IOCTL_32BIT flag | 49 | * - add FUSE_IOCTL_32BIT flag |
50 | * | ||
51 | * 7.17 | ||
52 | * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK | ||
50 | */ | 53 | */ |
51 | 54 | ||
52 | #ifndef _LINUX_FUSE_H | 55 | #ifndef _LINUX_FUSE_H |
@@ -78,7 +81,7 @@ | |||
78 | #define FUSE_KERNEL_VERSION 7 | 81 | #define FUSE_KERNEL_VERSION 7 |
79 | 82 | ||
80 | /** Minor version number of this interface */ | 83 | /** Minor version number of this interface */ |
81 | #define FUSE_KERNEL_MINOR_VERSION 16 | 84 | #define FUSE_KERNEL_MINOR_VERSION 17 |
82 | 85 | ||
83 | /** The node ID of the root inode */ | 86 | /** The node ID of the root inode */ |
84 | #define FUSE_ROOT_ID 1 | 87 | #define FUSE_ROOT_ID 1 |
@@ -153,8 +156,10 @@ struct fuse_file_lock { | |||
153 | /** | 156 | /** |
154 | * INIT request/reply flags | 157 | * INIT request/reply flags |
155 | * | 158 | * |
159 | * FUSE_POSIX_LOCKS: remote locking for POSIX file locks | ||
156 | * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." | 160 | * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." |
157 | * FUSE_DONT_MASK: don't apply umask to file mode on create operations | 161 | * FUSE_DONT_MASK: don't apply umask to file mode on create operations |
162 | * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks | ||
158 | */ | 163 | */ |
159 | #define FUSE_ASYNC_READ (1 << 0) | 164 | #define FUSE_ASYNC_READ (1 << 0) |
160 | #define FUSE_POSIX_LOCKS (1 << 1) | 165 | #define FUSE_POSIX_LOCKS (1 << 1) |
@@ -163,6 +168,7 @@ struct fuse_file_lock { | |||
163 | #define FUSE_EXPORT_SUPPORT (1 << 4) | 168 | #define FUSE_EXPORT_SUPPORT (1 << 4) |
164 | #define FUSE_BIG_WRITES (1 << 5) | 169 | #define FUSE_BIG_WRITES (1 << 5) |
165 | #define FUSE_DONT_MASK (1 << 6) | 170 | #define FUSE_DONT_MASK (1 << 6) |
171 | #define FUSE_FLOCK_LOCKS (1 << 10) | ||
166 | 172 | ||
167 | /** | 173 | /** |
168 | * CUSE INIT request/reply flags | 174 | * CUSE INIT request/reply flags |
@@ -175,6 +181,7 @@ struct fuse_file_lock { | |||
175 | * Release flags | 181 | * Release flags |
176 | */ | 182 | */ |
177 | #define FUSE_RELEASE_FLUSH (1 << 0) | 183 | #define FUSE_RELEASE_FLUSH (1 << 0) |
184 | #define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1) | ||
178 | 185 | ||
179 | /** | 186 | /** |
180 | * Getattr flags | 187 | * Getattr flags |
diff --git a/include/linux/hash.h b/include/linux/hash.h index 06d25c189cc5..b80506bdd733 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -63,7 +63,7 @@ static inline u32 hash_32(u32 val, unsigned int bits) | |||
63 | return hash >> (32 - bits); | 63 | return hash >> (32 - bits); |
64 | } | 64 | } |
65 | 65 | ||
66 | static inline unsigned long hash_ptr(void *ptr, unsigned int bits) | 66 | static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) |
67 | { | 67 | { |
68 | return hash_long((unsigned long)ptr, bits); | 68 | return hash_long((unsigned long)ptr, bits); |
69 | } | 69 | } |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 87a06f345bd2..59517300a315 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/topology.h> | 24 | #include <linux/topology.h> |
25 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
26 | #include <linux/module.h> | ||
26 | 27 | ||
27 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
28 | #include <asm/ptrace.h> | 29 | #include <asm/ptrace.h> |
@@ -547,7 +548,15 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | |||
547 | return d->msi_desc; | 548 | return d->msi_desc; |
548 | } | 549 | } |
549 | 550 | ||
550 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | 551 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
552 | struct module *owner); | ||
553 | |||
554 | static inline int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, | ||
555 | int node) | ||
556 | { | ||
557 | return __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE); | ||
558 | } | ||
559 | |||
551 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 560 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
552 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 561 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
553 | 562 | ||
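Existing callers keep using irq_alloc_descs(); the static inline above simply forwards to __irq_alloc_descs() with THIS_MODULE so the irqdesc core can pin the owning module (see the try_module_get() added to __setup_irq() further down). A hedged sketch of a driver allocating a small block of descriptors; my_setup_irqs() and the count of 4 are invented for illustration:

/* Sketch: let the core pick the base number (irq == -1) on this node. */
static int my_setup_irqs(void)
{
	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());

	if (base < 0)
		return base;			/* allocation failed */
	/* ... install chips and handlers for base .. base + 3 ... */
	return base;				/* pair with irq_free_descs(base, 4) on teardown */
}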
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 2d921b35212c..150134ac709a 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -66,6 +66,7 @@ struct irq_desc { | |||
66 | #ifdef CONFIG_PROC_FS | 66 | #ifdef CONFIG_PROC_FS |
67 | struct proc_dir_entry *dir; | 67 | struct proc_dir_entry *dir; |
68 | #endif | 68 | #endif |
69 | struct module *owner; | ||
69 | const char *name; | 70 | const char *name; |
70 | } ____cacheline_internodealigned_in_smp; | 71 | } ____cacheline_internodealigned_in_smp; |
71 | 72 | ||
diff --git a/include/linux/loop.h b/include/linux/loop.h index 66c194e2d9b9..683d69890119 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
@@ -64,7 +64,6 @@ struct loop_device { | |||
64 | 64 | ||
65 | struct request_queue *lo_queue; | 65 | struct request_queue *lo_queue; |
66 | struct gendisk *lo_disk; | 66 | struct gendisk *lo_disk; |
67 | struct list_head lo_list; | ||
68 | }; | 67 | }; |
69 | 68 | ||
70 | #endif /* __KERNEL__ */ | 69 | #endif /* __KERNEL__ */ |
@@ -161,4 +160,8 @@ int loop_unregister_transfer(int number); | |||
161 | #define LOOP_CHANGE_FD 0x4C06 | 160 | #define LOOP_CHANGE_FD 0x4C06 |
162 | #define LOOP_SET_CAPACITY 0x4C07 | 161 | #define LOOP_SET_CAPACITY 0x4C07 |
163 | 162 | ||
163 | /* /dev/loop-control interface */ | ||
164 | #define LOOP_CTL_ADD 0x4C80 | ||
165 | #define LOOP_CTL_REMOVE 0x4C81 | ||
166 | #define LOOP_CTL_GET_FREE 0x4C82 | ||
164 | #endif | 167 | #endif |
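The three LOOP_CTL_* ioctls define the new /dev/loop-control interface (registered as a misc device via the LOOP_CTRL_MINOR addition below). A short userspace sketch, assuming the /dev/loop-control node exists and <linux/loop.h> carries the new definitions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int ctl, nr;

	ctl = open("/dev/loop-control", O_RDWR);	/* the new control node */
	if (ctl < 0)
		return 1;
	nr = ioctl(ctl, LOOP_CTL_GET_FREE);		/* first unused loop index */
	if (nr >= 0)
		printf("/dev/loop%d is free\n", nr);
	return 0;
}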
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 18fd13028ba1..c309b1ecdc1c 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h | |||
@@ -40,6 +40,7 @@ | |||
40 | #define BTRFS_MINOR 234 | 40 | #define BTRFS_MINOR 234 |
41 | #define AUTOFS_MINOR 235 | 41 | #define AUTOFS_MINOR 235 |
42 | #define MAPPER_CTRL_MINOR 236 | 42 | #define MAPPER_CTRL_MINOR 236 |
43 | #define LOOP_CTRL_MINOR 237 | ||
43 | #define MISC_DYNAMIC_MINOR 255 | 44 | #define MISC_DYNAMIC_MINOR 255 |
44 | 45 | ||
45 | struct device; | 46 | struct device; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f2690cf49827..7438071b44aa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -685,7 +685,7 @@ static inline void set_page_section(struct page *page, unsigned long section) | |||
685 | page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; | 685 | page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; |
686 | } | 686 | } |
687 | 687 | ||
688 | static inline unsigned long page_to_section(struct page *page) | 688 | static inline unsigned long page_to_section(const struct page *page) |
689 | { | 689 | { |
690 | return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; | 690 | return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; |
691 | } | 691 | } |
@@ -720,7 +720,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
720 | 720 | ||
721 | static __always_inline void *lowmem_page_address(const struct page *page) | 721 | static __always_inline void *lowmem_page_address(const struct page *page) |
722 | { | 722 | { |
723 | return __va(PFN_PHYS(page_to_pfn((struct page *)page))); | 723 | return __va(PFN_PHYS(page_to_pfn(page))); |
724 | } | 724 | } |
725 | 725 | ||
726 | #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) | 726 | #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) |
@@ -737,7 +737,7 @@ static __always_inline void *lowmem_page_address(const struct page *page) | |||
737 | #endif | 737 | #endif |
738 | 738 | ||
739 | #if defined(HASHED_PAGE_VIRTUAL) | 739 | #if defined(HASHED_PAGE_VIRTUAL) |
740 | void *page_address(struct page *page); | 740 | void *page_address(const struct page *page); |
741 | void set_page_address(struct page *page, void *virtual); | 741 | void set_page_address(struct page *page, void *virtual); |
742 | void page_address_init(void); | 742 | void page_address_init(void); |
743 | #endif | 743 | #endif |
@@ -962,6 +962,8 @@ int invalidate_inode_page(struct page *page); | |||
962 | #ifdef CONFIG_MMU | 962 | #ifdef CONFIG_MMU |
963 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | 963 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
964 | unsigned long address, unsigned int flags); | 964 | unsigned long address, unsigned int flags); |
965 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | ||
966 | unsigned long address, unsigned int fault_flags); | ||
965 | #else | 967 | #else |
966 | static inline int handle_mm_fault(struct mm_struct *mm, | 968 | static inline int handle_mm_fault(struct mm_struct *mm, |
967 | struct vm_area_struct *vma, unsigned long address, | 969 | struct vm_area_struct *vma, unsigned long address, |
@@ -971,6 +973,14 @@ static inline int handle_mm_fault(struct mm_struct *mm, | |||
971 | BUG(); | 973 | BUG(); |
972 | return VM_FAULT_SIGBUS; | 974 | return VM_FAULT_SIGBUS; |
973 | } | 975 | } |
976 | static inline int fixup_user_fault(struct task_struct *tsk, | ||
977 | struct mm_struct *mm, unsigned long address, | ||
978 | unsigned int fault_flags) | ||
979 | { | ||
980 | /* should never happen if there's no MMU */ | ||
981 | BUG(); | ||
982 | return -EFAULT; | ||
983 | } | ||
974 | #endif | 984 | #endif |
975 | 985 | ||
976 | extern int make_pages_present(unsigned long addr, unsigned long end); | 986 | extern int make_pages_present(unsigned long addr, unsigned long end); |
@@ -988,8 +998,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
988 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 998 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
989 | struct page **pages); | 999 | struct page **pages); |
990 | struct page *get_dump_page(unsigned long addr); | 1000 | struct page *get_dump_page(unsigned long addr); |
991 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | ||
992 | unsigned long address, unsigned int fault_flags); | ||
993 | 1001 | ||
994 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); | 1002 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); |
995 | extern void do_invalidatepage(struct page *page, unsigned long offset); | 1003 | extern void do_invalidatepage(struct page *page, unsigned long offset); |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0f83858147a6..1d09562ccf73 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -56,8 +56,6 @@ struct mmc_ios { | |||
56 | #define MMC_TIMING_UHS_SDR104 4 | 56 | #define MMC_TIMING_UHS_SDR104 4 |
57 | #define MMC_TIMING_UHS_DDR50 5 | 57 | #define MMC_TIMING_UHS_DDR50 5 |
58 | 58 | ||
59 | unsigned char ddr; /* dual data rate used */ | ||
60 | |||
61 | #define MMC_SDR_MODE 0 | 59 | #define MMC_SDR_MODE 0 |
62 | #define MMC_1_2V_DDR_MODE 1 | 60 | #define MMC_1_2V_DDR_MODE 1 |
63 | #define MMC_1_8V_DDR_MODE 2 | 61 | #define MMC_1_8V_DDR_MODE 2 |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 2e17c5dbdcb8..180540a84d37 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #define MAX_LINKS 32 | 29 | #define MAX_LINKS 32 |
30 | 30 | ||
31 | struct sockaddr_nl { | 31 | struct sockaddr_nl { |
32 | sa_family_t nl_family; /* AF_NETLINK */ | 32 | __kernel_sa_family_t nl_family; /* AF_NETLINK */ |
33 | unsigned short nl_pad; /* zero */ | 33 | unsigned short nl_pad; /* zero */ |
34 | __u32 nl_pid; /* port ID */ | 34 | __u32 nl_pid; /* port ID */ |
35 | __u32 nl_groups; /* multicast groups mask */ | 35 | __u32 nl_groups; /* multicast groups mask */ |
diff --git a/include/linux/of.h b/include/linux/of.h index 0085bb01c041..9180dc5cb00b 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -256,6 +256,13 @@ static inline int of_property_read_string(struct device_node *np, | |||
256 | return -ENOSYS; | 256 | return -ENOSYS; |
257 | } | 257 | } |
258 | 258 | ||
259 | static inline const void *of_get_property(const struct device_node *node, | ||
260 | const char *name, | ||
261 | int *lenp) | ||
262 | { | ||
263 | return NULL; | ||
264 | } | ||
265 | |||
259 | #endif /* CONFIG_OF */ | 266 | #endif /* CONFIG_OF */ |
260 | 267 | ||
261 | static inline int of_property_read_u32(const struct device_node *np, | 268 | static inline int of_property_read_u32(const struct device_node *np, |
diff --git a/include/linux/pci.h b/include/linux/pci.h index f27893b3b724..8c230cbcbb48 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -251,7 +251,8 @@ struct pci_dev { | |||
251 | u8 revision; /* PCI revision, low byte of class word */ | 251 | u8 revision; /* PCI revision, low byte of class word */ |
252 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ | 252 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ |
253 | u8 pcie_cap; /* PCI-E capability offset */ | 253 | u8 pcie_cap; /* PCI-E capability offset */ |
254 | u8 pcie_type; /* PCI-E device/port type */ | 254 | u8 pcie_type:4; /* PCI-E device/port type */ |
255 | u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ | ||
255 | u8 rom_base_reg; /* which config register controls the ROM */ | 256 | u8 rom_base_reg; /* which config register controls the ROM */ |
256 | u8 pin; /* which interrupt pin this device uses */ | 257 | u8 pin; /* which interrupt pin this device uses */ |
257 | 258 | ||
@@ -617,6 +618,16 @@ struct pci_driver { | |||
617 | /* these external functions are only available when PCI support is enabled */ | 618 | /* these external functions are only available when PCI support is enabled */ |
618 | #ifdef CONFIG_PCI | 619 | #ifdef CONFIG_PCI |
619 | 620 | ||
621 | extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); | ||
622 | |||
623 | enum pcie_bus_config_types { | ||
624 | PCIE_BUS_PERFORMANCE, | ||
625 | PCIE_BUS_SAFE, | ||
626 | PCIE_BUS_PEER2PEER, | ||
627 | }; | ||
628 | |||
629 | extern enum pcie_bus_config_types pcie_bus_config; | ||
630 | |||
620 | extern struct bus_type pci_bus_type; | 631 | extern struct bus_type pci_bus_type; |
621 | 632 | ||
622 | /* Do NOT directly access these two variables, unless you are arch specific pci | 633 | /* Do NOT directly access these two variables, unless you are arch specific pci |
@@ -796,10 +807,13 @@ int pcix_get_mmrbc(struct pci_dev *dev); | |||
796 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); | 807 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); |
797 | int pcie_get_readrq(struct pci_dev *dev); | 808 | int pcie_get_readrq(struct pci_dev *dev); |
798 | int pcie_set_readrq(struct pci_dev *dev, int rq); | 809 | int pcie_set_readrq(struct pci_dev *dev, int rq); |
810 | int pcie_get_mps(struct pci_dev *dev); | ||
811 | int pcie_set_mps(struct pci_dev *dev, int mps); | ||
799 | int __pci_reset_function(struct pci_dev *dev); | 812 | int __pci_reset_function(struct pci_dev *dev); |
800 | int pci_reset_function(struct pci_dev *dev); | 813 | int pci_reset_function(struct pci_dev *dev); |
801 | void pci_update_resource(struct pci_dev *dev, int resno); | 814 | void pci_update_resource(struct pci_dev *dev, int resno); |
802 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 815 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
816 | int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); | ||
803 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); | 817 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); |
804 | 818 | ||
805 | /* ROM control related routines */ | 819 | /* ROM control related routines */ |
diff --git a/include/linux/personality.h b/include/linux/personality.h index eec3bae164d4..8fc7dd1a57ff 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h | |||
@@ -22,6 +22,7 @@ extern int __set_personality(unsigned int); | |||
22 | * These occupy the top three bytes. | 22 | * These occupy the top three bytes. |
23 | */ | 23 | */ |
24 | enum { | 24 | enum { |
25 | UNAME26 = 0x0020000, | ||
25 | ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ | 26 | ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ |
26 | FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors | 27 | FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors |
27 | * (signal handling) | 28 | * (signal handling) |
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 21097cb086fe..f9ec1736a116 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -72,8 +72,6 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
72 | extern void pm_genpd_init(struct generic_pm_domain *genpd, | 72 | extern void pm_genpd_init(struct generic_pm_domain *genpd, |
73 | struct dev_power_governor *gov, bool is_off); | 73 | struct dev_power_governor *gov, bool is_off); |
74 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); | 74 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); |
75 | extern void pm_genpd_poweroff_unused(void); | ||
76 | extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); | ||
77 | #else | 75 | #else |
78 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, | 76 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, |
79 | struct device *dev) | 77 | struct device *dev) |
@@ -101,8 +99,14 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
101 | { | 99 | { |
102 | return -ENOSYS; | 100 | return -ENOSYS; |
103 | } | 101 | } |
104 | static inline void pm_genpd_poweroff_unused(void) {} | 102 | #endif |
103 | |||
104 | #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME | ||
105 | extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); | ||
106 | extern void pm_genpd_poweroff_unused(void); | ||
107 | #else | ||
105 | static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} | 108 | static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} |
109 | static inline void pm_genpd_poweroff_unused(void) {} | ||
106 | #endif | 110 | #endif |
107 | 111 | ||
108 | #endif /* _LINUX_PM_DOMAIN_H */ | 112 | #endif /* _LINUX_PM_DOMAIN_H */ |
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 5e3e25a3c9c3..63d2df43e61a 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h | |||
@@ -14,6 +14,7 @@ struct platform_pwm_backlight_data { | |||
14 | unsigned int pwm_period_ns; | 14 | unsigned int pwm_period_ns; |
15 | int (*init)(struct device *dev); | 15 | int (*init)(struct device *dev); |
16 | int (*notify)(struct device *dev, int brightness); | 16 | int (*notify)(struct device *dev, int brightness); |
17 | void (*notify_after)(struct device *dev, int brightness); | ||
17 | void (*exit)(struct device *dev); | 18 | void (*exit)(struct device *dev); |
18 | int (*check_fb)(struct device *dev, struct fb_info *info); | 19 | int (*check_fb)(struct device *dev, struct fb_info *info); |
19 | }; | 20 | }; |
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index 9026b30238f3..218168a2b5e9 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h | |||
@@ -36,12 +36,12 @@ | |||
36 | #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ | 36 | #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ |
37 | #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ | 37 | #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ |
38 | #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ | 38 | #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ |
39 | #define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */ | 39 | #define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ |
40 | #define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */ | 40 | #define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ |
41 | #define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */ | 41 | #define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ |
42 | #define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ | 42 | #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ |
43 | #define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ | 43 | #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ |
44 | #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ | 44 | #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ |
45 | #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ | 45 | #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ |
46 | #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ | 46 | #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ |
47 | #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ | 47 | #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ |
@@ -102,7 +102,7 @@ | |||
102 | #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ | 102 | #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ |
103 | #define RIO_RT_MAX_DESTID 0x0000ffff | 103 | #define RIO_RT_MAX_DESTID 0x0000ffff |
104 | 104 | ||
105 | #define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ | 105 | #define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ |
106 | #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ | 106 | #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ |
107 | #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ | 107 | #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ |
108 | #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ | 108 | #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ |
@@ -128,8 +128,8 @@ | |||
128 | #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ | 128 | #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ |
129 | #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ | 129 | #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ |
130 | 130 | ||
131 | #define RIO_WRITE_PORT_CSR 0x44 /* [I] Write Port CSR */ | 131 | #define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ |
132 | #define RIO_DOORBELL_CSR 0x44 /* [II] Doorbell CSR */ | 132 | #define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ |
133 | #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ | 133 | #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ |
134 | #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ | 134 | #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ |
135 | #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ | 135 | #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b27ebea25660..93f4d035076b 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -97,6 +97,9 @@ struct rtc_pll_info { | |||
97 | #define RTC_AF 0x20 /* Alarm interrupt */ | 97 | #define RTC_AF 0x20 /* Alarm interrupt */ |
98 | #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ | 98 | #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ |
99 | 99 | ||
100 | |||
101 | #define RTC_MAX_FREQ 8192 | ||
102 | |||
100 | #ifdef __KERNEL__ | 103 | #ifdef __KERNEL__ |
101 | 104 | ||
102 | #include <linux/types.h> | 105 | #include <linux/types.h> |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 20b03bf94748..4ac2c0578e0f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1767,6 +1767,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1767 | #define PF_DUMPCORE 0x00000200 /* dumped core */ | 1767 | #define PF_DUMPCORE 0x00000200 /* dumped core */ |
1768 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ | 1768 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ |
1769 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ | 1769 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
1770 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ | ||
1770 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ | 1771 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
1771 | #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ | 1772 | #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ |
1772 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ | 1773 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
diff --git a/include/linux/socket.h b/include/linux/socket.h index e17f82266639..d0e77f607a79 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -8,8 +8,10 @@ | |||
8 | #define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) | 8 | #define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) |
9 | /* Implementation specific desired alignment */ | 9 | /* Implementation specific desired alignment */ |
10 | 10 | ||
11 | typedef unsigned short __kernel_sa_family_t; | ||
12 | |||
11 | struct __kernel_sockaddr_storage { | 13 | struct __kernel_sockaddr_storage { |
12 | unsigned short ss_family; /* address family */ | 14 | __kernel_sa_family_t ss_family; /* address family */ |
13 | /* Following field(s) are implementation specific */ | 15 | /* Following field(s) are implementation specific */ |
14 | char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; | 16 | char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; |
15 | /* space to achieve desired size, */ | 17 | /* space to achieve desired size, */ |
@@ -35,7 +37,7 @@ struct seq_file; | |||
35 | extern void socket_seq_show(struct seq_file *seq); | 37 | extern void socket_seq_show(struct seq_file *seq); |
36 | #endif | 38 | #endif |
37 | 39 | ||
38 | typedef unsigned short sa_family_t; | 40 | typedef __kernel_sa_family_t sa_family_t; |
39 | 41 | ||
40 | /* | 42 | /* |
41 | * 1003.1g requires sa_family_t and that sa_data is char. | 43 | * 1003.1g requires sa_family_t and that sa_data is char. |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index f1bfa12ea246..2b8963ff0f35 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -12,15 +12,6 @@ | |||
12 | * | 12 | * |
13 | * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) | 13 | * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) |
14 | * | 14 | * |
15 | * The 1/16 region above the global dirty limit will be put to maximum pauses: | ||
16 | * | ||
17 | * (limit, limit + limit/DIRTY_MAXPAUSE_AREA) | ||
18 | * | ||
19 | * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put | ||
20 | * to loops: | ||
21 | * | ||
22 | * (limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA) | ||
23 | * | ||
24 | * Further beyond, all dirtier tasks will enter a loop waiting (possibly long | 15 | * Further beyond, all dirtier tasks will enter a loop waiting (possibly long |
25 | * time) for the dirty pages to drop, unless written enough pages. | 16 | * time) for the dirty pages to drop, unless written enough pages. |
26 | * | 17 | * |
@@ -31,8 +22,6 @@ | |||
31 | */ | 22 | */ |
32 | #define DIRTY_SCOPE 8 | 23 | #define DIRTY_SCOPE 8 |
33 | #define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) | 24 | #define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) |
34 | #define DIRTY_MAXPAUSE_AREA 16 | ||
35 | #define DIRTY_PASSGOOD_AREA 8 | ||
36 | 25 | ||
37 | /* | 26 | /* |
38 | * 4MB minimal write chunk size | 27 | * 4MB minimal write chunk size |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index caaff5f5f39f..b897d6e6d0a5 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -238,7 +238,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) | |||
238 | { | 238 | { |
239 | __u8 flags = 0; | 239 | __u8 flags = 0; |
240 | 240 | ||
241 | if (inet_sk(sk)->transparent) | 241 | if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) |
242 | flags |= FLOWI_FLAG_ANYSRC; | 242 | flags |= FLOWI_FLAG_ANYSRC; |
243 | if (sk->sk_protocol == IPPROTO_TCP) | 243 | if (sk->sk_protocol == IPPROTO_TCP) |
244 | flags |= FLOWI_FLAG_PRECOW_METRICS; | 244 | flags |= FLOWI_FLAG_PRECOW_METRICS; |
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 2de8fe907596..126c675f4f14 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h | |||
@@ -27,6 +27,12 @@ struct target_core_fabric_ops { | |||
27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); | 27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); |
28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); | 28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); |
29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); | 29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); |
30 | /* | ||
31 | * Optionally used by fabrics to allow demo-mode login, but not | ||
32 | * expose any TPG LUNs, and return 'not connected' in standard | ||
33 | * inquiry response | ||
34 | */ | ||
35 | int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); | ||
30 | struct se_node_acl *(*tpg_alloc_fabric_acl)( | 36 | struct se_node_acl *(*tpg_alloc_fabric_acl)( |
31 | struct se_portal_group *); | 37 | struct se_portal_group *); |
32 | void (*tpg_release_fabric_acl)(struct se_portal_group *, | 38 | void (*tpg_release_fabric_acl)(struct se_portal_group *, |
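A hedged sketch of how a fabric module might opt in to the new callback; the my_* names are hypothetical, and the non-zero return is assumed to mean "demo-mode login allowed, TPG LUNs hidden" as described in the comment above:

static int my_tpg_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	/* allow demo-mode login but expose no TPG LUNs; standard INQUIRY
	 * for those LUNs then reports 'not connected' */
	return 1;
}

static struct target_core_fabric_ops my_fabric_ops = {
	/* ... the other mandatory callbacks ... */
	.tpg_check_demo_mode_login_only = my_tpg_check_demo_mode_login_only,
};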
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index bf366547da25..05c5e61f0a7c 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/tracepoint.h> | 9 | #include <linux/tracepoint.h> |
10 | 10 | ||
11 | #define RWBS_LEN 8 | ||
12 | |||
11 | DECLARE_EVENT_CLASS(block_rq_with_error, | 13 | DECLARE_EVENT_CLASS(block_rq_with_error, |
12 | 14 | ||
13 | TP_PROTO(struct request_queue *q, struct request *rq), | 15 | TP_PROTO(struct request_queue *q, struct request *rq), |
@@ -19,7 +21,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
19 | __field( sector_t, sector ) | 21 | __field( sector_t, sector ) |
20 | __field( unsigned int, nr_sector ) | 22 | __field( unsigned int, nr_sector ) |
21 | __field( int, errors ) | 23 | __field( int, errors ) |
22 | __array( char, rwbs, 6 ) | 24 | __array( char, rwbs, RWBS_LEN ) |
23 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | 25 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) |
24 | ), | 26 | ), |
25 | 27 | ||
@@ -104,7 +106,7 @@ DECLARE_EVENT_CLASS(block_rq, | |||
104 | __field( sector_t, sector ) | 106 | __field( sector_t, sector ) |
105 | __field( unsigned int, nr_sector ) | 107 | __field( unsigned int, nr_sector ) |
106 | __field( unsigned int, bytes ) | 108 | __field( unsigned int, bytes ) |
107 | __array( char, rwbs, 6 ) | 109 | __array( char, rwbs, RWBS_LEN ) |
108 | __array( char, comm, TASK_COMM_LEN ) | 110 | __array( char, comm, TASK_COMM_LEN ) |
109 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | 111 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) |
110 | ), | 112 | ), |
@@ -183,7 +185,7 @@ TRACE_EVENT(block_bio_bounce, | |||
183 | __field( dev_t, dev ) | 185 | __field( dev_t, dev ) |
184 | __field( sector_t, sector ) | 186 | __field( sector_t, sector ) |
185 | __field( unsigned int, nr_sector ) | 187 | __field( unsigned int, nr_sector ) |
186 | __array( char, rwbs, 6 ) | 188 | __array( char, rwbs, RWBS_LEN ) |
187 | __array( char, comm, TASK_COMM_LEN ) | 189 | __array( char, comm, TASK_COMM_LEN ) |
188 | ), | 190 | ), |
189 | 191 | ||
@@ -222,7 +224,7 @@ TRACE_EVENT(block_bio_complete, | |||
222 | __field( sector_t, sector ) | 224 | __field( sector_t, sector ) |
223 | __field( unsigned, nr_sector ) | 225 | __field( unsigned, nr_sector ) |
224 | __field( int, error ) | 226 | __field( int, error ) |
225 | __array( char, rwbs, 6 ) | 227 | __array( char, rwbs, RWBS_LEN) |
226 | ), | 228 | ), |
227 | 229 | ||
228 | TP_fast_assign( | 230 | TP_fast_assign( |
@@ -249,7 +251,7 @@ DECLARE_EVENT_CLASS(block_bio, | |||
249 | __field( dev_t, dev ) | 251 | __field( dev_t, dev ) |
250 | __field( sector_t, sector ) | 252 | __field( sector_t, sector ) |
251 | __field( unsigned int, nr_sector ) | 253 | __field( unsigned int, nr_sector ) |
252 | __array( char, rwbs, 6 ) | 254 | __array( char, rwbs, RWBS_LEN ) |
253 | __array( char, comm, TASK_COMM_LEN ) | 255 | __array( char, comm, TASK_COMM_LEN ) |
254 | ), | 256 | ), |
255 | 257 | ||
@@ -321,7 +323,7 @@ DECLARE_EVENT_CLASS(block_get_rq, | |||
321 | __field( dev_t, dev ) | 323 | __field( dev_t, dev ) |
322 | __field( sector_t, sector ) | 324 | __field( sector_t, sector ) |
323 | __field( unsigned int, nr_sector ) | 325 | __field( unsigned int, nr_sector ) |
324 | __array( char, rwbs, 6 ) | 326 | __array( char, rwbs, RWBS_LEN ) |
325 | __array( char, comm, TASK_COMM_LEN ) | 327 | __array( char, comm, TASK_COMM_LEN ) |
326 | ), | 328 | ), |
327 | 329 | ||
@@ -456,7 +458,7 @@ TRACE_EVENT(block_split, | |||
456 | __field( dev_t, dev ) | 458 | __field( dev_t, dev ) |
457 | __field( sector_t, sector ) | 459 | __field( sector_t, sector ) |
458 | __field( sector_t, new_sector ) | 460 | __field( sector_t, new_sector ) |
459 | __array( char, rwbs, 6 ) | 461 | __array( char, rwbs, RWBS_LEN ) |
460 | __array( char, comm, TASK_COMM_LEN ) | 462 | __array( char, comm, TASK_COMM_LEN ) |
461 | ), | 463 | ), |
462 | 464 | ||
@@ -498,7 +500,7 @@ TRACE_EVENT(block_bio_remap, | |||
498 | __field( unsigned int, nr_sector ) | 500 | __field( unsigned int, nr_sector ) |
499 | __field( dev_t, old_dev ) | 501 | __field( dev_t, old_dev ) |
500 | __field( sector_t, old_sector ) | 502 | __field( sector_t, old_sector ) |
501 | __array( char, rwbs, 6 ) | 503 | __array( char, rwbs, RWBS_LEN) |
502 | ), | 504 | ), |
503 | 505 | ||
504 | TP_fast_assign( | 506 | TP_fast_assign( |
@@ -542,7 +544,7 @@ TRACE_EVENT(block_rq_remap, | |||
542 | __field( unsigned int, nr_sector ) | 544 | __field( unsigned int, nr_sector ) |
543 | __field( dev_t, old_dev ) | 545 | __field( dev_t, old_dev ) |
544 | __field( sector_t, old_sector ) | 546 | __field( sector_t, old_sector ) |
545 | __array( char, rwbs, 6 ) | 547 | __array( char, rwbs, RWBS_LEN) |
546 | ), | 548 | ), |
547 | 549 | ||
548 | TP_fast_assign( | 550 | TP_fast_assign( |
diff --git a/kernel/Makefile b/kernel/Makefile index d06467fc8f7c..eca595e2fd52 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ | |||
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ |
13 | async.o range.o jump_label.o | 13 | async.o range.o |
14 | obj-y += groups.o | 14 | obj-y += groups.o |
15 | 15 | ||
16 | ifdef CONFIG_FUNCTION_TRACER | 16 | ifdef CONFIG_FUNCTION_TRACER |
@@ -107,6 +107,7 @@ obj-$(CONFIG_PERF_EVENTS) += events/ | |||
107 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o | 107 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o |
108 | obj-$(CONFIG_PADATA) += padata.o | 108 | obj-$(CONFIG_PADATA) += padata.o |
109 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 109 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
110 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | ||
110 | 111 | ||
111 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) | 112 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) |
112 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is | 113 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is |
diff --git a/kernel/cred.c b/kernel/cred.c index 174fa84eca30..8ef31f53c44c 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -508,10 +508,8 @@ int commit_creds(struct cred *new) | |||
508 | key_fsgid_changed(task); | 508 | key_fsgid_changed(task); |
509 | 509 | ||
510 | /* do it | 510 | /* do it |
511 | * - What if a process setreuid()'s and this brings the | 511 | * RLIMIT_NPROC limits on user->processes have already been checked |
512 | * new uid over his NPROC rlimit? We can check this now | 512 | * in set_user(). |
513 | * cheaply with the new uid cache, so if it matters | ||
514 | * we should be checking for it. -DaveM | ||
515 | */ | 513 | */ |
516 | alter_cred_subscribers(new, 2); | 514 | alter_cred_subscribers(new, 2); |
517 | if (new->user != old->user) | 515 | if (new->user != old->user) |
diff --git a/kernel/fork.c b/kernel/fork.c index e7ceaca89609..8e6b6f4fb272 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1111,6 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1111 | p->real_cred->user != INIT_USER) | 1111 | p->real_cred->user != INIT_USER) |
1112 | goto bad_fork_free; | 1112 | goto bad_fork_free; |
1113 | } | 1113 | } |
1114 | current->flags &= ~PF_NPROC_EXCEEDED; | ||
1114 | 1115 | ||
1115 | retval = copy_creds(p, clone_flags); | 1116 | retval = copy_creds(p, clone_flags); |
1116 | if (retval < 0) | 1117 | if (retval < 0) |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index 3a2cab407b93..e38544dddb18 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c | |||
@@ -246,7 +246,7 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | |||
246 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); | 246 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); |
247 | 247 | ||
248 | for (i = gc->irq_base; msk; msk >>= 1, i++) { | 248 | for (i = gc->irq_base; msk; msk >>= 1, i++) { |
249 | if (!msk & 0x01) | 249 | if (!(msk & 0x01)) |
250 | continue; | 250 | continue; |
251 | 251 | ||
252 | if (flags & IRQ_GC_INIT_NESTED_LOCK) | 252 | if (flags & IRQ_GC_INIT_NESTED_LOCK) |
@@ -301,7 +301,7 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | |||
301 | raw_spin_unlock(&gc_lock); | 301 | raw_spin_unlock(&gc_lock); |
302 | 302 | ||
303 | for (; msk; msk >>= 1, i++) { | 303 | for (; msk; msk >>= 1, i++) { |
304 | if (!msk & 0x01) | 304 | if (!(msk & 0x01)) |
305 | continue; | 305 | continue; |
306 | 306 | ||
307 | /* Remove handler first. That will mask the irq line */ | 307 | /* Remove handler first. That will mask the irq line */ |
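Both hunks above fix the same operator-precedence bug: "!" binds tighter than "&", so "!msk & 0x01" parses as "(!msk) & 0x01", which is 0 for every non-zero mask, and the continue that should skip unset bits never fires. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int msk = 0x2;			/* bit 0 clear, bit 1 set */

	printf("buggy: %d\n", !msk & 0x01);	/* (!msk) & 1 == 0: never skips */
	printf("fixed: %d\n", !(msk & 0x01));	/* 1: bit 0 is clear, skip it */
	return 0;
}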
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4c60a50e66b2..039b889ea053 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -70,7 +70,8 @@ static inline void desc_smp_init(struct irq_desc *desc, int node) { } | |||
70 | static inline int desc_node(struct irq_desc *desc) { return 0; } | 70 | static inline int desc_node(struct irq_desc *desc) { return 0; } |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | 73 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, |
74 | struct module *owner) | ||
74 | { | 75 | { |
75 | int cpu; | 76 | int cpu; |
76 | 77 | ||
@@ -86,6 +87,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | |||
86 | desc->irq_count = 0; | 87 | desc->irq_count = 0; |
87 | desc->irqs_unhandled = 0; | 88 | desc->irqs_unhandled = 0; |
88 | desc->name = NULL; | 89 | desc->name = NULL; |
90 | desc->owner = owner; | ||
89 | for_each_possible_cpu(cpu) | 91 | for_each_possible_cpu(cpu) |
90 | *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; | 92 | *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; |
91 | desc_smp_init(desc, node); | 93 | desc_smp_init(desc, node); |
@@ -128,7 +130,7 @@ static void free_masks(struct irq_desc *desc) | |||
128 | static inline void free_masks(struct irq_desc *desc) { } | 130 | static inline void free_masks(struct irq_desc *desc) { } |
129 | #endif | 131 | #endif |
130 | 132 | ||
131 | static struct irq_desc *alloc_desc(int irq, int node) | 133 | static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) |
132 | { | 134 | { |
133 | struct irq_desc *desc; | 135 | struct irq_desc *desc; |
134 | gfp_t gfp = GFP_KERNEL; | 136 | gfp_t gfp = GFP_KERNEL; |
@@ -147,7 +149,7 @@ static struct irq_desc *alloc_desc(int irq, int node) | |||
147 | raw_spin_lock_init(&desc->lock); | 149 | raw_spin_lock_init(&desc->lock); |
148 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 150 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
149 | 151 | ||
150 | desc_set_defaults(irq, desc, node); | 152 | desc_set_defaults(irq, desc, node, owner); |
151 | 153 | ||
152 | return desc; | 154 | return desc; |
153 | 155 | ||
@@ -173,13 +175,14 @@ static void free_desc(unsigned int irq) | |||
173 | kfree(desc); | 175 | kfree(desc); |
174 | } | 176 | } |
175 | 177 | ||
176 | static int alloc_descs(unsigned int start, unsigned int cnt, int node) | 178 | static int alloc_descs(unsigned int start, unsigned int cnt, int node, |
179 | struct module *owner) | ||
177 | { | 180 | { |
178 | struct irq_desc *desc; | 181 | struct irq_desc *desc; |
179 | int i; | 182 | int i; |
180 | 183 | ||
181 | for (i = 0; i < cnt; i++) { | 184 | for (i = 0; i < cnt; i++) { |
182 | desc = alloc_desc(start + i, node); | 185 | desc = alloc_desc(start + i, node, owner); |
183 | if (!desc) | 186 | if (!desc) |
184 | goto err; | 187 | goto err; |
185 | mutex_lock(&sparse_irq_lock); | 188 | mutex_lock(&sparse_irq_lock); |
@@ -227,7 +230,7 @@ int __init early_irq_init(void) | |||
227 | nr_irqs = initcnt; | 230 | nr_irqs = initcnt; |
228 | 231 | ||
229 | for (i = 0; i < initcnt; i++) { | 232 | for (i = 0; i < initcnt; i++) { |
230 | desc = alloc_desc(i, node); | 233 | desc = alloc_desc(i, node, NULL); |
231 | set_bit(i, allocated_irqs); | 234 | set_bit(i, allocated_irqs); |
232 | irq_insert_desc(i, desc); | 235 | irq_insert_desc(i, desc); |
233 | } | 236 | } |
@@ -261,7 +264,7 @@ int __init early_irq_init(void) | |||
261 | alloc_masks(&desc[i], GFP_KERNEL, node); | 264 | alloc_masks(&desc[i], GFP_KERNEL, node); |
262 | raw_spin_lock_init(&desc[i].lock); | 265 | raw_spin_lock_init(&desc[i].lock); |
263 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 266 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
264 | desc_set_defaults(i, &desc[i], node); | 267 | desc_set_defaults(i, &desc[i], node, NULL); |
265 | } | 268 | } |
266 | return arch_early_irq_init(); | 269 | return arch_early_irq_init(); |
267 | } | 270 | } |
@@ -276,8 +279,16 @@ static void free_desc(unsigned int irq) | |||
276 | dynamic_irq_cleanup(irq); | 279 | dynamic_irq_cleanup(irq); |
277 | } | 280 | } |
278 | 281 | ||
279 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | 282 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, |
283 | struct module *owner) | ||
280 | { | 284 | { |
285 | u32 i; | ||
286 | |||
287 | for (i = 0; i < cnt; i++) { | ||
288 | struct irq_desc *desc = irq_to_desc(start + i); | ||
289 | |||
290 | desc->owner = owner; | ||
291 | } | ||
281 | return start; | 292 | return start; |
282 | } | 293 | } |
283 | 294 | ||
@@ -333,11 +344,13 @@ EXPORT_SYMBOL_GPL(irq_free_descs); | |||
333 | * @from: Start the search from this irq number | 344 | * @from: Start the search from this irq number |
334 | * @cnt: Number of consecutive irqs to allocate. | 345 | * @cnt: Number of consecutive irqs to allocate. |
335 | * @node: Preferred node on which the irq descriptor should be allocated | 346 | * @node: Preferred node on which the irq descriptor should be allocated |
347 | * @owner: Owning module (can be NULL) | ||
336 | * | 348 | * |
337 | * Returns the first irq number or error code | 349 | * Returns the first irq number or error code |
338 | */ | 350 | */ |
339 | int __ref | 351 | int __ref |
340 | irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | 352 | __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
353 | struct module *owner) | ||
341 | { | 354 | { |
342 | int start, ret; | 355 | int start, ret; |
343 | 356 | ||
@@ -366,13 +379,13 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | |||
366 | 379 | ||
367 | bitmap_set(allocated_irqs, start, cnt); | 380 | bitmap_set(allocated_irqs, start, cnt); |
368 | mutex_unlock(&sparse_irq_lock); | 381 | mutex_unlock(&sparse_irq_lock); |
369 | return alloc_descs(start, cnt, node); | 382 | return alloc_descs(start, cnt, node, owner); |
370 | 383 | ||
371 | err: | 384 | err: |
372 | mutex_unlock(&sparse_irq_lock); | 385 | mutex_unlock(&sparse_irq_lock); |
373 | return ret; | 386 | return ret; |
374 | } | 387 | } |
375 | EXPORT_SYMBOL_GPL(irq_alloc_descs); | 388 | EXPORT_SYMBOL_GPL(__irq_alloc_descs); |
376 | 389 | ||
377 | /** | 390 | /** |
378 | * irq_reserve_irqs - mark irqs allocated | 391 | * irq_reserve_irqs - mark irqs allocated |
@@ -440,7 +453,7 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
440 | unsigned long flags; | 453 | unsigned long flags; |
441 | 454 | ||
442 | raw_spin_lock_irqsave(&desc->lock, flags); | 455 | raw_spin_lock_irqsave(&desc->lock, flags); |
443 | desc_set_defaults(irq, desc, desc_node(desc)); | 456 | desc_set_defaults(irq, desc, desc_node(desc), NULL); |
444 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 457 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
445 | } | 458 | } |
446 | 459 | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a7840aeb0fb..9b956fa20308 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -883,6 +883,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
883 | 883 | ||
884 | if (desc->irq_data.chip == &no_irq_chip) | 884 | if (desc->irq_data.chip == &no_irq_chip) |
885 | return -ENOSYS; | 885 | return -ENOSYS; |
886 | if (!try_module_get(desc->owner)) | ||
887 | return -ENODEV; | ||
886 | /* | 888 | /* |
887 | * Some drivers like serial.c use request_irq() heavily, | 889 | * Some drivers like serial.c use request_irq() heavily, |
888 | * so we have to be careful not to interfere with a | 890 | * so we have to be careful not to interfere with a |
@@ -906,8 +908,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
906 | */ | 908 | */ |
907 | nested = irq_settings_is_nested_thread(desc); | 909 | nested = irq_settings_is_nested_thread(desc); |
908 | if (nested) { | 910 | if (nested) { |
909 | if (!new->thread_fn) | 911 | if (!new->thread_fn) { |
910 | return -EINVAL; | 912 | ret = -EINVAL; |
913 | goto out_mput; | ||
914 | } | ||
911 | /* | 915 | /* |
912 | * Replace the primary handler which was provided from | 916 | * Replace the primary handler which was provided from |
913 | * the driver for non nested interrupt handling by the | 917 | * the driver for non nested interrupt handling by the |
@@ -929,8 +933,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
929 | 933 | ||
930 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | 934 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
931 | new->name); | 935 | new->name); |
932 | if (IS_ERR(t)) | 936 | if (IS_ERR(t)) { |
933 | return PTR_ERR(t); | 937 | ret = PTR_ERR(t); |
938 | goto out_mput; | ||
939 | } | ||
934 | /* | 940 | /* |
935 | * We keep the reference to the task struct even if | 941 | * We keep the reference to the task struct even if |
936 | * the thread dies to avoid that the interrupt code | 942 | * the thread dies to avoid that the interrupt code |
@@ -1095,6 +1101,8 @@ out_thread: | |||
1095 | kthread_stop(t); | 1101 | kthread_stop(t); |
1096 | put_task_struct(t); | 1102 | put_task_struct(t); |
1097 | } | 1103 | } |
1104 | out_mput: | ||
1105 | module_put(desc->owner); | ||
1098 | return ret; | 1106 | return ret; |
1099 | } | 1107 | } |
1100 | 1108 | ||
@@ -1203,6 +1211,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
1203 | put_task_struct(action->thread); | 1211 | put_task_struct(action->thread); |
1204 | } | 1212 | } |
1205 | 1213 | ||
1214 | module_put(desc->owner); | ||
1206 | return action; | 1215 | return action; |
1207 | } | 1216 | } |
1208 | 1217 | ||
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 8c24294e477f..91d67ce3a8d5 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -3111,7 +3111,13 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) | |||
3111 | if (!class) | 3111 | if (!class) |
3112 | class = look_up_lock_class(lock, 0); | 3112 | class = look_up_lock_class(lock, 0); |
3113 | 3113 | ||
3114 | if (DEBUG_LOCKS_WARN_ON(!class)) | 3114 | /* |
3115 | * If look_up_lock_class() failed to find a class, we're trying | ||
3116 | * to test if we hold a lock that has never yet been acquired. | ||
3117 | * Clearly if the lock hasn't been acquired _ever_, we're not | ||
3118 | * holding it either, so report failure. | ||
3119 | */ | ||
3120 | if (!class) | ||
3115 | return 0; | 3121 | return 0; |
3116 | 3122 | ||
3117 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) | 3123 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index b1914cb9095c..3744c594b19b 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -231,3 +231,7 @@ config PM_CLK | |||
231 | config PM_GENERIC_DOMAINS | 231 | config PM_GENERIC_DOMAINS |
232 | bool | 232 | bool |
233 | depends on PM | 233 | depends on PM |
234 | |||
235 | config PM_GENERIC_DOMAINS_RUNTIME | ||
236 | def_bool y | ||
237 | depends on PM_RUNTIME && PM_GENERIC_DOMAINS | ||
diff --git a/kernel/printk.c b/kernel/printk.c index 37dff3429adb..28a40d8171b8 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -318,8 +318,10 @@ static int check_syslog_permissions(int type, bool from_file) | |||
318 | return 0; | 318 | return 0; |
319 | /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ | 319 | /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ |
320 | if (capable(CAP_SYS_ADMIN)) { | 320 | if (capable(CAP_SYS_ADMIN)) { |
321 | WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " | 321 | printk_once(KERN_WARNING "%s (%d): " |
322 | "but no CAP_SYSLOG (deprecated).\n"); | 322 | "Attempt to access syslog with CAP_SYS_ADMIN " |
323 | "but no CAP_SYSLOG (deprecated).\n", | ||
324 | current->comm, task_pid_nr(current)); | ||
323 | return 0; | 325 | return 0; |
324 | } | 326 | } |
325 | return -EPERM; | 327 | return -EPERM; |
@@ -1602,7 +1604,7 @@ static int __init printk_late_init(void) | |||
1602 | struct console *con; | 1604 | struct console *con; |
1603 | 1605 | ||
1604 | for_each_console(con) { | 1606 | for_each_console(con) { |
1605 | if (con->flags & CON_BOOT) { | 1607 | if (!keep_bootcon && con->flags & CON_BOOT) { |
1606 | printk(KERN_INFO "turn off boot console %s%d\n", | 1608 | printk(KERN_INFO "turn off boot console %s%d\n", |
1607 | con->name, con->index); | 1609 | con->name, con->index); |
1608 | unregister_console(con); | 1610 | unregister_console(con); |
diff --git a/kernel/sys.c b/kernel/sys.c index a101ba36c444..18ee1d2f6474 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #include <linux/fs_struct.h> | 37 | #include <linux/fs_struct.h> |
38 | #include <linux/gfp.h> | 38 | #include <linux/gfp.h> |
39 | #include <linux/syscore_ops.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <linux/version.h> | ||
41 | #include <linux/ctype.h> | ||
40 | 42 | ||
41 | #include <linux/compat.h> | 43 | #include <linux/compat.h> |
42 | #include <linux/syscalls.h> | 44 | #include <linux/syscalls.h> |
@@ -44,6 +46,8 @@ | |||
44 | #include <linux/user_namespace.h> | 46 | #include <linux/user_namespace.h> |
45 | 47 | ||
46 | #include <linux/kmsg_dump.h> | 48 | #include <linux/kmsg_dump.h> |
49 | /* Move somewhere else to avoid recompiling? */ | ||
50 | #include <generated/utsrelease.h> | ||
47 | 51 | ||
48 | #include <asm/uaccess.h> | 52 | #include <asm/uaccess.h> |
49 | #include <asm/io.h> | 53 | #include <asm/io.h> |
@@ -621,11 +625,18 @@ static int set_user(struct cred *new) | |||
621 | if (!new_user) | 625 | if (!new_user) |
622 | return -EAGAIN; | 626 | return -EAGAIN; |
623 | 627 | ||
628 | /* | ||
629 | * We don't fail in case of NPROC limit excess here because too many | ||
630 | * poorly written programs don't check set*uid() return code, assuming | ||
631 | * it never fails if called by root. We may still enforce NPROC limit | ||
632 | * for programs doing set*uid()+execve() by harmlessly deferring the | ||
633 | * failure to the execve() stage. | ||
634 | */ | ||
624 | if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && | 635 | if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && |
625 | new_user != INIT_USER) { | 636 | new_user != INIT_USER) |
626 | free_uid(new_user); | 637 | current->flags |= PF_NPROC_EXCEEDED; |
627 | return -EAGAIN; | 638 | else |
628 | } | 639 | current->flags &= ~PF_NPROC_EXCEEDED; |
629 | 640 | ||
630 | free_uid(new->user); | 641 | free_uid(new->user); |
631 | new->user = new_user; | 642 | new->user = new_user; |
@@ -1154,6 +1165,34 @@ DECLARE_RWSEM(uts_sem); | |||
1154 | #define override_architecture(name) 0 | 1165 | #define override_architecture(name) 0 |
1155 | #endif | 1166 | #endif |
1156 | 1167 | ||
1168 | /* | ||
1169 | * Work around broken programs that cannot handle "Linux 3.0". | ||
1170 | * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 | ||
1171 | */ | ||
1172 | static int override_release(char __user *release, int len) | ||
1173 | { | ||
1174 | int ret = 0; | ||
1175 | char buf[len]; | ||
1176 | |||
1177 | if (current->personality & UNAME26) { | ||
1178 | char *rest = UTS_RELEASE; | ||
1179 | int ndots = 0; | ||
1180 | unsigned v; | ||
1181 | |||
1182 | while (*rest) { | ||
1183 | if (*rest == '.' && ++ndots >= 3) | ||
1184 | break; | ||
1185 | if (!isdigit(*rest) && *rest != '.') | ||
1186 | break; | ||
1187 | rest++; | ||
1188 | } | ||
1189 | v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; | ||
1190 | snprintf(buf, len, "2.6.%u%s", v, rest); | ||
1191 | ret = copy_to_user(release, buf, len); | ||
1192 | } | ||
1193 | return ret; | ||
1194 | } | ||
1195 | |||
1157 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) | 1196 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) |
1158 | { | 1197 | { |
1159 | int errno = 0; | 1198 | int errno = 0; |
@@ -1163,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) | |||
1163 | errno = -EFAULT; | 1202 | errno = -EFAULT; |
1164 | up_read(&uts_sem); | 1203 | up_read(&uts_sem); |
1165 | 1204 | ||
1205 | if (!errno && override_release(name->release, sizeof(name->release))) | ||
1206 | errno = -EFAULT; | ||
1166 | if (!errno && override_architecture(name)) | 1207 | if (!errno && override_architecture(name)) |
1167 | errno = -EFAULT; | 1208 | errno = -EFAULT; |
1168 | return errno; | 1209 | return errno; |
@@ -1184,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) | |||
1184 | error = -EFAULT; | 1225 | error = -EFAULT; |
1185 | up_read(&uts_sem); | 1226 | up_read(&uts_sem); |
1186 | 1227 | ||
1228 | if (!error && override_release(name->release, sizeof(name->release))) | ||
1229 | error = -EFAULT; | ||
1187 | if (!error && override_architecture(name)) | 1230 | if (!error && override_architecture(name)) |
1188 | error = -EFAULT; | 1231 | error = -EFAULT; |
1189 | return error; | 1232 | return error; |
@@ -1218,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) | |||
1218 | 1261 | ||
1219 | if (!error && override_architecture(name)) | 1262 | if (!error && override_architecture(name)) |
1220 | error = -EFAULT; | 1263 | error = -EFAULT; |
1264 | if (!error && override_release(name->release, sizeof(name->release))) | ||
1265 | error = -EFAULT; | ||
1221 | return error ? -EFAULT : 0; | 1266 | return error ? -EFAULT : 0; |
1222 | } | 1267 | } |
1223 | #endif | 1268 | #endif |
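
From userspace the old-style release string is opt-in via the UNAME26 personality bit. A small test program, assuming UNAME26 carries the value this series introduces (0x0020000) in case the libc headers do not define it yet:

    #include <stdio.h>
    #include <sys/personality.h>
    #include <sys/utsname.h>

    #ifndef UNAME26
    #define UNAME26 0x0020000    /* assumption: mirrors the kernel's personality flag */
    #endif

    int main(void)
    {
        struct utsname u;

        personality(PER_LINUX | UNAME26);          /* request the 2.6.40+x view */
        if (uname(&u) == 0)
            printf("release: %s\n", u.release);    /* e.g. "2.6.40" on a 3.0 kernel */
        return 0;
    }
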
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 3b8e028b9601..e8bffbe2ba4b 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/stat.h> | 1 | #include <linux/stat.h> |
2 | #include <linux/sysctl.h> | 2 | #include <linux/sysctl.h> |
3 | #include "../fs/xfs/linux-2.6/xfs_sysctl.h" | 3 | #include "../fs/xfs/xfs_sysctl.h" |
4 | #include <linux/sunrpc/debug.h> | 4 | #include <linux/sunrpc/debug.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <net/ip_vs.h> | 6 | #include <net/ip_vs.h> |
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index 4e4932a7b360..362da653813d 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/stat.h> | 1 | #include <linux/stat.h> |
2 | #include <linux/sysctl.h> | 2 | #include <linux/sysctl.h> |
3 | #include "../fs/xfs/linux-2.6/xfs_sysctl.h" | 3 | #include "../fs/xfs/xfs_sysctl.h" |
4 | #include <linux/sunrpc/debug.h> | 4 | #include <linux/sunrpc/debug.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <net/ip_vs.h> | 6 | #include <net/ip_vs.h> |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2ad39e556cb4..cd3134510f3d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -82,7 +82,7 @@ config EVENT_POWER_TRACING_DEPRECATED | |||
82 | power:power_frequency | 82 | power:power_frequency |
83 | This is for userspace compatibility | 83 | This is for userspace compatibility |
84 | and will vanish after 5 kernel iterations, | 84 | and will vanish after 5 kernel iterations, |
85 | namely 2.6.41. | 85 | namely 3.1. |
86 | 86 | ||
87 | config CONTEXT_SWITCH_TRACER | 87 | config CONTEXT_SWITCH_TRACER |
88 | bool | 88 | bool |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 6957aa298dfa..7c910a5593a6 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -206,6 +206,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
206 | what |= MASK_TC_BIT(rw, RAHEAD); | 206 | what |= MASK_TC_BIT(rw, RAHEAD); |
207 | what |= MASK_TC_BIT(rw, META); | 207 | what |= MASK_TC_BIT(rw, META); |
208 | what |= MASK_TC_BIT(rw, DISCARD); | 208 | what |= MASK_TC_BIT(rw, DISCARD); |
209 | what |= MASK_TC_BIT(rw, FLUSH); | ||
210 | what |= MASK_TC_BIT(rw, FUA); | ||
209 | 211 | ||
210 | pid = tsk->pid; | 212 | pid = tsk->pid; |
211 | if (act_log_check(bt, what, sector, pid)) | 213 | if (act_log_check(bt, what, sector, pid)) |
@@ -1054,6 +1056,9 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) | |||
1054 | goto out; | 1056 | goto out; |
1055 | } | 1057 | } |
1056 | 1058 | ||
1059 | if (tc & BLK_TC_FLUSH) | ||
1060 | rwbs[i++] = 'F'; | ||
1061 | |||
1057 | if (tc & BLK_TC_DISCARD) | 1062 | if (tc & BLK_TC_DISCARD) |
1058 | rwbs[i++] = 'D'; | 1063 | rwbs[i++] = 'D'; |
1059 | else if (tc & BLK_TC_WRITE) | 1064 | else if (tc & BLK_TC_WRITE) |
@@ -1063,10 +1068,10 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) | |||
1063 | else | 1068 | else |
1064 | rwbs[i++] = 'N'; | 1069 | rwbs[i++] = 'N'; |
1065 | 1070 | ||
1071 | if (tc & BLK_TC_FUA) | ||
1072 | rwbs[i++] = 'F'; | ||
1066 | if (tc & BLK_TC_AHEAD) | 1073 | if (tc & BLK_TC_AHEAD) |
1067 | rwbs[i++] = 'A'; | 1074 | rwbs[i++] = 'A'; |
1068 | if (tc & BLK_TC_BARRIER) | ||
1069 | rwbs[i++] = 'B'; | ||
1070 | if (tc & BLK_TC_SYNC) | 1075 | if (tc & BLK_TC_SYNC) |
1071 | rwbs[i++] = 'S'; | 1076 | rwbs[i++] = 'S'; |
1072 | if (tc & BLK_TC_META) | 1077 | if (tc & BLK_TC_META) |
@@ -1132,7 +1137,7 @@ typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); | |||
1132 | 1137 | ||
1133 | static int blk_log_action_classic(struct trace_iterator *iter, const char *act) | 1138 | static int blk_log_action_classic(struct trace_iterator *iter, const char *act) |
1134 | { | 1139 | { |
1135 | char rwbs[6]; | 1140 | char rwbs[RWBS_LEN]; |
1136 | unsigned long long ts = iter->ts; | 1141 | unsigned long long ts = iter->ts; |
1137 | unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); | 1142 | unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); |
1138 | unsigned secs = (unsigned long)ts; | 1143 | unsigned secs = (unsigned long)ts; |
@@ -1148,7 +1153,7 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act) | |||
1148 | 1153 | ||
1149 | static int blk_log_action(struct trace_iterator *iter, const char *act) | 1154 | static int blk_log_action(struct trace_iterator *iter, const char *act) |
1150 | { | 1155 | { |
1151 | char rwbs[6]; | 1156 | char rwbs[RWBS_LEN]; |
1152 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); | 1157 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); |
1153 | 1158 | ||
1154 | fill_rwbs(rwbs, t); | 1159 | fill_rwbs(rwbs, t); |
@@ -1561,7 +1566,7 @@ static const struct { | |||
1561 | } mask_maps[] = { | 1566 | } mask_maps[] = { |
1562 | { BLK_TC_READ, "read" }, | 1567 | { BLK_TC_READ, "read" }, |
1563 | { BLK_TC_WRITE, "write" }, | 1568 | { BLK_TC_WRITE, "write" }, |
1564 | { BLK_TC_BARRIER, "barrier" }, | 1569 | { BLK_TC_FLUSH, "flush" }, |
1565 | { BLK_TC_SYNC, "sync" }, | 1570 | { BLK_TC_SYNC, "sync" }, |
1566 | { BLK_TC_QUEUE, "queue" }, | 1571 | { BLK_TC_QUEUE, "queue" }, |
1567 | { BLK_TC_REQUEUE, "requeue" }, | 1572 | { BLK_TC_REQUEUE, "requeue" }, |
@@ -1573,6 +1578,7 @@ static const struct { | |||
1573 | { BLK_TC_META, "meta" }, | 1578 | { BLK_TC_META, "meta" }, |
1574 | { BLK_TC_DISCARD, "discard" }, | 1579 | { BLK_TC_DISCARD, "discard" }, |
1575 | { BLK_TC_DRV_DATA, "drv_data" }, | 1580 | { BLK_TC_DRV_DATA, "drv_data" }, |
1581 | { BLK_TC_FUA, "fua" }, | ||
1576 | }; | 1582 | }; |
1577 | 1583 | ||
1578 | static int blk_trace_str2mask(const char *str) | 1584 | static int blk_trace_str2mask(const char *str) |
@@ -1788,6 +1794,9 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
1788 | { | 1794 | { |
1789 | int i = 0; | 1795 | int i = 0; |
1790 | 1796 | ||
1797 | if (rw & REQ_FLUSH) | ||
1798 | rwbs[i++] = 'F'; | ||
1799 | |||
1791 | if (rw & WRITE) | 1800 | if (rw & WRITE) |
1792 | rwbs[i++] = 'W'; | 1801 | rwbs[i++] = 'W'; |
1793 | else if (rw & REQ_DISCARD) | 1802 | else if (rw & REQ_DISCARD) |
@@ -1797,6 +1806,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
1797 | else | 1806 | else |
1798 | rwbs[i++] = 'N'; | 1807 | rwbs[i++] = 'N'; |
1799 | 1808 | ||
1809 | if (rw & REQ_FUA) | ||
1810 | rwbs[i++] = 'F'; | ||
1800 | if (rw & REQ_RAHEAD) | 1811 | if (rw & REQ_RAHEAD) |
1801 | rwbs[i++] = 'A'; | 1812 | rwbs[i++] = 'A'; |
1802 | if (rw & REQ_SYNC) | 1813 | if (rw & REQ_SYNC) |
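
With the two new trace bits the decoded rwbs string can contain 'F' twice, a leading one for a flush and one after the data direction for FUA, which is also why the fixed rwbs[6] buffers above grow to RWBS_LEN. A standalone illustration of the resulting ordering (the flag values here are illustrative, not the kernel's BLK_TC_*/REQ_* bits):

    #include <stdio.h>

    enum { F_FLUSH = 1, F_WRITE = 2, F_DISCARD = 4, F_READ = 8,
           F_FUA = 16, F_AHEAD = 32, F_SYNC = 64, F_META = 128 };

    static void fill_rwbs(char *buf, unsigned int flags)
    {
        int i = 0;

        if (flags & F_FLUSH)
            buf[i++] = 'F';          /* flush comes first */
        if (flags & F_WRITE)
            buf[i++] = 'W';
        else if (flags & F_DISCARD)
            buf[i++] = 'D';
        else if (flags & F_READ)
            buf[i++] = 'R';
        else
            buf[i++] = 'N';
        if (flags & F_FUA)
            buf[i++] = 'F';          /* FUA follows the data direction */
        if (flags & F_AHEAD)
            buf[i++] = 'A';
        if (flags & F_SYNC)
            buf[i++] = 'S';
        if (flags & F_META)
            buf[i++] = 'M';
        buf[i] = '\0';
    }

    int main(void)
    {
        char buf[8];

        fill_rwbs(buf, F_FLUSH | F_WRITE | F_FUA | F_SYNC);
        printf("%s\n", buf);    /* prints "FWFS" */
        return 0;
    }
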
diff --git a/mm/highmem.c b/mm/highmem.c index 693394daa2ed..5ef672c07f75 100644 --- a/mm/highmem.c +++ b/mm/highmem.c | |||
@@ -326,7 +326,7 @@ static struct page_address_slot { | |||
326 | spinlock_t lock; /* Protect this bucket's list */ | 326 | spinlock_t lock; /* Protect this bucket's list */ |
327 | } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; | 327 | } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; |
328 | 328 | ||
329 | static struct page_address_slot *page_slot(struct page *page) | 329 | static struct page_address_slot *page_slot(const struct page *page) |
330 | { | 330 | { |
331 | return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; | 331 | return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; |
332 | } | 332 | } |
@@ -337,7 +337,7 @@ static struct page_address_slot *page_slot(struct page *page) | |||
337 | * | 337 | * |
338 | * Returns the page's virtual address. | 338 | * Returns the page's virtual address. |
339 | */ | 339 | */ |
340 | void *page_address(struct page *page) | 340 | void *page_address(const struct page *page) |
341 | { | 341 | { |
342 | unsigned long flags; | 342 | unsigned long flags; |
343 | void *ret; | 343 | void *ret; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4ec4e7ca4cd..ebd1e86bef1c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1841,29 +1841,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1841 | */ | 1841 | */ |
1842 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | 1842 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) |
1843 | { | 1843 | { |
1844 | int lock_count = -1; | ||
1845 | struct mem_cgroup *iter, *failed = NULL; | 1844 | struct mem_cgroup *iter, *failed = NULL; |
1846 | bool cond = true; | 1845 | bool cond = true; |
1847 | 1846 | ||
1848 | for_each_mem_cgroup_tree_cond(iter, mem, cond) { | 1847 | for_each_mem_cgroup_tree_cond(iter, mem, cond) { |
1849 | bool locked = iter->oom_lock; | 1848 | if (iter->oom_lock) { |
1850 | |||
1851 | iter->oom_lock = true; | ||
1852 | if (lock_count == -1) | ||
1853 | lock_count = iter->oom_lock; | ||
1854 | else if (lock_count != locked) { | ||
1855 | /* | 1849 | /* |
1856 | * this subtree of our hierarchy is already locked | 1850 | * this subtree of our hierarchy is already locked |
1857 | * so we cannot give a lock. | 1851 | * so we cannot give a lock. |
1858 | */ | 1852 | */ |
1859 | lock_count = 0; | ||
1860 | failed = iter; | 1853 | failed = iter; |
1861 | cond = false; | 1854 | cond = false; |
1862 | } | 1855 | } else |
1856 | iter->oom_lock = true; | ||
1863 | } | 1857 | } |
1864 | 1858 | ||
1865 | if (!failed) | 1859 | if (!failed) |
1866 | goto done; | 1860 | return true; |
1867 | 1861 | ||
1868 | /* | 1862 | /* |
1869 | * OK, we failed to lock the whole subtree so we have to clean up | 1863 | * OK, we failed to lock the whole subtree so we have to clean up |
@@ -1877,8 +1871,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | |||
1877 | } | 1871 | } |
1878 | iter->oom_lock = false; | 1872 | iter->oom_lock = false; |
1879 | } | 1873 | } |
1880 | done: | 1874 | return false; |
1881 | return lock_count; | ||
1882 | } | 1875 | } |
1883 | 1876 | ||
1884 | /* | 1877 | /* |
@@ -2091,6 +2084,7 @@ struct memcg_stock_pcp { | |||
2091 | #define FLUSHING_CACHED_CHARGE (0) | 2084 | #define FLUSHING_CACHED_CHARGE (0) |
2092 | }; | 2085 | }; |
2093 | static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); | 2086 | static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); |
2087 | static DEFINE_MUTEX(percpu_charge_mutex); | ||
2094 | 2088 | ||
2095 | /* | 2089 | /* |
2096 | * Try to consume stocked charge on this cpu. If success, one page is consumed | 2090 | * Try to consume stocked charge on this cpu. If success, one page is consumed |
@@ -2168,13 +2162,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) | |||
2168 | 2162 | ||
2169 | /* Notify other cpus that system-wide "drain" is running */ | 2163 | /* Notify other cpus that system-wide "drain" is running */ |
2170 | get_online_cpus(); | 2164 | get_online_cpus(); |
2171 | /* | 2165 | curcpu = get_cpu(); |
2172 | * Get a hint for avoiding draining charges on the current cpu, | ||
2173 | * which must be exhausted by our charging. It is not required that | ||
2174 | * this be a precise check, so we use raw_smp_processor_id() instead of | ||
2175 | * getcpu()/putcpu(). | ||
2176 | */ | ||
2177 | curcpu = raw_smp_processor_id(); | ||
2178 | for_each_online_cpu(cpu) { | 2166 | for_each_online_cpu(cpu) { |
2179 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 2167 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
2180 | struct mem_cgroup *mem; | 2168 | struct mem_cgroup *mem; |
@@ -2191,14 +2179,14 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) | |||
2191 | schedule_work_on(cpu, &stock->work); | 2179 | schedule_work_on(cpu, &stock->work); |
2192 | } | 2180 | } |
2193 | } | 2181 | } |
2182 | put_cpu(); | ||
2194 | 2183 | ||
2195 | if (!sync) | 2184 | if (!sync) |
2196 | goto out; | 2185 | goto out; |
2197 | 2186 | ||
2198 | for_each_online_cpu(cpu) { | 2187 | for_each_online_cpu(cpu) { |
2199 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 2188 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
2200 | if (mem_cgroup_same_or_subtree(root_mem, stock->cached) && | 2189 | if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) |
2201 | test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) | ||
2202 | flush_work(&stock->work); | 2190 | flush_work(&stock->work); |
2203 | } | 2191 | } |
2204 | out: | 2192 | out: |
@@ -2213,14 +2201,22 @@ out: | |||
2213 | */ | 2201 | */ |
2214 | static void drain_all_stock_async(struct mem_cgroup *root_mem) | 2202 | static void drain_all_stock_async(struct mem_cgroup *root_mem) |
2215 | { | 2203 | { |
2204 | /* | ||
2205 | * If someone calls draining, avoid adding more kworker runs. | ||
2206 | */ | ||
2207 | if (!mutex_trylock(&percpu_charge_mutex)) | ||
2208 | return; | ||
2216 | drain_all_stock(root_mem, false); | 2209 | drain_all_stock(root_mem, false); |
2210 | mutex_unlock(&percpu_charge_mutex); | ||
2217 | } | 2211 | } |
2218 | 2212 | ||
2219 | /* This is a synchronous drain interface. */ | 2213 | /* This is a synchronous drain interface. */ |
2220 | static void drain_all_stock_sync(struct mem_cgroup *root_mem) | 2214 | static void drain_all_stock_sync(struct mem_cgroup *root_mem) |
2221 | { | 2215 | { |
2222 | /* called when force_empty is called */ | 2216 | /* called when force_empty is called */ |
2217 | mutex_lock(&percpu_charge_mutex); | ||
2223 | drain_all_stock(root_mem, true); | 2218 | drain_all_stock(root_mem, true); |
2219 | mutex_unlock(&percpu_charge_mutex); | ||
2224 | } | 2220 | } |
2225 | 2221 | ||
2226 | /* | 2222 | /* |
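
The new percpu_charge_mutex serializes drains: the async path backs off immediately if a drain is already running, while the synchronous force_empty path waits for its turn. The shape of that pattern, with do_drain() standing in for drain_all_stock():

    static DEFINE_MUTEX(drain_mutex);
    static void do_drain(bool sync);    /* placeholder for drain_all_stock() */

    static void drain_async(void)
    {
        /* A drain is already in flight; scheduling more per-cpu work
         * would only duplicate it, so give up without blocking. */
        if (!mutex_trylock(&drain_mutex))
            return;
        do_drain(false);
        mutex_unlock(&drain_mutex);
    }

    static void drain_sync(void)
    {
        /* Callers such as force_empty need the drain to have happened, so block. */
        mutex_lock(&drain_mutex);
        do_drain(true);
        mutex_unlock(&drain_mutex);
    }
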
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d1960744f881..0e309cd1b5b9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -754,21 +754,10 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
754 | * 200ms is typically more than enough to curb heavy dirtiers; | 754 | * 200ms is typically more than enough to curb heavy dirtiers; |
755 | * (b) the pause time limit makes the dirtiers more responsive. | 755 | * (b) the pause time limit makes the dirtiers more responsive. |
756 | */ | 756 | */ |
757 | if (nr_dirty < dirty_thresh + | 757 | if (nr_dirty < dirty_thresh && |
758 | dirty_thresh / DIRTY_MAXPAUSE_AREA && | 758 | bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && |
759 | time_after(jiffies, start_time + MAX_PAUSE)) | 759 | time_after(jiffies, start_time + MAX_PAUSE)) |
760 | break; | 760 | break; |
761 | /* | ||
762 | * pass-good area. When some bdi gets blocked (eg. NFS server | ||
763 | * not responding), or write bandwidth dropped dramatically due | ||
764 | * to concurrent reads, or dirty threshold suddenly dropped and | ||
765 | * the dirty pages cannot be brought down anytime soon (eg. on | ||
766 | * slow USB stick), at least let go of the good bdi's. | ||
767 | */ | ||
768 | if (nr_dirty < dirty_thresh + | ||
769 | dirty_thresh / DIRTY_PASSGOOD_AREA && | ||
770 | bdi_dirty < bdi_thresh) | ||
771 | break; | ||
772 | 761 | ||
773 | /* | 762 | /* |
774 | * Increase the delay for each loop, up to our previous | 763 | * Increase the delay for each loop, up to our previous |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes) | |||
701 | return check_bytes8(start, value, bytes); | 701 | return check_bytes8(start, value, bytes); |
702 | 702 | ||
703 | value64 = value | value << 8 | value << 16 | value << 24; | 703 | value64 = value | value << 8 | value << 16 | value << 24; |
704 | value64 = value64 | value64 << 32; | 704 | value64 = (value64 & 0xffffffff) | value64 << 32; |
705 | prefix = 8 - ((unsigned long)start) % 8; | 705 | prefix = 8 - ((unsigned long)start) % 8; |
706 | 706 | ||
707 | if (prefix) { | 707 | if (prefix) { |
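
The masking matters because value64 is assembled from an 8-bit value using int arithmetic: for any byte pattern with the top bit set, the 32-bit intermediate goes negative and, on common platforms, sign-extends into the upper half when widened to 64 bits. A standalone demonstration of the before and after:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t value = 0x80;
        uint64_t bad, good;

        bad  = value | value << 8 | value << 16 | value << 24;  /* sign-extends */
        bad  = bad | bad << 32;

        good = value | value << 8 | value << 16 | value << 24;
        good = (good & 0xffffffff) | good << 32;                /* the fix */

        printf("%016llx\n", (unsigned long long)bad);   /* ffffffff80808080 */
        printf("%016llx\n", (unsigned long long)good);  /* 8080808080808080 */
        return 0;
    }
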
@@ -1854,7 +1854,7 @@ redo: | |||
1854 | 1854 | ||
1855 | new.frozen = 0; | 1855 | new.frozen = 0; |
1856 | 1856 | ||
1857 | if (!new.inuse && n->nr_partial < s->min_partial) | 1857 | if (!new.inuse && n->nr_partial > s->min_partial) |
1858 | m = M_FREE; | 1858 | m = M_FREE; |
1859 | else if (new.freelist) { | 1859 | else if (new.freelist) { |
1860 | m = M_PARTIAL; | 1860 | m = M_PARTIAL; |
@@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
2387 | slab_empty: | 2387 | slab_empty: |
2388 | if (prior) { | 2388 | if (prior) { |
2389 | /* | 2389 | /* |
2390 | * Slab still on the partial list. | 2390 | * Slab on the partial list. |
2391 | */ | 2391 | */ |
2392 | remove_partial(n, page); | 2392 | remove_partial(n, page); |
2393 | stat(s, FREE_REMOVE_PARTIAL); | 2393 | stat(s, FREE_REMOVE_PARTIAL); |
2394 | } | 2394 | } else |
2395 | /* Slab must be on the full list */ | ||
2396 | remove_full(s, page); | ||
2395 | 2397 | ||
2396 | spin_unlock_irqrestore(&n->list_lock, flags); | 2398 | spin_unlock_irqrestore(&n->list_lock, flags); |
2397 | stat(s, FREE_SLAB); | 2399 | stat(s, FREE_SLAB); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 464621d18eb2..7ef0903058ee 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -725,9 +725,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr) | |||
725 | #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) | 725 | #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) |
726 | #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ | 726 | #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ |
727 | #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ | 727 | #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ |
728 | #define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ | 728 | #define VMAP_BBMAP_BITS \ |
729 | VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ | 729 | VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ |
730 | VMALLOC_PAGES / NR_CPUS / 16)) | 730 | VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ |
731 | VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) | ||
731 | 732 | ||
732 | #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) | 733 | #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) |
733 | 734 | ||
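
Rounding NR_CPUS up keeps the whole divisor a power of two, which in turn keeps VMAP_BBMAP_BITS one as well (the per-cpu vmap block allocator works on power-of-two regions; that rationale is inferred here, not stated in the hunk). A worked example with assumed numbers:

    VMALLOC_PAGES               = 128 MiB / 4 KiB  = 32768
    NR_CPUS = 6 (before)        : 32768 / 6 / 16   = 341   (not a power of two)
    roundup_pow_of_two(6) = 8   : 32768 / 8 / 16   = 256   (power of two, still
                                                            inside the MIN/MAX clamp)
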
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7ef69124fa3e..b7719ec10dc5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | |||
2283 | .mem_cgroup = mem, | 2283 | .mem_cgroup = mem, |
2284 | .memcg_record = rec, | 2284 | .memcg_record = rec, |
2285 | }; | 2285 | }; |
2286 | unsigned long start, end; | 2286 | ktime_t start, end; |
2287 | 2287 | ||
2288 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 2288 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
2289 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 2289 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | |||
2292 | sc.may_writepage, | 2292 | sc.may_writepage, |
2293 | sc.gfp_mask); | 2293 | sc.gfp_mask); |
2294 | 2294 | ||
2295 | start = sched_clock(); | 2295 | start = ktime_get(); |
2296 | /* | 2296 | /* |
2297 | * NOTE: Although we can get the priority field, using it | 2297 | * NOTE: Although we can get the priority field, using it |
2298 | * here is not a good idea, since it limits the pages we can scan. | 2298 | * here is not a good idea, since it limits the pages we can scan. |
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | |||
2301 | * the priority and make it zero. | 2301 | * the priority and make it zero. |
2302 | */ | 2302 | */ |
2303 | shrink_zone(0, zone, &sc); | 2303 | shrink_zone(0, zone, &sc); |
2304 | end = sched_clock(); | 2304 | end = ktime_get(); |
2305 | 2305 | ||
2306 | if (rec) | 2306 | if (rec) |
2307 | rec->elapsed += end - start; | 2307 | rec->elapsed += ktime_to_ns(ktime_sub(end, start)); |
2308 | *scanned = sc.nr_scanned; | 2308 | *scanned = sc.nr_scanned; |
2309 | 2309 | ||
2310 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 2310 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | |||
2319 | { | 2319 | { |
2320 | struct zonelist *zonelist; | 2320 | struct zonelist *zonelist; |
2321 | unsigned long nr_reclaimed; | 2321 | unsigned long nr_reclaimed; |
2322 | unsigned long start, end; | 2322 | ktime_t start, end; |
2323 | int nid; | 2323 | int nid; |
2324 | struct scan_control sc = { | 2324 | struct scan_control sc = { |
2325 | .may_writepage = !laptop_mode, | 2325 | .may_writepage = !laptop_mode, |
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | |||
2337 | .gfp_mask = sc.gfp_mask, | 2337 | .gfp_mask = sc.gfp_mask, |
2338 | }; | 2338 | }; |
2339 | 2339 | ||
2340 | start = sched_clock(); | 2340 | start = ktime_get(); |
2341 | /* | 2341 | /* |
2342 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't | 2342 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't |
2343 | * take care of from where we get pages. So the node where we start the | 2343 | * take care of from where we get pages. So the node where we start the |
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | |||
2352 | sc.gfp_mask); | 2352 | sc.gfp_mask); |
2353 | 2353 | ||
2354 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); | 2354 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); |
2355 | end = sched_clock(); | 2355 | end = ktime_get(); |
2356 | if (rec) | 2356 | if (rec) |
2357 | rec->elapsed += end - start; | 2357 | rec->elapsed += ktime_to_ns(ktime_sub(end, start)); |
2358 | 2358 | ||
2359 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); | 2359 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); |
2360 | 2360 | ||
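
Replacing sched_clock() with ktime_get() trades a little overhead for a clock that is monotonic and in well-defined nanoseconds even when the start and end samples land on different CPUs. The resulting measurement pattern:

    ktime_t start, end;
    s64 delta_ns;

    start = ktime_get();
    /* ... the work being timed, e.g. shrink_zone() ... */
    end = ktime_get();
    delta_ns = ktime_to_ns(ktime_sub(end, start));
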
@@ -2529,6 +2529,9 @@ loop_again: | |||
2529 | high_wmark_pages(zone), 0, 0)) { | 2529 | high_wmark_pages(zone), 0, 0)) { |
2530 | end_zone = i; | 2530 | end_zone = i; |
2531 | break; | 2531 | break; |
2532 | } else { | ||
2533 | /* If balanced, clear the congested flag */ | ||
2534 | zone_clear_flag(zone, ZONE_CONGESTED); | ||
2532 | } | 2535 | } |
2533 | } | 2536 | } |
2534 | if (i < 0) | 2537 | if (i < 0) |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 5f27f8e30254..f1f2f7bb6661 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) | |||
167 | if (unlikely(!skb)) | 167 | if (unlikely(!skb)) |
168 | goto err_free; | 168 | goto err_free; |
169 | 169 | ||
170 | skb_reset_network_header(skb); | ||
171 | skb_reset_transport_header(skb); | ||
170 | return skb; | 172 | return skb; |
171 | 173 | ||
172 | err_free: | 174 | err_free: |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 52cfd0c3ea71..d07223c834af 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) | |||
558 | spin_unlock_irqrestore(&rq->lock, flags); | 558 | spin_unlock_irqrestore(&rq->lock, flags); |
559 | 559 | ||
560 | skb_queue_walk_safe(&queue, skb, tmp) { | 560 | skb_queue_walk_safe(&queue, skb, tmp) { |
561 | struct net_device *dev = skb->dev; | 561 | struct net_device *dev; |
562 | |||
563 | br2684_push(atmvcc, skb); | ||
564 | dev = skb->dev; | ||
562 | 565 | ||
563 | dev->stats.rx_bytes -= skb->len; | 566 | dev->stats.rx_bytes -= skb->len; |
564 | dev->stats.rx_packets--; | 567 | dev->stats.rx_packets--; |
565 | |||
566 | br2684_push(atmvcc, skb); | ||
567 | } | 568 | } |
568 | 569 | ||
569 | /* initialize netdev carrier state */ | 570 | /* initialize netdev carrier state */ |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 3176e2e13d9b..e73815456adf 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
231 | int br_add_bridge(struct net *net, const char *name) | 231 | int br_add_bridge(struct net *net, const char *name) |
232 | { | 232 | { |
233 | struct net_device *dev; | 233 | struct net_device *dev; |
234 | int res; | ||
234 | 235 | ||
235 | dev = alloc_netdev(sizeof(struct net_bridge), name, | 236 | dev = alloc_netdev(sizeof(struct net_bridge), name, |
236 | br_dev_setup); | 237 | br_dev_setup); |
@@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name) | |||
240 | 241 | ||
241 | dev_net_set(dev, net); | 242 | dev_net_set(dev, net); |
242 | 243 | ||
243 | return register_netdev(dev); | 244 | res = register_netdev(dev); |
245 | if (res) | ||
246 | free_netdev(dev); | ||
247 | return res; | ||
244 | } | 248 | } |
245 | 249 | ||
246 | int br_del_bridge(struct net *net, const char *name) | 250 | int br_del_bridge(struct net *net, const char *name) |
@@ -417,6 +421,7 @@ put_back: | |||
417 | int br_del_if(struct net_bridge *br, struct net_device *dev) | 421 | int br_del_if(struct net_bridge *br, struct net_device *dev) |
418 | { | 422 | { |
419 | struct net_bridge_port *p; | 423 | struct net_bridge_port *p; |
424 | bool changed_addr; | ||
420 | 425 | ||
421 | p = br_port_get_rtnl(dev); | 426 | p = br_port_get_rtnl(dev); |
422 | if (!p || p->br != br) | 427 | if (!p || p->br != br) |
@@ -425,9 +430,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
425 | del_nbp(p); | 430 | del_nbp(p); |
426 | 431 | ||
427 | spin_lock_bh(&br->lock); | 432 | spin_lock_bh(&br->lock); |
428 | br_stp_recalculate_bridge_id(br); | 433 | changed_addr = br_stp_recalculate_bridge_id(br); |
429 | spin_unlock_bh(&br->lock); | 434 | spin_unlock_bh(&br->lock); |
430 | 435 | ||
436 | if (changed_addr) | ||
437 | call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); | ||
438 | |||
431 | netdev_update_features(br->dev); | 439 | netdev_update_features(br->dev); |
432 | 440 | ||
433 | return 0; | 441 | return 0; |
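
If register_netdev() fails, the caller still owns the device and has to free_netdev() it itself; only after a successful registration does teardown go through unregister_netdev(). A condensed sketch of the corrected allocation path (my_priv and my_setup are placeholders):

    struct my_priv { int dummy; };

    static void my_setup(struct net_device *dev)
    {
        ether_setup(dev);    /* placeholder setup callback */
    }

    static int create_dev(struct net *net, const char *name)
    {
        struct net_device *dev;
        int err;

        dev = alloc_netdev(sizeof(struct my_priv), name, my_setup);
        if (!dev)
            return -ENOMEM;

        dev_net_set(dev, net);

        err = register_netdev(dev);
        if (err)
            free_netdev(dev);    /* registration failed: we still own dev */
        return err;
    }
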
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 6545ee9591d1..a76b62135558 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
34 | struct net_device *dev = ptr; | 34 | struct net_device *dev = ptr; |
35 | struct net_bridge_port *p; | 35 | struct net_bridge_port *p; |
36 | struct net_bridge *br; | 36 | struct net_bridge *br; |
37 | bool changed_addr; | ||
37 | int err; | 38 | int err; |
38 | 39 | ||
39 | /* register of bridge completed, add sysfs entries */ | 40 | /* register of bridge completed, add sysfs entries */ |
@@ -57,8 +58,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
57 | case NETDEV_CHANGEADDR: | 58 | case NETDEV_CHANGEADDR: |
58 | spin_lock_bh(&br->lock); | 59 | spin_lock_bh(&br->lock); |
59 | br_fdb_changeaddr(p, dev->dev_addr); | 60 | br_fdb_changeaddr(p, dev->dev_addr); |
60 | br_stp_recalculate_bridge_id(br); | 61 | changed_addr = br_stp_recalculate_bridge_id(br); |
61 | spin_unlock_bh(&br->lock); | 62 | spin_unlock_bh(&br->lock); |
63 | |||
64 | if (changed_addr) | ||
65 | call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); | ||
66 | |||
62 | break; | 67 | break; |
63 | 68 | ||
64 | case NETDEV_CHANGE: | 69 | case NETDEV_CHANGE: |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 2b5ca1a0054d..5864cc491369 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1198,7 +1198,8 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) | |||
1198 | 1198 | ||
1199 | if (table->check && table->check(newinfo, table->valid_hooks)) { | 1199 | if (table->check && table->check(newinfo, table->valid_hooks)) { |
1200 | BUGPRINT("The table doesn't like its own initial data, lol\n"); | 1200 | BUGPRINT("The table doesn't like its own initial data, lol\n"); |
1201 | return ERR_PTR(-EINVAL); | 1201 | ret = -EINVAL; |
1202 | goto free_chainstack; | ||
1202 | } | 1203 | } |
1203 | 1204 | ||
1204 | table->private = newinfo; | 1205 | table->private = newinfo; |
diff --git a/net/core/scm.c b/net/core/scm.c index 4c1ef026d695..811b53fb330e 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) | |||
192 | goto error; | 192 | goto error; |
193 | 193 | ||
194 | cred->uid = cred->euid = p->creds.uid; | 194 | cred->uid = cred->euid = p->creds.uid; |
195 | cred->gid = cred->egid = p->creds.uid; | 195 | cred->gid = cred->egid = p->creds.gid; |
196 | put_cred(p->cred); | 196 | put_cred(p->cred); |
197 | p->cred = cred; | 197 | p->cred = cred; |
198 | } | 198 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 77d3eded665a..8c6563361ab5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -122,6 +122,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
122 | newskb->pkt_type = PACKET_LOOPBACK; | 122 | newskb->pkt_type = PACKET_LOOPBACK; |
123 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 123 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
124 | WARN_ON(!skb_dst(newskb)); | 124 | WARN_ON(!skb_dst(newskb)); |
125 | skb_dst_force(newskb); | ||
125 | netif_rx_ni(newskb); | 126 | netif_rx_ni(newskb); |
126 | return 0; | 127 | return 0; |
127 | } | 128 | } |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ab0c9efd1efa..8905e92f896a 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -1067,7 +1067,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt); | |||
1067 | */ | 1067 | */ |
1068 | 1068 | ||
1069 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, | 1069 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, |
1070 | char __user *optval, int __user *optlen) | 1070 | char __user *optval, int __user *optlen, unsigned flags) |
1071 | { | 1071 | { |
1072 | struct inet_sock *inet = inet_sk(sk); | 1072 | struct inet_sock *inet = inet_sk(sk); |
1073 | int val; | 1073 | int val; |
@@ -1240,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1240 | 1240 | ||
1241 | msg.msg_control = optval; | 1241 | msg.msg_control = optval; |
1242 | msg.msg_controllen = len; | 1242 | msg.msg_controllen = len; |
1243 | msg.msg_flags = 0; | 1243 | msg.msg_flags = flags; |
1244 | 1244 | ||
1245 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { | 1245 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { |
1246 | struct in_pktinfo info; | 1246 | struct in_pktinfo info; |
@@ -1294,7 +1294,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1294 | { | 1294 | { |
1295 | int err; | 1295 | int err; |
1296 | 1296 | ||
1297 | err = do_ip_getsockopt(sk, level, optname, optval, optlen); | 1297 | err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); |
1298 | #ifdef CONFIG_NETFILTER | 1298 | #ifdef CONFIG_NETFILTER |
1299 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1299 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
1300 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && | 1300 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && |
@@ -1327,7 +1327,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1327 | return compat_mc_getsockopt(sk, level, optname, optval, optlen, | 1327 | return compat_mc_getsockopt(sk, level, optname, optval, optlen, |
1328 | ip_getsockopt); | 1328 | ip_getsockopt); |
1329 | 1329 | ||
1330 | err = do_ip_getsockopt(sk, level, optname, optval, optlen); | 1330 | err = do_ip_getsockopt(sk, level, optname, optval, optlen, |
1331 | MSG_CMSG_COMPAT); | ||
1331 | 1332 | ||
1332 | #ifdef CONFIG_NETFILTER | 1333 | #ifdef CONFIG_NETFILTER |
1333 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1334 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 2e97e3ec1eb7..929b27bdeb79 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
18 | struct rtable *rt; | 18 | struct rtable *rt; |
19 | struct flowi4 fl4 = {}; | 19 | struct flowi4 fl4 = {}; |
20 | __be32 saddr = iph->saddr; | 20 | __be32 saddr = iph->saddr; |
21 | __u8 flags = 0; | 21 | __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; |
22 | unsigned int hh_len; | 22 | unsigned int hh_len; |
23 | 23 | ||
24 | if (!skb->sk && addr_type != RTN_LOCAL) { | 24 | if (addr_type == RTN_UNSPEC) |
25 | if (addr_type == RTN_UNSPEC) | 25 | addr_type = inet_addr_type(net, saddr); |
26 | addr_type = inet_addr_type(net, saddr); | 26 | if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) |
27 | if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) | 27 | flags |= FLOWI_FLAG_ANYSRC; |
28 | flags |= FLOWI_FLAG_ANYSRC; | 28 | else |
29 | else | 29 | saddr = 0; |
30 | saddr = 0; | ||
31 | } | ||
32 | 30 | ||
33 | /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause | 31 | /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause |
34 | * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. | 32 | * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. |
@@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
38 | fl4.flowi4_tos = RT_TOS(iph->tos); | 36 | fl4.flowi4_tos = RT_TOS(iph->tos); |
39 | fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; | 37 | fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; |
40 | fl4.flowi4_mark = skb->mark; | 38 | fl4.flowi4_mark = skb->mark; |
41 | fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags; | 39 | fl4.flowi4_flags = flags; |
42 | rt = ip_route_output_key(net, &fl4); | 40 | rt = ip_route_output_key(net, &fl4); |
43 | if (IS_ERR(rt)) | 41 | if (IS_ERR(rt)) |
44 | return -1; | 42 | return -1; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 1457acb39cec..61714bd52925 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
563 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 563 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
564 | RT_SCOPE_UNIVERSE, | 564 | RT_SCOPE_UNIVERSE, |
565 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 565 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
566 | FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0); | 566 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, |
567 | daddr, saddr, 0, 0); | ||
567 | 568 | ||
568 | if (!inet->hdrincl) { | 569 | if (!inet->hdrincl) { |
569 | err = raw_probe_proto_opt(&fl4, msg); | 570 | err = raw_probe_proto_opt(&fl4, msg); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e3dec1c9f09d..075212e41b83 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -722,7 +722,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1, | |||
722 | { | 722 | { |
723 | return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | | 723 | return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | |
724 | ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | | 724 | ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | |
725 | (rt1->rt_iif ^ rt2->rt_iif)) == 0); | 725 | (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0); |
726 | } | 726 | } |
727 | 727 | ||
728 | static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) | 728 | static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) |
@@ -731,8 +731,8 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) | |||
731 | ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | | 731 | ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | |
732 | (rt1->rt_mark ^ rt2->rt_mark) | | 732 | (rt1->rt_mark ^ rt2->rt_mark) | |
733 | (rt1->rt_key_tos ^ rt2->rt_key_tos) | | 733 | (rt1->rt_key_tos ^ rt2->rt_key_tos) | |
734 | (rt1->rt_oif ^ rt2->rt_oif) | | 734 | (rt1->rt_route_iif ^ rt2->rt_route_iif) | |
735 | (rt1->rt_iif ^ rt2->rt_iif)) == 0; | 735 | (rt1->rt_oif ^ rt2->rt_oif)) == 0; |
736 | } | 736 | } |
737 | 737 | ||
738 | static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) | 738 | static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) |
@@ -2320,8 +2320,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2320 | rth = rcu_dereference(rth->dst.rt_next)) { | 2320 | rth = rcu_dereference(rth->dst.rt_next)) { |
2321 | if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) | | 2321 | if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) | |
2322 | ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | | 2322 | ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | |
2323 | (rth->rt_iif ^ iif) | | 2323 | (rth->rt_route_iif ^ iif) | |
2324 | rth->rt_oif | | ||
2325 | (rth->rt_key_tos ^ tos)) == 0 && | 2324 | (rth->rt_key_tos ^ tos)) == 0 && |
2326 | rth->rt_mark == skb->mark && | 2325 | rth->rt_mark == skb->mark && |
2327 | net_eq(dev_net(rth->dst.dev), net) && | 2326 | net_eq(dev_net(rth->dst.dev), net) && |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 92bb9434b338..3bc5c8f7c71b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
276 | int mss; | 276 | int mss; |
277 | struct rtable *rt; | 277 | struct rtable *rt; |
278 | __u8 rcv_wscale; | 278 | __u8 rcv_wscale; |
279 | bool ecn_ok; | 279 | bool ecn_ok = false; |
280 | 280 | ||
281 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) | 281 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
282 | goto out; | 282 | goto out; |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 9cb191ecaba8..147ede38ab48 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, | |||
913 | } | 913 | } |
914 | 914 | ||
915 | static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | 915 | static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, |
916 | char __user *optval, int __user *optlen) | 916 | char __user *optval, int __user *optlen, unsigned flags) |
917 | { | 917 | { |
918 | struct ipv6_pinfo *np = inet6_sk(sk); | 918 | struct ipv6_pinfo *np = inet6_sk(sk); |
919 | int len; | 919 | int len; |
@@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
962 | 962 | ||
963 | msg.msg_control = optval; | 963 | msg.msg_control = optval; |
964 | msg.msg_controllen = len; | 964 | msg.msg_controllen = len; |
965 | msg.msg_flags = 0; | 965 | msg.msg_flags = flags; |
966 | 966 | ||
967 | lock_sock(sk); | 967 | lock_sock(sk); |
968 | skb = np->pktoptions; | 968 | skb = np->pktoptions; |
@@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1222 | if(level != SOL_IPV6) | 1222 | if(level != SOL_IPV6) |
1223 | return -ENOPROTOOPT; | 1223 | return -ENOPROTOOPT; |
1224 | 1224 | ||
1225 | err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); | 1225 | err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); |
1226 | #ifdef CONFIG_NETFILTER | 1226 | #ifdef CONFIG_NETFILTER |
1227 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1227 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
1228 | if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { | 1228 | if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { |
@@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1264 | return compat_mc_getsockopt(sk, level, optname, optval, optlen, | 1264 | return compat_mc_getsockopt(sk, level, optname, optval, optlen, |
1265 | ipv6_getsockopt); | 1265 | ipv6_getsockopt); |
1266 | 1266 | ||
1267 | err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); | 1267 | err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, |
1268 | MSG_CMSG_COMPAT); | ||
1268 | #ifdef CONFIG_NETFILTER | 1269 | #ifdef CONFIG_NETFILTER |
1269 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1270 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
1270 | if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { | 1271 | if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 07bf1085458f..00b15ac7a702 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
672 | if (skb->protocol != htons(ETH_P_IPV6)) | 672 | if (skb->protocol != htons(ETH_P_IPV6)) |
673 | goto tx_error; | 673 | goto tx_error; |
674 | 674 | ||
675 | if (tos == 1) | ||
676 | tos = ipv6_get_dsfield(iph6); | ||
677 | |||
675 | /* ISATAP (RFC4214) - must come before 6to4 */ | 678 | /* ISATAP (RFC4214) - must come before 6to4 */ |
676 | if (dev->priv_flags & IFF_ISATAP) { | 679 | if (dev->priv_flags & IFF_ISATAP) { |
677 | struct neighbour *neigh = NULL; | 680 | struct neighbour *neigh = NULL; |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 89d5bf806222..ac838965ff34 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
165 | int mss; | 165 | int mss; |
166 | struct dst_entry *dst; | 166 | struct dst_entry *dst; |
167 | __u8 rcv_wscale; | 167 | __u8 rcv_wscale; |
168 | bool ecn_ok; | 168 | bool ecn_ok = false; |
169 | 169 | ||
170 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) | 170 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
171 | goto out; | 171 | goto out; |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5b466cd1272f..84d0fd47636a 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -312,6 +312,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
312 | } | 312 | } |
313 | break; | 313 | break; |
314 | case NF_STOLEN: | 314 | case NF_STOLEN: |
315 | break; | ||
315 | default: | 316 | default: |
316 | kfree_skb(skb); | 317 | kfree_skb(skb); |
317 | } | 318 | } |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 58107d060846..9c24de10a657 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -341,11 +341,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
341 | 341 | ||
342 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 342 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
343 | if (entry == NULL) | 343 | if (entry == NULL) |
344 | return -ENOMEM; | 344 | goto out_entry; |
345 | if (domain != NULL) { | 345 | if (domain != NULL) { |
346 | entry->domain = kstrdup(domain, GFP_ATOMIC); | 346 | entry->domain = kstrdup(domain, GFP_ATOMIC); |
347 | if (entry->domain == NULL) | 347 | if (entry->domain == NULL) |
348 | goto cfg_cipsov4_map_add_failure; | 348 | goto out_domain; |
349 | } | 349 | } |
350 | 350 | ||
351 | if (addr == NULL && mask == NULL) { | 351 | if (addr == NULL && mask == NULL) { |
@@ -354,13 +354,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
354 | } else if (addr != NULL && mask != NULL) { | 354 | } else if (addr != NULL && mask != NULL) { |
355 | addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); | 355 | addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); |
356 | if (addrmap == NULL) | 356 | if (addrmap == NULL) |
357 | goto cfg_cipsov4_map_add_failure; | 357 | goto out_addrmap; |
358 | INIT_LIST_HEAD(&addrmap->list4); | 358 | INIT_LIST_HEAD(&addrmap->list4); |
359 | INIT_LIST_HEAD(&addrmap->list6); | 359 | INIT_LIST_HEAD(&addrmap->list6); |
360 | 360 | ||
361 | addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); | 361 | addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); |
362 | if (addrinfo == NULL) | 362 | if (addrinfo == NULL) |
363 | goto cfg_cipsov4_map_add_failure; | 363 | goto out_addrinfo; |
364 | addrinfo->type_def.cipsov4 = doi_def; | 364 | addrinfo->type_def.cipsov4 = doi_def; |
365 | addrinfo->type = NETLBL_NLTYPE_CIPSOV4; | 365 | addrinfo->type = NETLBL_NLTYPE_CIPSOV4; |
366 | addrinfo->list.addr = addr->s_addr & mask->s_addr; | 366 | addrinfo->list.addr = addr->s_addr & mask->s_addr; |
@@ -374,7 +374,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
374 | entry->type = NETLBL_NLTYPE_ADDRSELECT; | 374 | entry->type = NETLBL_NLTYPE_ADDRSELECT; |
375 | } else { | 375 | } else { |
376 | ret_val = -EINVAL; | 376 | ret_val = -EINVAL; |
377 | goto cfg_cipsov4_map_add_failure; | 377 | goto out_addrmap; |
378 | } | 378 | } |
379 | 379 | ||
380 | ret_val = netlbl_domhsh_add(entry, audit_info); | 380 | ret_val = netlbl_domhsh_add(entry, audit_info); |
@@ -384,11 +384,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
384 | return 0; | 384 | return 0; |
385 | 385 | ||
386 | cfg_cipsov4_map_add_failure: | 386 | cfg_cipsov4_map_add_failure: |
387 | cipso_v4_doi_putdef(doi_def); | 387 | kfree(addrinfo); |
388 | out_addrinfo: | ||
389 | kfree(addrmap); | ||
390 | out_addrmap: | ||
388 | kfree(entry->domain); | 391 | kfree(entry->domain); |
392 | out_domain: | ||
389 | kfree(entry); | 393 | kfree(entry); |
390 | kfree(addrmap); | 394 | out_entry: |
391 | kfree(addrinfo); | 395 | cipso_v4_doi_putdef(doi_def); |
392 | return ret_val; | 396 | return ret_val; |
393 | } | 397 | } |
394 | 398 | ||
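
The relabelled exits follow the usual kernel unwind idiom: each label releases exactly one resource acquired before the failure point, in reverse order, so no path frees something that was never allocated. A generic sketch of the shape:

    static int build_three(void)
    {
        void *a, *b, *c;

        a = kzalloc(16, GFP_ATOMIC);
        if (!a)
            goto out;
        b = kzalloc(16, GFP_ATOMIC);
        if (!b)
            goto out_a;
        c = kzalloc(16, GFP_ATOMIC);
        if (!c)
            goto out_b;

        /* success: a, b and c are handed off to longer-lived structures here */
        return 0;

    out_b:
        kfree(b);
    out_a:
        kfree(a);
    out:
        return -ENOMEM;
    }
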
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 102fc212cd64..e051398fdf6b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, | |||
196 | 196 | ||
197 | skb2->skb_iif = skb->dev->ifindex; | 197 | skb2->skb_iif = skb->dev->ifindex; |
198 | skb2->dev = dev; | 198 | skb2->dev = dev; |
199 | dev_queue_xmit(skb2); | 199 | err = dev_queue_xmit(skb2); |
200 | err = 0; | ||
201 | 200 | ||
202 | out: | 201 | out: |
203 | if (err) { | 202 | if (err) { |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 2a318f2dc3e5..b5d56a22b1d2 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) | |||
112 | 112 | ||
113 | for (prio = 0; prio < q->bands; prio++) { | 113 | for (prio = 0; prio < q->bands; prio++) { |
114 | struct Qdisc *qdisc = q->queues[prio]; | 114 | struct Qdisc *qdisc = q->queues[prio]; |
115 | struct sk_buff *skb = qdisc->dequeue(qdisc); | 115 | struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); |
116 | if (skb) { | 116 | if (skb) { |
117 | qdisc_bstats_update(sch, skb); | 117 | qdisc_bstats_update(sch, skb); |
118 | sch->q.qlen--; | 118 | sch->q.qlen--; |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9d761c95eca2..3dfc47134e51 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -2574,7 +2574,8 @@ sub process { | |||
2574 | } else { | 2574 | } else { |
2575 | $cast = $cast2; | 2575 | $cast = $cast2; |
2576 | } | 2576 | } |
2577 | WARN("$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); | 2577 | WARN("MINMAX", |
2578 | "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); | ||
2578 | } | 2579 | } |
2579 | } | 2580 | } |
2580 | 2581 | ||
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index eb2f1e64edf7..4594f3341051 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -1389,7 +1389,7 @@ sub vcs_exists { | |||
1389 | warn("$P: No supported VCS found. Add --nogit to options?\n"); | 1389 | warn("$P: No supported VCS found. Add --nogit to options?\n"); |
1390 | warn("Using a git repository produces better results.\n"); | 1390 | warn("Using a git repository produces better results.\n"); |
1391 | warn("Try Linus Torvalds' latest git repository using:\n"); | 1391 | warn("Try Linus Torvalds' latest git repository using:\n"); |
1392 | warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n"); | 1392 | warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\n"); |
1393 | $printed_novcs = 1; | 1393 | $printed_novcs = 1; |
1394 | } | 1394 | } |
1395 | return 0; | 1395 | return 0; |
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index c8439cf2a448..2e43aec1c36b 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c | |||
@@ -710,8 +710,10 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) | |||
710 | head->r.index++) | 710 | head->r.index++) |
711 | if (ns->profile_ptr[head->r.index]) | 711 | if (ns->profile_ptr[head->r.index]) |
712 | break; | 712 | break; |
713 | if (head->r.index == TOMOYO_MAX_PROFILES) | 713 | if (head->r.index == TOMOYO_MAX_PROFILES) { |
714 | head->r.eof = true; | ||
714 | return; | 715 | return; |
716 | } | ||
715 | head->r.step++; | 717 | head->r.step++; |
716 | break; | 718 | break; |
717 | case 2: | 719 | case 2: |
@@ -723,6 +725,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) | |||
723 | tomoyo_io_printf(head, "%u-COMMENT=", index); | 725 | tomoyo_io_printf(head, "%u-COMMENT=", index); |
724 | tomoyo_set_string(head, comment ? comment->name : ""); | 726 | tomoyo_set_string(head, comment ? comment->name : ""); |
725 | tomoyo_set_lf(head); | 727 | tomoyo_set_lf(head); |
728 | tomoyo_print_namespace(head); | ||
726 | tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); | 729 | tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); |
727 | for (i = 0; i < TOMOYO_MAX_PREF; i++) | 730 | for (i = 0; i < TOMOYO_MAX_PREF; i++) |
728 | tomoyo_io_printf(head, "%s=%u ", | 731 | tomoyo_io_printf(head, "%s=%u ", |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 56d62d3fb167..3b8f7b80376b 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -181,9 +181,9 @@ strip-libs = $(filter-out -l%,$(1)) | |||
181 | 181 | ||
182 | $(OUTPUT)python/perf.so: $(PYRF_OBJS) | 182 | $(OUTPUT)python/perf.so: $(PYRF_OBJS) |
183 | $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ | 183 | $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ |
184 | --quiet build_ext \ | 184 | --quiet build_ext; \ |
185 | --build-lib='$(OUTPUT)python' \ | 185 | mkdir -p $(OUTPUT)python && \ |
186 | --build-temp='$(OUTPUT)python/temp' | 186 | cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/ |
187 | # | 187 | # |
188 | # No Perl scripts right now: | 188 | # No Perl scripts right now: |
189 | # | 189 | # |
@@ -509,9 +509,13 @@ else | |||
509 | 509 | ||
510 | PYTHON_WORD := $(call shell-wordify,$(PYTHON)) | 510 | PYTHON_WORD := $(call shell-wordify,$(PYTHON)) |
511 | 511 | ||
512 | python-clean := $(PYTHON_WORD) util/setup.py clean \ | 512 | # python extension build directories |
513 | --build-lib='$(OUTPUT)python' \ | 513 | PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ |
514 | --build-temp='$(OUTPUT)python/temp' | 514 | PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ |
515 | PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ | ||
516 | export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP | ||
517 | |||
518 | python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so | ||
515 | 519 | ||
516 | ifdef NO_LIBPYTHON | 520 | ifdef NO_LIBPYTHON |
517 | $(call disable-python) | 521 | $(call disable-python) |
@@ -868,6 +872,9 @@ install: all | |||
868 | $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' | 872 | $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' |
869 | $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' | 873 | $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' |
870 | 874 | ||
875 | install-python_ext: | ||
876 | $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' | ||
877 | |||
871 | install-doc: | 878 | install-doc: |
872 | $(MAKE) -C Documentation install | 879 | $(MAKE) -C Documentation install |
873 | 880 | ||
@@ -895,7 +902,7 @@ quick-install-html: | |||
895 | ### Cleaning rules | 902 | ### Cleaning rules |
896 | 903 | ||
897 | clean: | 904 | clean: |
898 | $(RM) $(OUTPUT){*.o,*/*.o,*/*/*.o,*/*/*/*.o,$(LIB_FILE),perf-archive} | 905 | $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) |
899 | $(RM) $(ALL_PROGRAMS) perf | 906 | $(RM) $(ALL_PROGRAMS) perf |
900 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* | 907 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* |
901 | $(MAKE) -C Documentation/ clean | 908 | $(MAKE) -C Documentation/ clean |
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 9ac05aafd9b2..899080ace267 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c | |||
@@ -942,10 +942,10 @@ static const char *record_args[] = { | |||
942 | "-f", | 942 | "-f", |
943 | "-m", "1024", | 943 | "-m", "1024", |
944 | "-c", "1", | 944 | "-c", "1", |
945 | "-e", "lock:lock_acquire:r", | 945 | "-e", "lock:lock_acquire", |
946 | "-e", "lock:lock_acquired:r", | 946 | "-e", "lock:lock_acquired", |
947 | "-e", "lock:lock_contended:r", | 947 | "-e", "lock:lock_contended", |
948 | "-e", "lock:lock_release:r", | 948 | "-e", "lock:lock_release", |
949 | }; | 949 | }; |
950 | 950 | ||
951 | static int __cmd_record(int argc, const char **argv) | 951 | static int __cmd_record(int argc, const char **argv) |
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 5f2a5c7046df..710ae3d0a489 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c | |||
@@ -134,10 +134,18 @@ static int opt_show_lines(const struct option *opt __used, | |||
134 | { | 134 | { |
135 | int ret = 0; | 135 | int ret = 0; |
136 | 136 | ||
137 | if (str) | 137 | if (!str) |
138 | ret = parse_line_range_desc(str, ¶ms.line_range); | 138 | return 0; |
139 | INIT_LIST_HEAD(¶ms.line_range.line_list); | 139 | |
140 | if (params.show_lines) { | ||
141 | pr_warning("Warning: more than one --line options are" | ||
142 | " detected. Only the first one is valid.\n"); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
140 | params.show_lines = true; | 146 | params.show_lines = true; |
147 | ret = parse_line_range_desc(str, ¶ms.line_range); | ||
148 | INIT_LIST_HEAD(¶ms.line_range.line_list); | ||
141 | 149 | ||
142 | return ret; | 150 | return ret; |
143 | } | 151 | } |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 80dc5b790e47..6b0519f885e4 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -30,8 +30,6 @@ | |||
30 | #include <sched.h> | 30 | #include <sched.h> |
31 | #include <sys/mman.h> | 31 | #include <sys/mman.h> |
32 | 32 | ||
33 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
34 | |||
35 | enum write_mode_t { | 33 | enum write_mode_t { |
36 | WRITE_FORCE, | 34 | WRITE_FORCE, |
37 | WRITE_APPEND | 35 | WRITE_APPEND |
@@ -47,7 +45,7 @@ static int freq = 1000; | |||
47 | static int output; | 45 | static int output; |
48 | static int pipe_output = 0; | 46 | static int pipe_output = 0; |
49 | static const char *output_name = NULL; | 47 | static const char *output_name = NULL; |
50 | static int group = 0; | 48 | static bool group = false; |
51 | static int realtime_prio = 0; | 49 | static int realtime_prio = 0; |
52 | static bool nodelay = false; | 50 | static bool nodelay = false; |
53 | static bool raw_samples = false; | 51 | static bool raw_samples = false; |
@@ -438,7 +436,6 @@ static void mmap_read_all(void) | |||
438 | 436 | ||
439 | static int __cmd_record(int argc, const char **argv) | 437 | static int __cmd_record(int argc, const char **argv) |
440 | { | 438 | { |
441 | int i; | ||
442 | struct stat st; | 439 | struct stat st; |
443 | int flags; | 440 | int flags; |
444 | int err; | 441 | int err; |
@@ -682,7 +679,6 @@ static int __cmd_record(int argc, const char **argv) | |||
682 | 679 | ||
683 | for (;;) { | 680 | for (;;) { |
684 | int hits = samples; | 681 | int hits = samples; |
685 | int thread; | ||
686 | 682 | ||
687 | mmap_read_all(); | 683 | mmap_read_all(); |
688 | 684 | ||
@@ -693,19 +689,8 @@ static int __cmd_record(int argc, const char **argv) | |||
693 | waking++; | 689 | waking++; |
694 | } | 690 | } |
695 | 691 | ||
696 | if (done) { | 692 | if (done) |
697 | for (i = 0; i < evsel_list->cpus->nr; i++) { | 693 | perf_evlist__disable(evsel_list); |
698 | struct perf_evsel *pos; | ||
699 | |||
700 | list_for_each_entry(pos, &evsel_list->entries, node) { | ||
701 | for (thread = 0; | ||
702 | thread < evsel_list->threads->nr; | ||
703 | thread++) | ||
704 | ioctl(FD(pos, i, thread), | ||
705 | PERF_EVENT_IOC_DISABLE); | ||
706 | } | ||
707 | } | ||
708 | } | ||
709 | } | 694 | } |
710 | 695 | ||
711 | if (quiet || signr == SIGUSR1) | 696 | if (quiet || signr == SIGUSR1) |
@@ -768,6 +753,8 @@ const struct option record_options[] = { | |||
768 | "child tasks do not inherit counters"), | 753 | "child tasks do not inherit counters"), |
769 | OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), | 754 | OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), |
770 | OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), | 755 | OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), |
756 | OPT_BOOLEAN(0, "group", &group, | ||
757 | "put the counters into a counter group"), | ||
771 | OPT_BOOLEAN('g', "call-graph", &call_graph, | 758 | OPT_BOOLEAN('g', "call-graph", &call_graph, |
772 | "do call-graph (stack chain/backtrace) recording"), | 759 | "do call-graph (stack chain/backtrace) recording"), |
773 | OPT_INCR('v', "verbose", &verbose, | 760 | OPT_INCR('v', "verbose", &verbose, |
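The builtin-record.c hunk above replaces the open-coded loop that issued PERF_EVENT_IOC_DISABLE per event, CPU and thread with a single call to the new perf_evlist__disable() helper (added in util/evlist.c further below). A minimal sketch of what one such disable boils down to, assuming an fd previously returned by perf_event_open() (illustrative only, not the perf-internal code path):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* Stop one already-opened perf event; fd comes from perf_event_open(). */
    static int disable_one_event(int fd)
    {
            return ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
    }

perf_evlist__disable() simply applies this ioctl to every event file descriptor tracked by the evlist, across all CPUs and threads.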
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f854efda7686..d7ff277bdb78 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -162,23 +162,22 @@ static int perf_session__setup_sample_type(struct perf_session *self) | |||
162 | { | 162 | { |
163 | if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { | 163 | if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { |
164 | if (sort__has_parent) { | 164 | if (sort__has_parent) { |
165 | fprintf(stderr, "selected --sort parent, but no" | 165 | ui__warning("Selected --sort parent, but no " |
166 | " callchain data. Did you call" | 166 | "callchain data. Did you call " |
167 | " perf record without -g?\n"); | 167 | "'perf record' without -g?\n"); |
168 | return -EINVAL; | 168 | return -EINVAL; |
169 | } | 169 | } |
170 | if (symbol_conf.use_callchain) { | 170 | if (symbol_conf.use_callchain) { |
171 | fprintf(stderr, "selected -g but no callchain data." | 171 | ui__warning("Selected -g but no callchain data. Did " |
172 | " Did you call perf record without" | 172 | "you call 'perf record' without -g?\n"); |
173 | " -g?\n"); | ||
174 | return -1; | 173 | return -1; |
175 | } | 174 | } |
176 | } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && | 175 | } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && |
177 | !symbol_conf.use_callchain) { | 176 | !symbol_conf.use_callchain) { |
178 | symbol_conf.use_callchain = true; | 177 | symbol_conf.use_callchain = true; |
179 | if (callchain_register_param(&callchain_param) < 0) { | 178 | if (callchain_register_param(&callchain_param) < 0) { |
180 | fprintf(stderr, "Can't register callchain" | 179 | ui__warning("Can't register callchain " |
181 | " params\n"); | 180 | "params.\n"); |
182 | return -EINVAL; | 181 | return -EINVAL; |
183 | } | 182 | } |
184 | } | 183 | } |
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index dcfe8873c9a1..5177964943e7 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c | |||
@@ -1637,23 +1637,29 @@ static struct perf_event_ops event_ops = { | |||
1637 | .ordered_samples = true, | 1637 | .ordered_samples = true, |
1638 | }; | 1638 | }; |
1639 | 1639 | ||
1640 | static int read_events(void) | 1640 | static void read_events(bool destroy, struct perf_session **psession) |
1641 | { | 1641 | { |
1642 | int err = -EINVAL; | 1642 | int err = -EINVAL; |
1643 | struct perf_session *session = perf_session__new(input_name, O_RDONLY, | 1643 | struct perf_session *session = perf_session__new(input_name, O_RDONLY, |
1644 | 0, false, &event_ops); | 1644 | 0, false, &event_ops); |
1645 | if (session == NULL) | 1645 | if (session == NULL) |
1646 | return -ENOMEM; | 1646 | die("No Memory"); |
1647 | 1647 | ||
1648 | if (perf_session__has_traces(session, "record -R")) { | 1648 | if (perf_session__has_traces(session, "record -R")) { |
1649 | err = perf_session__process_events(session, &event_ops); | 1649 | err = perf_session__process_events(session, &event_ops); |
1650 | if (err) | ||
1651 | die("Failed to process events, error %d", err); | ||
1652 | |||
1650 | nr_events = session->hists.stats.nr_events[0]; | 1653 | nr_events = session->hists.stats.nr_events[0]; |
1651 | nr_lost_events = session->hists.stats.total_lost; | 1654 | nr_lost_events = session->hists.stats.total_lost; |
1652 | nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; | 1655 | nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; |
1653 | } | 1656 | } |
1654 | 1657 | ||
1655 | perf_session__delete(session); | 1658 | if (destroy) |
1656 | return err; | 1659 | perf_session__delete(session); |
1660 | |||
1661 | if (psession) | ||
1662 | *psession = session; | ||
1657 | } | 1663 | } |
1658 | 1664 | ||
1659 | static void print_bad_events(void) | 1665 | static void print_bad_events(void) |
@@ -1689,9 +1695,10 @@ static void print_bad_events(void) | |||
1689 | static void __cmd_lat(void) | 1695 | static void __cmd_lat(void) |
1690 | { | 1696 | { |
1691 | struct rb_node *next; | 1697 | struct rb_node *next; |
1698 | struct perf_session *session; | ||
1692 | 1699 | ||
1693 | setup_pager(); | 1700 | setup_pager(); |
1694 | read_events(); | 1701 | read_events(false, &session); |
1695 | sort_lat(); | 1702 | sort_lat(); |
1696 | 1703 | ||
1697 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); | 1704 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); |
@@ -1717,6 +1724,7 @@ static void __cmd_lat(void) | |||
1717 | print_bad_events(); | 1724 | print_bad_events(); |
1718 | printf("\n"); | 1725 | printf("\n"); |
1719 | 1726 | ||
1727 | perf_session__delete(session); | ||
1720 | } | 1728 | } |
1721 | 1729 | ||
1722 | static struct trace_sched_handler map_ops = { | 1730 | static struct trace_sched_handler map_ops = { |
@@ -1731,7 +1739,7 @@ static void __cmd_map(void) | |||
1731 | max_cpu = sysconf(_SC_NPROCESSORS_CONF); | 1739 | max_cpu = sysconf(_SC_NPROCESSORS_CONF); |
1732 | 1740 | ||
1733 | setup_pager(); | 1741 | setup_pager(); |
1734 | read_events(); | 1742 | read_events(true, NULL); |
1735 | print_bad_events(); | 1743 | print_bad_events(); |
1736 | } | 1744 | } |
1737 | 1745 | ||
@@ -1744,7 +1752,7 @@ static void __cmd_replay(void) | |||
1744 | 1752 | ||
1745 | test_calibrations(); | 1753 | test_calibrations(); |
1746 | 1754 | ||
1747 | read_events(); | 1755 | read_events(true, NULL); |
1748 | 1756 | ||
1749 | printf("nr_run_events: %ld\n", nr_run_events); | 1757 | printf("nr_run_events: %ld\n", nr_run_events); |
1750 | printf("nr_sleep_events: %ld\n", nr_sleep_events); | 1758 | printf("nr_sleep_events: %ld\n", nr_sleep_events); |
@@ -1769,7 +1777,7 @@ static void __cmd_replay(void) | |||
1769 | 1777 | ||
1770 | 1778 | ||
1771 | static const char * const sched_usage[] = { | 1779 | static const char * const sched_usage[] = { |
1772 | "perf sched [<options>] {record|latency|map|replay|trace}", | 1780 | "perf sched [<options>] {record|latency|map|replay|script}", |
1773 | NULL | 1781 | NULL |
1774 | }; | 1782 | }; |
1775 | 1783 | ||
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 1ad04ce29c34..5deb17d9e795 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -193,6 +193,7 @@ static int big_num_opt = -1; | |||
193 | static const char *cpu_list; | 193 | static const char *cpu_list; |
194 | static const char *csv_sep = NULL; | 194 | static const char *csv_sep = NULL; |
195 | static bool csv_output = false; | 195 | static bool csv_output = false; |
196 | static bool group = false; | ||
196 | 197 | ||
197 | static volatile int done = 0; | 198 | static volatile int done = 0; |
198 | 199 | ||
@@ -280,14 +281,14 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) | |||
280 | attr->inherit = !no_inherit; | 281 | attr->inherit = !no_inherit; |
281 | 282 | ||
282 | if (system_wide) | 283 | if (system_wide) |
283 | return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false); | 284 | return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group); |
284 | 285 | ||
285 | if (target_pid == -1 && target_tid == -1) { | 286 | if (target_pid == -1 && target_tid == -1) { |
286 | attr->disabled = 1; | 287 | attr->disabled = 1; |
287 | attr->enable_on_exec = 1; | 288 | attr->enable_on_exec = 1; |
288 | } | 289 | } |
289 | 290 | ||
290 | return perf_evsel__open_per_thread(evsel, evsel_list->threads, false); | 291 | return perf_evsel__open_per_thread(evsel, evsel_list->threads, group); |
291 | } | 292 | } |
292 | 293 | ||
293 | /* | 294 | /* |
@@ -1043,6 +1044,8 @@ static const struct option options[] = { | |||
1043 | "stat events on existing thread id"), | 1044 | "stat events on existing thread id"), |
1044 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 1045 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
1045 | "system-wide collection from all CPUs"), | 1046 | "system-wide collection from all CPUs"), |
1047 | OPT_BOOLEAN('g', "group", &group, | ||
1048 | "put the counters into a counter group"), | ||
1046 | OPT_BOOLEAN('c', "scale", &scale, | 1049 | OPT_BOOLEAN('c', "scale", &scale, |
1047 | "scale/normalize counters"), | 1050 | "scale/normalize counters"), |
1048 | OPT_INCR('v', "verbose", &verbose, | 1051 | OPT_INCR('v', "verbose", &verbose, |
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index e02d78cae70f..fe02903f7d0f 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c | |||
@@ -399,7 +399,6 @@ static int perf_config_global(void) | |||
399 | int perf_config(config_fn_t fn, void *data) | 399 | int perf_config(config_fn_t fn, void *data) |
400 | { | 400 | { |
401 | int ret = 0, found = 0; | 401 | int ret = 0, found = 0; |
402 | char *repo_config = NULL; | ||
403 | const char *home = NULL; | 402 | const char *home = NULL; |
404 | 403 | ||
405 | /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ | 404 | /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ |
@@ -414,19 +413,32 @@ int perf_config(config_fn_t fn, void *data) | |||
414 | home = getenv("HOME"); | 413 | home = getenv("HOME"); |
415 | if (perf_config_global() && home) { | 414 | if (perf_config_global() && home) { |
416 | char *user_config = strdup(mkpath("%s/.perfconfig", home)); | 415 | char *user_config = strdup(mkpath("%s/.perfconfig", home)); |
417 | if (!access(user_config, R_OK)) { | 416 | struct stat st; |
418 | ret += perf_config_from_file(fn, user_config, data); | 417 | |
419 | found += 1; | 418 | if (user_config == NULL) { |
419 | warning("Not enough memory to process %s/.perfconfig, " | ||
420 | "ignoring it.", home); | ||
421 | goto out; | ||
420 | } | 422 | } |
421 | free(user_config); | ||
422 | } | ||
423 | 423 | ||
424 | repo_config = perf_pathdup("config"); | 424 | if (stat(user_config, &st) < 0) |
425 | if (!access(repo_config, R_OK)) { | 425 | goto out_free; |
426 | ret += perf_config_from_file(fn, repo_config, data); | 426 | |
427 | if (st.st_uid && (st.st_uid != geteuid())) { | ||
428 | warning("File %s not owned by current user or root, " | ||
429 | "ignoring it.", user_config); | ||
430 | goto out_free; | ||
431 | } | ||
432 | |||
433 | if (!st.st_size) | ||
434 | goto out_free; | ||
435 | |||
436 | ret += perf_config_from_file(fn, user_config, data); | ||
427 | found += 1; | 437 | found += 1; |
438 | out_free: | ||
439 | free(user_config); | ||
428 | } | 440 | } |
429 | free(repo_config); | 441 | out: |
430 | if (found == 0) | 442 | if (found == 0) |
431 | return -1; | 443 | return -1; |
432 | return ret; | 444 | return ret; |
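The util/config.c change above makes perf ignore $HOME/.perfconfig unless the file can be stat()ed, is owned by root or by the current effective user, and is non-empty. A self-contained sketch of that gate (config_file_usable() is illustrative, not a perf function):

    #include <stdbool.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Should a user config file be trusted and parsed? (sketch) */
    static bool config_file_usable(const char *path)
    {
            struct stat st;

            if (stat(path, &st) < 0)
                    return false;           /* missing or inaccessible */
            if (st.st_uid && st.st_uid != geteuid())
                    return false;           /* owned by neither root nor the caller */
            return st.st_size > 0;          /* empty files are skipped */
    }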
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index fddf40f30d3e..ee51e9b4dc09 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c | |||
@@ -96,6 +96,39 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr, | |||
96 | return *lineno ?: -ENOENT; | 96 | return *lineno ?: -ENOENT; |
97 | } | 97 | } |
98 | 98 | ||
99 | static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data); | ||
100 | |||
101 | /** | ||
102 | * cu_walk_functions_at - Walk on function DIEs at given address | ||
103 | * @cu_die: A CU DIE | ||
104 | * @addr: An address | ||
105 | * @callback: A callback which is called with found DIEs | ||
106 | * @data: User data | ||
107 | * | ||
108 | * Walk on function DIEs at given @addr in @cu_die. Passed DIEs | ||
109 | * should be subprograms or inlined subroutines. | ||
110 | */ | ||
111 | int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, | ||
112 | int (*callback)(Dwarf_Die *, void *), void *data) | ||
113 | { | ||
114 | Dwarf_Die die_mem; | ||
115 | Dwarf_Die *sc_die; | ||
116 | int ret = -ENOENT; | ||
117 | |||
118 | /* Inlined function could be recursive. Trace it until fail */ | ||
119 | for (sc_die = die_find_realfunc(cu_die, addr, &die_mem); | ||
120 | sc_die != NULL; | ||
121 | sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr, | ||
122 | &die_mem)) { | ||
123 | ret = callback(sc_die, data); | ||
124 | if (ret) | ||
125 | break; | ||
126 | } | ||
127 | |||
128 | return ret; | ||
129 | |||
130 | } | ||
131 | |||
99 | /** | 132 | /** |
100 | * die_compare_name - Compare diename and tname | 133 | * die_compare_name - Compare diename and tname |
101 | * @dw_die: a DIE | 134 | * @dw_die: a DIE |
@@ -198,6 +231,19 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, | |||
198 | return 0; | 231 | return 0; |
199 | } | 232 | } |
200 | 233 | ||
234 | /* Get attribute and translate it as a sdata */ | ||
235 | static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name, | ||
236 | Dwarf_Sword *result) | ||
237 | { | ||
238 | Dwarf_Attribute attr; | ||
239 | |||
240 | if (dwarf_attr(tp_die, attr_name, &attr) == NULL || | ||
241 | dwarf_formsdata(&attr, result) != 0) | ||
242 | return -ENOENT; | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
201 | /** | 247 | /** |
202 | * die_is_signed_type - Check whether a type DIE is signed or not | 248 | * die_is_signed_type - Check whether a type DIE is signed or not |
203 | * @tp_die: a DIE of a type | 249 | * @tp_die: a DIE of a type |
@@ -250,6 +296,50 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs) | |||
250 | return 0; | 296 | return 0; |
251 | } | 297 | } |
252 | 298 | ||
299 | /* Get the call file index number in CU DIE */ | ||
300 | static int die_get_call_fileno(Dwarf_Die *in_die) | ||
301 | { | ||
302 | Dwarf_Sword idx; | ||
303 | |||
304 | if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0) | ||
305 | return (int)idx; | ||
306 | else | ||
307 | return -ENOENT; | ||
308 | } | ||
309 | |||
310 | /* Get the declared file index number in CU DIE */ | ||
311 | static int die_get_decl_fileno(Dwarf_Die *pdie) | ||
312 | { | ||
313 | Dwarf_Sword idx; | ||
314 | |||
315 | if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0) | ||
316 | return (int)idx; | ||
317 | else | ||
318 | return -ENOENT; | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * die_get_call_file - Get callsite file name of inlined function instance | ||
323 | * @in_die: a DIE of an inlined function instance | ||
324 | * | ||
325 | * Get the call-site file name of @in_die, i.e. the file from which the | ||
326 | * inlined function is called. | ||
327 | */ | ||
328 | const char *die_get_call_file(Dwarf_Die *in_die) | ||
329 | { | ||
330 | Dwarf_Die cu_die; | ||
331 | Dwarf_Files *files; | ||
332 | int idx; | ||
333 | |||
334 | idx = die_get_call_fileno(in_die); | ||
335 | if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) || | ||
336 | dwarf_getsrcfiles(&cu_die, &files, NULL) != 0) | ||
337 | return NULL; | ||
338 | |||
339 | return dwarf_filesrc(files, idx, NULL, NULL); | ||
340 | } | ||
341 | |||
342 | |||
253 | /** | 343 | /** |
254 | * die_find_child - Generic DIE search function in DIE tree | 344 | * die_find_child - Generic DIE search function in DIE tree |
255 | * @rt_die: a root DIE | 345 | * @rt_die: a root DIE |
@@ -374,9 +464,78 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | |||
374 | return die_mem; | 464 | return die_mem; |
375 | } | 465 | } |
376 | 466 | ||
467 | struct __instance_walk_param { | ||
468 | void *addr; | ||
469 | int (*callback)(Dwarf_Die *, void *); | ||
470 | void *data; | ||
471 | int retval; | ||
472 | }; | ||
473 | |||
474 | static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) | ||
475 | { | ||
476 | struct __instance_walk_param *iwp = data; | ||
477 | Dwarf_Attribute attr_mem; | ||
478 | Dwarf_Die origin_mem; | ||
479 | Dwarf_Attribute *attr; | ||
480 | Dwarf_Die *origin; | ||
481 | int tmp; | ||
482 | |||
483 | attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem); | ||
484 | if (attr == NULL) | ||
485 | return DIE_FIND_CB_CONTINUE; | ||
486 | |||
487 | origin = dwarf_formref_die(attr, &origin_mem); | ||
488 | if (origin == NULL || origin->addr != iwp->addr) | ||
489 | return DIE_FIND_CB_CONTINUE; | ||
490 | |||
491 | /* Ignore redundant instances */ | ||
492 | if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) { | ||
493 | dwarf_decl_line(origin, &tmp); | ||
494 | if (die_get_call_lineno(inst) == tmp) { | ||
495 | tmp = die_get_decl_fileno(origin); | ||
496 | if (die_get_call_fileno(inst) == tmp) | ||
497 | return DIE_FIND_CB_CONTINUE; | ||
498 | } | ||
499 | } | ||
500 | |||
501 | iwp->retval = iwp->callback(inst, iwp->data); | ||
502 | |||
503 | return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE; | ||
504 | } | ||
505 | |||
506 | /** | ||
507 | * die_walk_instances - Walk on instances of given DIE | ||
508 | * @or_die: an abstract original DIE | ||
509 | * @callback: a callback function which is called with instance DIE | ||
510 | * @data: user data | ||
511 | * | ||
512 | * Walk on the instances of given @or_die. @or_die must be an inlined function | ||
513 | * declaration. This returns the return value of @callback if it returns | ||
514 | * non-zero value, or -ENOENT if there is no instance. | ||
515 | */ | ||
516 | int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *), | ||
517 | void *data) | ||
518 | { | ||
519 | Dwarf_Die cu_die; | ||
520 | Dwarf_Die die_mem; | ||
521 | struct __instance_walk_param iwp = { | ||
522 | .addr = or_die->addr, | ||
523 | .callback = callback, | ||
524 | .data = data, | ||
525 | .retval = -ENOENT, | ||
526 | }; | ||
527 | |||
528 | if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL) | ||
529 | return -ENOENT; | ||
530 | |||
531 | die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem); | ||
532 | |||
533 | return iwp.retval; | ||
534 | } | ||
535 | |||
377 | /* Line walker internal parameters */ | 536 | /* Line walker internal parameters */ |
378 | struct __line_walk_param { | 537 | struct __line_walk_param { |
379 | const char *fname; | 538 | bool recursive; |
380 | line_walk_callback_t callback; | 539 | line_walk_callback_t callback; |
381 | void *data; | 540 | void *data; |
382 | int retval; | 541 | int retval; |
@@ -385,39 +544,56 @@ struct __line_walk_param { | |||
385 | static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) | 544 | static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) |
386 | { | 545 | { |
387 | struct __line_walk_param *lw = data; | 546 | struct __line_walk_param *lw = data; |
388 | Dwarf_Addr addr; | 547 | Dwarf_Addr addr = 0; |
548 | const char *fname; | ||
389 | int lineno; | 549 | int lineno; |
390 | 550 | ||
391 | if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { | 551 | if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { |
552 | fname = die_get_call_file(in_die); | ||
392 | lineno = die_get_call_lineno(in_die); | 553 | lineno = die_get_call_lineno(in_die); |
393 | if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { | 554 | if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { |
394 | lw->retval = lw->callback(lw->fname, lineno, addr, | 555 | lw->retval = lw->callback(fname, lineno, addr, lw->data); |
395 | lw->data); | ||
396 | if (lw->retval != 0) | 556 | if (lw->retval != 0) |
397 | return DIE_FIND_CB_END; | 557 | return DIE_FIND_CB_END; |
398 | } | 558 | } |
399 | } | 559 | } |
400 | return DIE_FIND_CB_SIBLING; | 560 | if (!lw->recursive) |
561 | /* Don't need to search recursively */ | ||
562 | return DIE_FIND_CB_SIBLING; | ||
563 | |||
564 | if (addr) { | ||
565 | fname = dwarf_decl_file(in_die); | ||
566 | if (fname && dwarf_decl_line(in_die, &lineno) == 0) { | ||
567 | lw->retval = lw->callback(fname, lineno, addr, lw->data); | ||
568 | if (lw->retval != 0) | ||
569 | return DIE_FIND_CB_END; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | /* Continue to search nested inlined function call-sites */ | ||
574 | return DIE_FIND_CB_CONTINUE; | ||
401 | } | 575 | } |
402 | 576 | ||
403 | /* Walk on lines of blocks included in given DIE */ | 577 | /* Walk on lines of blocks included in given DIE */ |
404 | static int __die_walk_funclines(Dwarf_Die *sp_die, | 578 | static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive, |
405 | line_walk_callback_t callback, void *data) | 579 | line_walk_callback_t callback, void *data) |
406 | { | 580 | { |
407 | struct __line_walk_param lw = { | 581 | struct __line_walk_param lw = { |
582 | .recursive = recursive, | ||
408 | .callback = callback, | 583 | .callback = callback, |
409 | .data = data, | 584 | .data = data, |
410 | .retval = 0, | 585 | .retval = 0, |
411 | }; | 586 | }; |
412 | Dwarf_Die die_mem; | 587 | Dwarf_Die die_mem; |
413 | Dwarf_Addr addr; | 588 | Dwarf_Addr addr; |
589 | const char *fname; | ||
414 | int lineno; | 590 | int lineno; |
415 | 591 | ||
416 | /* Handle function declaration line */ | 592 | /* Handle function declaration line */ |
417 | lw.fname = dwarf_decl_file(sp_die); | 593 | fname = dwarf_decl_file(sp_die); |
418 | if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && | 594 | if (fname && dwarf_decl_line(sp_die, &lineno) == 0 && |
419 | dwarf_entrypc(sp_die, &addr) == 0) { | 595 | dwarf_entrypc(sp_die, &addr) == 0) { |
420 | lw.retval = callback(lw.fname, lineno, addr, data); | 596 | lw.retval = callback(fname, lineno, addr, data); |
421 | if (lw.retval != 0) | 597 | if (lw.retval != 0) |
422 | goto done; | 598 | goto done; |
423 | } | 599 | } |
@@ -430,7 +606,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) | |||
430 | { | 606 | { |
431 | struct __line_walk_param *lw = data; | 607 | struct __line_walk_param *lw = data; |
432 | 608 | ||
433 | lw->retval = __die_walk_funclines(sp_die, lw->callback, lw->data); | 609 | lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data); |
434 | if (lw->retval != 0) | 610 | if (lw->retval != 0) |
435 | return DWARF_CB_ABORT; | 611 | return DWARF_CB_ABORT; |
436 | 612 | ||
@@ -439,7 +615,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) | |||
439 | 615 | ||
440 | /** | 616 | /** |
441 | * die_walk_lines - Walk on lines inside given DIE | 617 | * die_walk_lines - Walk on lines inside given DIE |
442 | * @rt_die: a root DIE (CU or subprogram) | 618 | * @rt_die: a root DIE (CU, subprogram or inlined_subroutine) |
443 | * @callback: callback routine | 619 | * @callback: callback routine |
444 | * @data: user data | 620 | * @data: user data |
445 | * | 621 | * |
@@ -460,12 +636,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) | |||
460 | size_t nlines, i; | 636 | size_t nlines, i; |
461 | 637 | ||
462 | /* Get the CU die */ | 638 | /* Get the CU die */ |
463 | if (dwarf_tag(rt_die) == DW_TAG_subprogram) | 639 | if (dwarf_tag(rt_die) != DW_TAG_compile_unit) |
464 | cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); | 640 | cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); |
465 | else | 641 | else |
466 | cu_die = rt_die; | 642 | cu_die = rt_die; |
467 | if (!cu_die) { | 643 | if (!cu_die) { |
468 | pr_debug2("Failed to get CU from subprogram\n"); | 644 | pr_debug2("Failed to get CU from given DIE.\n"); |
469 | return -EINVAL; | 645 | return -EINVAL; |
470 | } | 646 | } |
471 | 647 | ||
@@ -509,7 +685,11 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) | |||
509 | * subroutines. We have to check functions list or given function. | 685 | * subroutines. We have to check functions list or given function. |
510 | */ | 686 | */ |
511 | if (rt_die != cu_die) | 687 | if (rt_die != cu_die) |
512 | ret = __die_walk_funclines(rt_die, callback, data); | 688 | /* |
689 | * Don't need to walk functions recursively, because nested | ||
690 | * inlined functions don't have lines of the specified DIE. | ||
691 | */ | ||
692 | ret = __die_walk_funclines(rt_die, false, callback, data); | ||
513 | else { | 693 | else { |
514 | struct __line_walk_param param = { | 694 | struct __line_walk_param param = { |
515 | .callback = callback, | 695 | .callback = callback, |
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index bc3b21167e70..6ce1717784b7 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h | |||
@@ -34,12 +34,19 @@ extern const char *cu_get_comp_dir(Dwarf_Die *cu_die); | |||
34 | extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, | 34 | extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, |
35 | const char **fname, int *lineno); | 35 | const char **fname, int *lineno); |
36 | 36 | ||
37 | /* Walk on functions at given address */ | ||
38 | extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, | ||
39 | int (*callback)(Dwarf_Die *, void *), void *data); | ||
40 | |||
37 | /* Compare diename and tname */ | 41 | /* Compare diename and tname */ |
38 | extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); | 42 | extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); |
39 | 43 | ||
40 | /* Get callsite line number of inline-function instance */ | 44 | /* Get callsite line number of inline-function instance */ |
41 | extern int die_get_call_lineno(Dwarf_Die *in_die); | 45 | extern int die_get_call_lineno(Dwarf_Die *in_die); |
42 | 46 | ||
47 | /* Get callsite file name of inlined function instance */ | ||
48 | extern const char *die_get_call_file(Dwarf_Die *in_die); | ||
49 | |||
43 | /* Get type die */ | 50 | /* Get type die */ |
44 | extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); | 51 | extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); |
45 | 52 | ||
@@ -73,6 +80,10 @@ extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, | |||
73 | extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | 80 | extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, |
74 | Dwarf_Die *die_mem); | 81 | Dwarf_Die *die_mem); |
75 | 82 | ||
83 | /* Walk on the instances of given DIE */ | ||
84 | extern int die_walk_instances(Dwarf_Die *in_die, | ||
85 | int (*callback)(Dwarf_Die *, void *), void *data); | ||
86 | |||
76 | /* Walker on lines (Note: line number will not be sorted) */ | 87 | /* Walker on lines (Note: line number will not be sorted) */ |
77 | typedef int (* line_walk_callback_t) (const char *fname, int lineno, | 88 | typedef int (* line_walk_callback_t) (const char *fname, int lineno, |
78 | Dwarf_Addr addr, void *data); | 89 | Dwarf_Addr addr, void *data); |
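The new dwarf-aux walkers declared above share one callback convention: the callback receives a DIE plus an opaque data pointer and returns non-zero to stop the walk. A hedged usage sketch against those prototypes (print_instance() and the "inline instance" tag are made up for illustration):

    #include <stdio.h>
    #include <elfutils/libdw.h>

    /* Hypothetical callback: print the name of each DIE the walker visits. */
    static int print_instance(Dwarf_Die *inst, void *data)
    {
            const char *tag = data;

            printf("%s: %s\n", tag, dwarf_diename(inst) ?: "<unknown>");
            return 0;       /* returning non-zero would abort the walk */
    }

    /* e.g. die_walk_instances(&func_die, print_instance, (void *)"inline instance"); */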
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index b021ea9265c3..c12bd476c6f7 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -85,10 +85,32 @@ int perf_evlist__add_default(struct perf_evlist *evlist) | |||
85 | struct perf_evsel *evsel = perf_evsel__new(&attr, 0); | 85 | struct perf_evsel *evsel = perf_evsel__new(&attr, 0); |
86 | 86 | ||
87 | if (evsel == NULL) | 87 | if (evsel == NULL) |
88 | return -ENOMEM; | 88 | goto error; |
89 | |||
90 | /* use strdup() because free(evsel) assumes name is allocated */ | ||
91 | evsel->name = strdup("cycles"); | ||
92 | if (!evsel->name) | ||
93 | goto error_free; | ||
89 | 94 | ||
90 | perf_evlist__add(evlist, evsel); | 95 | perf_evlist__add(evlist, evsel); |
91 | return 0; | 96 | return 0; |
97 | error_free: | ||
98 | perf_evsel__delete(evsel); | ||
99 | error: | ||
100 | return -ENOMEM; | ||
101 | } | ||
102 | |||
103 | void perf_evlist__disable(struct perf_evlist *evlist) | ||
104 | { | ||
105 | int cpu, thread; | ||
106 | struct perf_evsel *pos; | ||
107 | |||
108 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
109 | list_for_each_entry(pos, &evlist->entries, node) { | ||
110 | for (thread = 0; thread < evlist->threads->nr; thread++) | ||
111 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); | ||
112 | } | ||
113 | } | ||
92 | } | 114 | } |
93 | 115 | ||
94 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) | 116 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index b2b862374f37..ce85ae9ae57a 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -53,6 +53,8 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist); | |||
53 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | 53 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); |
54 | void perf_evlist__munmap(struct perf_evlist *evlist); | 54 | void perf_evlist__munmap(struct perf_evlist *evlist); |
55 | 55 | ||
56 | void perf_evlist__disable(struct perf_evlist *evlist); | ||
57 | |||
56 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | 58 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, |
57 | struct cpu_map *cpus, | 59 | struct cpu_map *cpus, |
58 | struct thread_map *threads) | 60 | struct thread_map *threads) |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index cb2959a3fb43..b6c1ad123ca9 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -189,8 +189,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
189 | const char *name, bool is_kallsyms) | 189 | const char *name, bool is_kallsyms) |
190 | { | 190 | { |
191 | const size_t size = PATH_MAX; | 191 | const size_t size = PATH_MAX; |
192 | char *realname, *filename = malloc(size), | 192 | char *realname, *filename = zalloc(size), |
193 | *linkname = malloc(size), *targetname; | 193 | *linkname = zalloc(size), *targetname; |
194 | int len, err = -1; | 194 | int len, err = -1; |
195 | 195 | ||
196 | if (is_kallsyms) { | 196 | if (is_kallsyms) { |
@@ -254,8 +254,8 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, | |||
254 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) | 254 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) |
255 | { | 255 | { |
256 | const size_t size = PATH_MAX; | 256 | const size_t size = PATH_MAX; |
257 | char *filename = malloc(size), | 257 | char *filename = zalloc(size), |
258 | *linkname = malloc(size); | 258 | *linkname = zalloc(size); |
259 | int err = -1; | 259 | int err = -1; |
260 | 260 | ||
261 | if (filename == NULL || linkname == NULL) | 261 | if (filename == NULL || linkname == NULL) |
@@ -726,7 +726,16 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, | |||
726 | return -1; | 726 | return -1; |
727 | 727 | ||
728 | bev.header = old_bev.header; | 728 | bev.header = old_bev.header; |
729 | bev.pid = 0; | 729 | |
730 | /* | ||
731 | * As the pid is the missing value, we need to fill | ||
732 | * it properly. The header.misc value give us nice hint. | ||
733 | */ | ||
734 | bev.pid = HOST_KERNEL_ID; | ||
735 | if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || | ||
736 | bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) | ||
737 | bev.pid = DEFAULT_GUEST_KERNEL_ID; | ||
738 | |||
730 | memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); | 739 | memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); |
731 | __event_process_build_id(&bev, filename, session); | 740 | __event_process_build_id(&bev, filename, session); |
732 | 741 | ||
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h index 791f9dd27ebf..547628e97f3d 100644 --- a/tools/perf/util/include/linux/compiler.h +++ b/tools/perf/util/include/linux/compiler.h | |||
@@ -5,7 +5,9 @@ | |||
5 | #define __always_inline inline | 5 | #define __always_inline inline |
6 | #endif | 6 | #endif |
7 | #define __user | 7 | #define __user |
8 | #ifndef __attribute_const__ | ||
8 | #define __attribute_const__ | 9 | #define __attribute_const__ |
10 | #endif | ||
9 | 11 | ||
10 | #define __used __attribute__((__unused__)) | 12 | #define __used __attribute__((__unused__)) |
11 | 13 | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4ea7e19f5251..928918b796b2 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -697,7 +697,11 @@ parse_raw_event(const char **strp, struct perf_event_attr *attr) | |||
697 | return EVT_FAILED; | 697 | return EVT_FAILED; |
698 | n = hex2u64(str + 1, &config); | 698 | n = hex2u64(str + 1, &config); |
699 | if (n > 0) { | 699 | if (n > 0) { |
700 | *strp = str + n + 1; | 700 | const char *end = str + n + 1; |
701 | if (*end != '\0' && *end != ',' && *end != ':') | ||
702 | return EVT_FAILED; | ||
703 | |||
704 | *strp = end; | ||
701 | attr->type = PERF_TYPE_RAW; | 705 | attr->type = PERF_TYPE_RAW; |
702 | attr->config = config; | 706 | attr->config = config; |
703 | return EVT_HANDLED; | 707 | return EVT_HANDLED; |
@@ -1097,6 +1101,4 @@ void print_events(const char *event_glob) | |||
1097 | printf("\n"); | 1101 | printf("\n"); |
1098 | 1102 | ||
1099 | print_tracepoint_events(NULL, NULL); | 1103 | print_tracepoint_events(NULL, NULL); |
1100 | |||
1101 | exit(129); | ||
1102 | } | 1104 | } |
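The parse-events.c hunk above tightens raw event parsing: after the hex config that follows 'r', only end-of-string, ',' or ':' may appear, so a token such as "r1a8" or "r1a8:k" still parses while trailing garbage like "r1a8x" is now rejected. A standalone sketch of that terminator rule (raw_event_ok() is illustrative, not perf's actual parser):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Does str look like a raw event token, e.g. "r1a8" or "r1a8:k"? */
    static bool raw_event_ok(const char *str)
    {
            char *end;

            if (*str != 'r')
                    return false;
            strtoull(str + 1, &end, 16);    /* hex config after the 'r' */
            if (end == str + 1)
                    return false;           /* no hex digits at all */
            return *end == '\0' || *end == ',' || *end == ':';
    }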
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index b82d54fa2c56..1c7bfa5fe0a8 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -1820,11 +1820,15 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, | |||
1820 | ret = -ENOMEM; | 1820 | ret = -ENOMEM; |
1821 | goto error; | 1821 | goto error; |
1822 | } | 1822 | } |
1823 | tev->point.module = strdup(module); | 1823 | |
1824 | if (tev->point.module == NULL) { | 1824 | if (module) { |
1825 | ret = -ENOMEM; | 1825 | tev->point.module = strdup(module); |
1826 | goto error; | 1826 | if (tev->point.module == NULL) { |
1827 | ret = -ENOMEM; | ||
1828 | goto error; | ||
1829 | } | ||
1827 | } | 1830 | } |
1831 | |||
1828 | tev->point.offset = pev->point.offset; | 1832 | tev->point.offset = pev->point.offset; |
1829 | tev->point.retprobe = pev->point.retprobe; | 1833 | tev->point.retprobe = pev->point.retprobe; |
1830 | tev->nargs = pev->nargs; | 1834 | tev->nargs = pev->nargs; |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 3e44a3e36519..555fc3864b90 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -612,12 +612,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
612 | return ret; | 612 | return ret; |
613 | } | 613 | } |
614 | 614 | ||
615 | /* Find a variable in a subprogram die */ | 615 | /* Find a variable in a scope DIE */ |
616 | static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | 616 | static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) |
617 | { | 617 | { |
618 | Dwarf_Die vr_die, *scopes; | 618 | Dwarf_Die vr_die; |
619 | char buf[32], *ptr; | 619 | char buf[32], *ptr; |
620 | int ret, nscopes; | 620 | int ret = 0; |
621 | 621 | ||
622 | if (!is_c_varname(pf->pvar->var)) { | 622 | if (!is_c_varname(pf->pvar->var)) { |
623 | /* Copy raw parameters */ | 623 | /* Copy raw parameters */ |
@@ -652,30 +652,16 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
652 | if (pf->tvar->name == NULL) | 652 | if (pf->tvar->name == NULL) |
653 | return -ENOMEM; | 653 | return -ENOMEM; |
654 | 654 | ||
655 | pr_debug("Searching '%s' variable in context.\n", | 655 | pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); |
656 | pf->pvar->var); | ||
657 | /* Search child die for local variables and parameters. */ | 656 | /* Search child die for local variables and parameters. */ |
658 | if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) | 657 | if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { |
659 | ret = convert_variable(&vr_die, pf); | 658 | /* Search again in global variables */ |
660 | else { | 659 | if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) |
661 | /* Search upper class */ | 660 | ret = -ENOENT; |
662 | nscopes = dwarf_getscopes_die(sp_die, &scopes); | ||
663 | while (nscopes-- > 1) { | ||
664 | pr_debug("Searching variables in %s\n", | ||
665 | dwarf_diename(&scopes[nscopes])); | ||
666 | /* We should check this scope, so give dummy address */ | ||
667 | if (die_find_variable_at(&scopes[nscopes], | ||
668 | pf->pvar->var, 0, | ||
669 | &vr_die)) { | ||
670 | ret = convert_variable(&vr_die, pf); | ||
671 | goto found; | ||
672 | } | ||
673 | } | ||
674 | if (scopes) | ||
675 | free(scopes); | ||
676 | ret = -ENOENT; | ||
677 | } | 661 | } |
678 | found: | 662 | if (ret == 0) |
663 | ret = convert_variable(&vr_die, pf); | ||
664 | |||
679 | if (ret < 0) | 665 | if (ret < 0) |
680 | pr_warning("Failed to find '%s' in this function.\n", | 666 | pr_warning("Failed to find '%s' in this function.\n", |
681 | pf->pvar->var); | 667 | pf->pvar->var); |
@@ -718,26 +704,30 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, | |||
718 | return 0; | 704 | return 0; |
719 | } | 705 | } |
720 | 706 | ||
721 | /* Call probe_finder callback with real subprogram DIE */ | 707 | /* Call probe_finder callback with scope DIE */ |
722 | static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | 708 | static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) |
723 | { | 709 | { |
724 | Dwarf_Die die_mem; | ||
725 | Dwarf_Attribute fb_attr; | 710 | Dwarf_Attribute fb_attr; |
726 | size_t nops; | 711 | size_t nops; |
727 | int ret; | 712 | int ret; |
728 | 713 | ||
729 | /* If no real subprogram, find a real one */ | 714 | if (!sc_die) { |
730 | if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { | 715 | pr_err("Caller must pass a scope DIE. Program error.\n"); |
731 | sp_die = die_find_realfunc(&pf->cu_die, pf->addr, &die_mem); | 716 | return -EINVAL; |
732 | if (!sp_die) { | 717 | } |
718 | |||
719 | /* If not a real subprogram, find a real one */ | ||
720 | if (dwarf_tag(sc_die) != DW_TAG_subprogram) { | ||
721 | if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { | ||
733 | pr_warning("Failed to find probe point in any " | 722 | pr_warning("Failed to find probe point in any " |
734 | "functions.\n"); | 723 | "functions.\n"); |
735 | return -ENOENT; | 724 | return -ENOENT; |
736 | } | 725 | } |
737 | } | 726 | } else |
727 | memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die)); | ||
738 | 728 | ||
739 | /* Get the frame base attribute/ops */ | 729 | /* Get the frame base attribute/ops from subprogram */ |
740 | dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); | 730 | dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr); |
741 | ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); | 731 | ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); |
742 | if (ret <= 0 || nops == 0) { | 732 | if (ret <= 0 || nops == 0) { |
743 | pf->fb_ops = NULL; | 733 | pf->fb_ops = NULL; |
@@ -755,7 +745,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
755 | } | 745 | } |
756 | 746 | ||
757 | /* Call finder's callback handler */ | 747 | /* Call finder's callback handler */ |
758 | ret = pf->callback(sp_die, pf); | 748 | ret = pf->callback(sc_die, pf); |
759 | 749 | ||
760 | /* *pf->fb_ops will be cached in libdw. Don't free it. */ | 750 | /* *pf->fb_ops will be cached in libdw. Don't free it. */ |
761 | pf->fb_ops = NULL; | 751 | pf->fb_ops = NULL; |
@@ -763,17 +753,82 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
763 | return ret; | 753 | return ret; |
764 | } | 754 | } |
765 | 755 | ||
756 | struct find_scope_param { | ||
757 | const char *function; | ||
758 | const char *file; | ||
759 | int line; | ||
760 | int diff; | ||
761 | Dwarf_Die *die_mem; | ||
762 | bool found; | ||
763 | }; | ||
764 | |||
765 | static int find_best_scope_cb(Dwarf_Die *fn_die, void *data) | ||
766 | { | ||
767 | struct find_scope_param *fsp = data; | ||
768 | const char *file; | ||
769 | int lno; | ||
770 | |||
771 | /* Skip if declared file name does not match */ | ||
772 | if (fsp->file) { | ||
773 | file = dwarf_decl_file(fn_die); | ||
774 | if (!file || strcmp(fsp->file, file) != 0) | ||
775 | return 0; | ||
776 | } | ||
777 | /* If the function name is given, that's what the user expects */ | ||
778 | if (fsp->function) { | ||
779 | if (die_compare_name(fn_die, fsp->function)) { | ||
780 | memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); | ||
781 | fsp->found = true; | ||
782 | return 1; | ||
783 | } | ||
784 | } else { | ||
785 | /* With the line number, find the nearest declared DIE */ | ||
786 | dwarf_decl_line(fn_die, &lno); | ||
787 | if (lno < fsp->line && fsp->diff > fsp->line - lno) { | ||
788 | /* Keep a candidate and continue */ | ||
789 | fsp->diff = fsp->line - lno; | ||
790 | memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); | ||
791 | fsp->found = true; | ||
792 | } | ||
793 | } | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | /* Find an appropriate scope that fits the given conditions */ | ||
798 | static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) | ||
799 | { | ||
800 | struct find_scope_param fsp = { | ||
801 | .function = pf->pev->point.function, | ||
802 | .file = pf->fname, | ||
803 | .line = pf->lno, | ||
804 | .diff = INT_MAX, | ||
805 | .die_mem = die_mem, | ||
806 | .found = false, | ||
807 | }; | ||
808 | |||
809 | cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp); | ||
810 | |||
811 | return fsp.found ? die_mem : NULL; | ||
812 | } | ||
813 | |||
766 | static int probe_point_line_walker(const char *fname, int lineno, | 814 | static int probe_point_line_walker(const char *fname, int lineno, |
767 | Dwarf_Addr addr, void *data) | 815 | Dwarf_Addr addr, void *data) |
768 | { | 816 | { |
769 | struct probe_finder *pf = data; | 817 | struct probe_finder *pf = data; |
818 | Dwarf_Die *sc_die, die_mem; | ||
770 | int ret; | 819 | int ret; |
771 | 820 | ||
772 | if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) | 821 | if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) |
773 | return 0; | 822 | return 0; |
774 | 823 | ||
775 | pf->addr = addr; | 824 | pf->addr = addr; |
776 | ret = call_probe_finder(NULL, pf); | 825 | sc_die = find_best_scope(pf, &die_mem); |
826 | if (!sc_die) { | ||
827 | pr_warning("Failed to find scope of probe point.\n"); | ||
828 | return -ENOENT; | ||
829 | } | ||
830 | |||
831 | ret = call_probe_finder(sc_die, pf); | ||
777 | 832 | ||
778 | /* Continue if no error, because the line will be in inline function */ | 833 | /* Continue if no error, because the line will be in inline function */ |
779 | return ret < 0 ? ret : 0; | 834 | return ret < 0 ? ret : 0; |
@@ -827,6 +882,7 @@ static int probe_point_lazy_walker(const char *fname, int lineno, | |||
827 | Dwarf_Addr addr, void *data) | 882 | Dwarf_Addr addr, void *data) |
828 | { | 883 | { |
829 | struct probe_finder *pf = data; | 884 | struct probe_finder *pf = data; |
885 | Dwarf_Die *sc_die, die_mem; | ||
830 | int ret; | 886 | int ret; |
831 | 887 | ||
832 | if (!line_list__has_line(&pf->lcache, lineno) || | 888 | if (!line_list__has_line(&pf->lcache, lineno) || |
@@ -836,7 +892,14 @@ static int probe_point_lazy_walker(const char *fname, int lineno, | |||
836 | pr_debug("Probe line found: line:%d addr:0x%llx\n", | 892 | pr_debug("Probe line found: line:%d addr:0x%llx\n", |
837 | lineno, (unsigned long long)addr); | 893 | lineno, (unsigned long long)addr); |
838 | pf->addr = addr; | 894 | pf->addr = addr; |
839 | ret = call_probe_finder(NULL, pf); | 895 | pf->lno = lineno; |
896 | sc_die = find_best_scope(pf, &die_mem); | ||
897 | if (!sc_die) { | ||
898 | pr_warning("Failed to find scope of probe point.\n"); | ||
899 | return -ENOENT; | ||
900 | } | ||
901 | |||
902 | ret = call_probe_finder(sc_die, pf); | ||
840 | 903 | ||
841 | /* | 904 | /* |
842 | * Continue if no error, because the lazy pattern will match | 905 | * Continue if no error, because the lazy pattern will match |
@@ -861,42 +924,39 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
861 | return die_walk_lines(sp_die, probe_point_lazy_walker, pf); | 924 | return die_walk_lines(sp_die, probe_point_lazy_walker, pf); |
862 | } | 925 | } |
863 | 926 | ||
864 | /* Callback parameter with return value */ | ||
865 | struct dwarf_callback_param { | ||
866 | void *data; | ||
867 | int retval; | ||
868 | }; | ||
869 | |||
870 | static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) | 927 | static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) |
871 | { | 928 | { |
872 | struct dwarf_callback_param *param = data; | 929 | struct probe_finder *pf = data; |
873 | struct probe_finder *pf = param->data; | ||
874 | struct perf_probe_point *pp = &pf->pev->point; | 930 | struct perf_probe_point *pp = &pf->pev->point; |
875 | Dwarf_Addr addr; | 931 | Dwarf_Addr addr; |
932 | int ret; | ||
876 | 933 | ||
877 | if (pp->lazy_line) | 934 | if (pp->lazy_line) |
878 | param->retval = find_probe_point_lazy(in_die, pf); | 935 | ret = find_probe_point_lazy(in_die, pf); |
879 | else { | 936 | else { |
880 | /* Get probe address */ | 937 | /* Get probe address */ |
881 | if (dwarf_entrypc(in_die, &addr) != 0) { | 938 | if (dwarf_entrypc(in_die, &addr) != 0) { |
882 | pr_warning("Failed to get entry address of %s.\n", | 939 | pr_warning("Failed to get entry address of %s.\n", |
883 | dwarf_diename(in_die)); | 940 | dwarf_diename(in_die)); |
884 | param->retval = -ENOENT; | 941 | return -ENOENT; |
885 | return DWARF_CB_ABORT; | ||
886 | } | 942 | } |
887 | pf->addr = addr; | 943 | pf->addr = addr; |
888 | pf->addr += pp->offset; | 944 | pf->addr += pp->offset; |
889 | pr_debug("found inline addr: 0x%jx\n", | 945 | pr_debug("found inline addr: 0x%jx\n", |
890 | (uintmax_t)pf->addr); | 946 | (uintmax_t)pf->addr); |
891 | 947 | ||
892 | param->retval = call_probe_finder(in_die, pf); | 948 | ret = call_probe_finder(in_die, pf); |
893 | if (param->retval < 0) | ||
894 | return DWARF_CB_ABORT; | ||
895 | } | 949 | } |
896 | 950 | ||
897 | return DWARF_CB_OK; | 951 | return ret; |
898 | } | 952 | } |
899 | 953 | ||
954 | /* Callback parameter with return value for libdw */ | ||
955 | struct dwarf_callback_param { | ||
956 | void *data; | ||
957 | int retval; | ||
958 | }; | ||
959 | |||
900 | /* Search function from function name */ | 960 | /* Search function from function name */ |
901 | static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) | 961 | static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) |
902 | { | 962 | { |
@@ -933,14 +993,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) | |||
933 | /* TODO: Check the address in this function */ | 993 | /* TODO: Check the address in this function */ |
934 | param->retval = call_probe_finder(sp_die, pf); | 994 | param->retval = call_probe_finder(sp_die, pf); |
935 | } | 995 | } |
936 | } else { | 996 | } else |
937 | struct dwarf_callback_param _param = {.data = (void *)pf, | ||
938 | .retval = 0}; | ||
939 | /* Inlined function: search instances */ | 997 | /* Inlined function: search instances */ |
940 | dwarf_func_inline_instances(sp_die, probe_point_inline_cb, | 998 | param->retval = die_walk_instances(sp_die, |
941 | &_param); | 999 | probe_point_inline_cb, (void *)pf); |
942 | param->retval = _param.retval; | ||
943 | } | ||
944 | 1000 | ||
945 | return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ | 1001 | return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ |
946 | } | 1002 | } |
@@ -1060,7 +1116,7 @@ found: | |||
1060 | } | 1116 | } |
1061 | 1117 | ||
1062 | /* Add a found probe point into trace event list */ | 1118 | /* Add a found probe point into trace event list */ |
1063 | static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) | 1119 | static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) |
1064 | { | 1120 | { |
1065 | struct trace_event_finder *tf = | 1121 | struct trace_event_finder *tf = |
1066 | container_of(pf, struct trace_event_finder, pf); | 1122 | container_of(pf, struct trace_event_finder, pf); |
@@ -1075,8 +1131,9 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1075 | } | 1131 | } |
1076 | tev = &tf->tevs[tf->ntevs++]; | 1132 | tev = &tf->tevs[tf->ntevs++]; |
1077 | 1133 | ||
1078 | ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, | 1134 | /* Trace point should be converted from subprogram DIE */ |
1079 | &tev->point); | 1135 | ret = convert_to_trace_point(&pf->sp_die, pf->addr, |
1136 | pf->pev->point.retprobe, &tev->point); | ||
1080 | if (ret < 0) | 1137 | if (ret < 0) |
1081 | return ret; | 1138 | return ret; |
1082 | 1139 | ||
@@ -1091,7 +1148,8 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1091 | for (i = 0; i < pf->pev->nargs; i++) { | 1148 | for (i = 0; i < pf->pev->nargs; i++) { |
1092 | pf->pvar = &pf->pev->args[i]; | 1149 | pf->pvar = &pf->pev->args[i]; |
1093 | pf->tvar = &tev->args[i]; | 1150 | pf->tvar = &tev->args[i]; |
1094 | ret = find_variable(sp_die, pf); | 1151 | /* Variable should be found from scope DIE */ |
1152 | ret = find_variable(sc_die, pf); | ||
1095 | if (ret != 0) | 1153 | if (ret != 0) |
1096 | return ret; | 1154 | return ret; |
1097 | } | 1155 | } |
@@ -1159,13 +1217,13 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) | |||
1159 | } | 1217 | } |
1160 | 1218 | ||
1161 | /* Add found vars into the available variables list */ | 1219 |
1162 | static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) | 1220 | static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) |
1163 | { | 1221 | { |
1164 | struct available_var_finder *af = | 1222 | struct available_var_finder *af = |
1165 | container_of(pf, struct available_var_finder, pf); | 1223 | container_of(pf, struct available_var_finder, pf); |
1166 | struct variable_list *vl; | 1224 | struct variable_list *vl; |
1167 | Dwarf_Die die_mem, *scopes = NULL; | 1225 | Dwarf_Die die_mem; |
1168 | int ret, nscopes; | 1226 | int ret; |
1169 | 1227 | ||
1170 | /* Check number of tevs */ | 1228 | /* Check number of tevs */ |
1171 | if (af->nvls == af->max_vls) { | 1229 | if (af->nvls == af->max_vls) { |
@@ -1174,8 +1232,9 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1174 | } | 1232 | } |
1175 | vl = &af->vls[af->nvls++]; | 1233 | vl = &af->vls[af->nvls++]; |
1176 | 1234 | ||
1177 | ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, | 1235 | /* Trace point should be converted from subprogram DIE */ |
1178 | &vl->point); | 1236 | ret = convert_to_trace_point(&pf->sp_die, pf->addr, |
1237 | pf->pev->point.retprobe, &vl->point); | ||
1179 | if (ret < 0) | 1238 | if (ret < 0) |
1180 | return ret; | 1239 | return ret; |
1181 | 1240 | ||
@@ -1187,19 +1246,14 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1187 | if (vl->vars == NULL) | 1246 | if (vl->vars == NULL) |
1188 | return -ENOMEM; | 1247 | return -ENOMEM; |
1189 | af->child = true; | 1248 | af->child = true; |
1190 | die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); | 1249 | die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem); |
1191 | 1250 | ||
1192 | /* Find external variables */ | 1251 | /* Find external variables */ |
1193 | if (!af->externs) | 1252 | if (!af->externs) |
1194 | goto out; | 1253 | goto out; |
1195 | /* Don't need to search child DIE for externs. */ | 1254 | /* Don't need to search child DIE for externs. */ |
1196 | af->child = false; | 1255 | af->child = false; |
1197 | nscopes = dwarf_getscopes_die(sp_die, &scopes); | 1256 | die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem); |
1198 | while (nscopes-- > 1) | ||
1199 | die_find_child(&scopes[nscopes], collect_variables_cb, | ||
1200 | (void *)af, &die_mem); | ||
1201 | if (scopes) | ||
1202 | free(scopes); | ||
1203 | 1257 | ||
1204 | out: | 1258 | out: |
1205 | if (strlist__empty(vl->vars)) { | 1259 | if (strlist__empty(vl->vars)) { |
@@ -1391,10 +1445,14 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) | |||
1391 | 1445 | ||
1392 | static int line_range_inline_cb(Dwarf_Die *in_die, void *data) | 1446 | static int line_range_inline_cb(Dwarf_Die *in_die, void *data) |
1393 | { | 1447 | { |
1394 | struct dwarf_callback_param *param = data; | 1448 | find_line_range_by_line(in_die, data); |
1395 | 1449 | ||
1396 | param->retval = find_line_range_by_line(in_die, param->data); | 1450 | /* |
1397 | return DWARF_CB_ABORT; /* No need to find other instances */ | 1451 | * We have to check all instances of an inlined function, because |
1452 | * some execution paths can be optimized out depending on the | ||
1453 | * function arguments of the instances. | ||
1454 | */ | ||
1455 | return 0; | ||
1398 | } | 1456 | } |
1399 | 1457 | ||
1400 | /* Search function from function name */ | 1458 | /* Search function from function name */ |
@@ -1422,15 +1480,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) | |||
1422 | pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); | 1480 | pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); |
1423 | lr->start = lf->lno_s; | 1481 | lr->start = lf->lno_s; |
1424 | lr->end = lf->lno_e; | 1482 | lr->end = lf->lno_e; |
1425 | if (dwarf_func_inline(sp_die)) { | 1483 | if (dwarf_func_inline(sp_die)) |
1426 | struct dwarf_callback_param _param; | 1484 | param->retval = die_walk_instances(sp_die, |
1427 | _param.data = (void *)lf; | 1485 | line_range_inline_cb, lf); |
1428 | _param.retval = 0; | 1486 | else |
1429 | dwarf_func_inline_instances(sp_die, | ||
1430 | line_range_inline_cb, | ||
1431 | &_param); | ||
1432 | param->retval = _param.retval; | ||
1433 | } else | ||
1434 | param->retval = find_line_range_by_line(sp_die, lf); | 1487 | param->retval = find_line_range_by_line(sp_die, lf); |
1435 | return DWARF_CB_ABORT; | 1488 | return DWARF_CB_ABORT; |
1436 | } | 1489 | } |
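Note on the line_range_inline_cb() change above: the callback now returns 0 so that die_walk_instances() visits every instance of the inlined function instead of aborting after the first hit. A minimal illustration of why the first instance is not enough (illustrative source only, not perf code; compute() is a hypothetical helper):

/*
 * With -O2 the compiler may fold the dbg branch away in the foo(0)
 * instance, so that instance's DWARF loses the corresponding line
 * records.  A search that stops at the first inlined instance could
 * then miss a line that is still present in the foo(1) instance.
 */
int compute(void);

static inline int foo(int dbg)
{
	int ret = 42;

	if (dbg)			/* this line may be optimized out...   */
		ret += compute();	/* ...in instances where dbg is 0      */
	return ret;
}

int a(void) { return foo(0); }		/* branch likely folded away here      */
int b(void) { return foo(1); }		/* line info survives in this instance */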
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index c478b42a2473..1132c8f0ce89 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h | |||
@@ -57,7 +57,7 @@ struct probe_finder { | |||
57 | struct perf_probe_event *pev; /* Target probe event */ | 57 | struct perf_probe_event *pev; /* Target probe event */ |
58 | 58 | ||
59 | /* Callback when a probe point is found */ | 59 | /* Callback when a probe point is found */ |
60 | int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf); | 60 | int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf); |
61 | 61 | ||
62 | /* For function searching */ | 62 | /* For function searching */ |
63 | int lno; /* Line number */ | 63 | int lno; /* Line number */ |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 8e0b5a39d8a7..cbc8f215d4b7 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -187,16 +187,119 @@ static PyTypeObject pyrf_throttle_event__type = { | |||
187 | .tp_repr = (reprfunc)pyrf_throttle_event__repr, | 187 | .tp_repr = (reprfunc)pyrf_throttle_event__repr, |
188 | }; | 188 | }; |
189 | 189 | ||
190 | static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object."); | ||
191 | |||
192 | static PyMemberDef pyrf_lost_event__members[] = { | ||
193 | sample_members | ||
194 | member_def(lost_event, id, T_ULONGLONG, "event id"), | ||
195 | member_def(lost_event, lost, T_ULONGLONG, "number of lost events"), | ||
196 | { .name = NULL, }, | ||
197 | }; | ||
198 | |||
199 | static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent) | ||
200 | { | ||
201 | PyObject *ret; | ||
202 | char *s; | ||
203 | |||
204 | if (asprintf(&s, "{ type: lost, id: %#" PRIx64 ", " | ||
205 | "lost: %#" PRIx64 " }", | ||
206 | pevent->event.lost.id, pevent->event.lost.lost) < 0) { | ||
207 | ret = PyErr_NoMemory(); | ||
208 | } else { | ||
209 | ret = PyString_FromString(s); | ||
210 | free(s); | ||
211 | } | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static PyTypeObject pyrf_lost_event__type = { | ||
216 | PyVarObject_HEAD_INIT(NULL, 0) | ||
217 | .tp_name = "perf.lost_event", | ||
218 | .tp_basicsize = sizeof(struct pyrf_event), | ||
219 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
220 | .tp_doc = pyrf_lost_event__doc, | ||
221 | .tp_members = pyrf_lost_event__members, | ||
222 | .tp_repr = (reprfunc)pyrf_lost_event__repr, | ||
223 | }; | ||
224 | |||
225 | static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object."); | ||
226 | |||
227 | static PyMemberDef pyrf_read_event__members[] = { | ||
228 | sample_members | ||
229 | member_def(read_event, pid, T_UINT, "event pid"), | ||
230 | member_def(read_event, tid, T_UINT, "event tid"), | ||
231 | { .name = NULL, }, | ||
232 | }; | ||
233 | |||
234 | static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent) | ||
235 | { | ||
236 | return PyString_FromFormat("{ type: read, pid: %u, tid: %u }", | ||
237 | pevent->event.read.pid, | ||
238 | pevent->event.read.tid); | ||
239 | /* | ||
240 | * FIXME: return the array of read values, | ||
241 | * making this method useful ;-) | ||
242 | */ | ||
243 | } | ||
244 | |||
245 | static PyTypeObject pyrf_read_event__type = { | ||
246 | PyVarObject_HEAD_INIT(NULL, 0) | ||
247 | .tp_name = "perf.read_event", | ||
248 | .tp_basicsize = sizeof(struct pyrf_event), | ||
249 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
250 | .tp_doc = pyrf_read_event__doc, | ||
251 | .tp_members = pyrf_read_event__members, | ||
252 | .tp_repr = (reprfunc)pyrf_read_event__repr, | ||
253 | }; | ||
254 | |||
255 | static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object."); | ||
256 | |||
257 | static PyMemberDef pyrf_sample_event__members[] = { | ||
258 | sample_members | ||
259 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
260 | { .name = NULL, }, | ||
261 | }; | ||
262 | |||
263 | static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent) | ||
264 | { | ||
265 | PyObject *ret; | ||
266 | char *s; | ||
267 | |||
268 | if (asprintf(&s, "{ type: sample }") < 0) { | ||
269 | ret = PyErr_NoMemory(); | ||
270 | } else { | ||
271 | ret = PyString_FromString(s); | ||
272 | free(s); | ||
273 | } | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | static PyTypeObject pyrf_sample_event__type = { | ||
278 | PyVarObject_HEAD_INIT(NULL, 0) | ||
279 | .tp_name = "perf.sample_event", | ||
280 | .tp_basicsize = sizeof(struct pyrf_event), | ||
281 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
282 | .tp_doc = pyrf_sample_event__doc, | ||
283 | .tp_members = pyrf_sample_event__members, | ||
284 | .tp_repr = (reprfunc)pyrf_sample_event__repr, | ||
285 | }; | ||
286 | |||
190 | static int pyrf_event__setup_types(void) | 287 | static int pyrf_event__setup_types(void) |
191 | { | 288 | { |
192 | int err; | 289 | int err; |
193 | pyrf_mmap_event__type.tp_new = | 290 | pyrf_mmap_event__type.tp_new = |
194 | pyrf_task_event__type.tp_new = | 291 | pyrf_task_event__type.tp_new = |
195 | pyrf_comm_event__type.tp_new = | 292 | pyrf_comm_event__type.tp_new = |
293 | pyrf_lost_event__type.tp_new = | ||
294 | pyrf_read_event__type.tp_new = | ||
295 | pyrf_sample_event__type.tp_new = | ||
196 | pyrf_throttle_event__type.tp_new = PyType_GenericNew; | 296 | pyrf_throttle_event__type.tp_new = PyType_GenericNew; |
197 | err = PyType_Ready(&pyrf_mmap_event__type); | 297 | err = PyType_Ready(&pyrf_mmap_event__type); |
198 | if (err < 0) | 298 | if (err < 0) |
199 | goto out; | 299 | goto out; |
300 | err = PyType_Ready(&pyrf_lost_event__type); | ||
301 | if (err < 0) | ||
302 | goto out; | ||
200 | err = PyType_Ready(&pyrf_task_event__type); | 303 | err = PyType_Ready(&pyrf_task_event__type); |
201 | if (err < 0) | 304 | if (err < 0) |
202 | goto out; | 305 | goto out; |
@@ -206,20 +309,26 @@ static int pyrf_event__setup_types(void) | |||
206 | err = PyType_Ready(&pyrf_throttle_event__type); | 309 | err = PyType_Ready(&pyrf_throttle_event__type); |
207 | if (err < 0) | 310 | if (err < 0) |
208 | goto out; | 311 | goto out; |
312 | err = PyType_Ready(&pyrf_read_event__type); | ||
313 | if (err < 0) | ||
314 | goto out; | ||
315 | err = PyType_Ready(&pyrf_sample_event__type); | ||
316 | if (err < 0) | ||
317 | goto out; | ||
209 | out: | 318 | out: |
210 | return err; | 319 | return err; |
211 | } | 320 | } |
212 | 321 | ||
213 | static PyTypeObject *pyrf_event__type[] = { | 322 | static PyTypeObject *pyrf_event__type[] = { |
214 | [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, | 323 | [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, |
215 | [PERF_RECORD_LOST] = &pyrf_mmap_event__type, | 324 | [PERF_RECORD_LOST] = &pyrf_lost_event__type, |
216 | [PERF_RECORD_COMM] = &pyrf_comm_event__type, | 325 | [PERF_RECORD_COMM] = &pyrf_comm_event__type, |
217 | [PERF_RECORD_EXIT] = &pyrf_task_event__type, | 326 | [PERF_RECORD_EXIT] = &pyrf_task_event__type, |
218 | [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, | 327 | [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, |
219 | [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, | 328 | [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, |
220 | [PERF_RECORD_FORK] = &pyrf_task_event__type, | 329 | [PERF_RECORD_FORK] = &pyrf_task_event__type, |
221 | [PERF_RECORD_READ] = &pyrf_mmap_event__type, | 330 | [PERF_RECORD_READ] = &pyrf_read_event__type, |
222 | [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type, | 331 | [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, |
223 | }; | 332 | }; |
224 | 333 | ||
225 | static PyObject *pyrf_event__new(union perf_event *event) | 334 | static PyObject *pyrf_event__new(union perf_event *event) |
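For context, the pyrf_event__type[] table above is indexed with event->header.type by pyrf_event__new(), whose body lies outside this hunk, to pick the wrapper type to instantiate. A sketch of that dispatch, with the struct layout and bounds checks assumed from context rather than quoted from the tree:

/* Sketch only: assumes struct pyrf_event embeds the raw record in .event
 * and that record types outside the table range are rejected up front. */
static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if (event->header.type < PERF_RECORD_MMAP ||
	    event->header.type > PERF_RECORD_SAMPLE)
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}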
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index bbc982f5dd8b..95d370074928 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py | |||
@@ -3,9 +3,27 @@ | |||
3 | from distutils.core import setup, Extension | 3 | from distutils.core import setup, Extension |
4 | from os import getenv | 4 | from os import getenv |
5 | 5 | ||
6 | from distutils.command.build_ext import build_ext as _build_ext | ||
7 | from distutils.command.install_lib import install_lib as _install_lib | ||
8 | |||
9 | class build_ext(_build_ext): | ||
10 | def finalize_options(self): | ||
11 | _build_ext.finalize_options(self) | ||
12 | self.build_lib = build_lib | ||
13 | self.build_temp = build_tmp | ||
14 | |||
15 | class install_lib(_install_lib): | ||
16 | def finalize_options(self): | ||
17 | _install_lib.finalize_options(self) | ||
18 | self.build_dir = build_lib | ||
19 | |||
20 | |||
6 | cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] | 21 | cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] |
7 | cflags += getenv('CFLAGS', '').split() | 22 | cflags += getenv('CFLAGS', '').split() |
8 | 23 | ||
24 | build_lib = getenv('PYTHON_EXTBUILD_LIB') | ||
25 | build_tmp = getenv('PYTHON_EXTBUILD_TMP') | ||
26 | |||
9 | perf = Extension('perf', | 27 | perf = Extension('perf', |
10 | sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', | 28 | sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', |
11 | 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', | 29 | 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', |
@@ -21,4 +39,5 @@ setup(name='perf', | |||
21 | author_email='acme@redhat.com', | 39 | author_email='acme@redhat.com', |
22 | license='GPLv2', | 40 | license='GPLv2', |
23 | url='http://perf.wiki.kernel.org', | 41 | url='http://perf.wiki.kernel.org', |
24 | ext_modules=[perf]) | 42 | ext_modules=[perf], |
43 | cmdclass={'build_ext': build_ext, 'install_lib': install_lib}) | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index eec196329fd9..469c0264ed29 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -1504,6 +1504,17 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
1504 | dso->adjust_symbols = 0; | 1504 | dso->adjust_symbols = 0; |
1505 | 1505 | ||
1506 | if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { | 1506 | if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { |
1507 | struct stat st; | ||
1508 | |||
1509 | if (lstat(dso->name, &st) < 0) | ||
1510 | return -1; | ||
1511 | |||
1512 | if (st.st_uid && (st.st_uid != geteuid())) { | ||
1513 | pr_warning("File %s not owned by current user or root, " | ||
1514 | "ignoring it.\n", dso->name); | ||
1515 | return -1; | ||
1516 | } | ||
1517 | |||
1507 | ret = dso__load_perf_map(dso, map, filter); | 1518 | ret = dso__load_perf_map(dso, map, filter); |
1508 | dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : | 1519 | dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : |
1509 | SYMTAB__NOT_FOUND; | 1520 | SYMTAB__NOT_FOUND; |
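The ownership check added above means a /tmp/perf-<pid>.map file is only parsed when it belongs to root or to the user running perf, so another local user cannot plant a bogus symbol map under a predictable name. The same check as a standalone sketch (not perf code):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Return 0 if 'path' may be trusted (owned by root or by the effective
 * user), -1 otherwise.  lstat() examines the link itself, so a symlink
 * planted by another user fails the uid test even if it points at a
 * root-owned file. */
static int map_file_is_trusted(const char *path)
{
	struct stat st;

	if (lstat(path, &st) < 0)
		return -1;

	if (st.st_uid && st.st_uid != geteuid()) {
		fprintf(stderr, "%s not owned by current user or root\n", path);
		return -1;
	}

	return 0;
}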
@@ -2170,27 +2181,22 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *machines, | |||
2170 | return ret; | 2181 | return ret; |
2171 | } | 2182 | } |
2172 | 2183 | ||
2173 | struct dso *dso__new_kernel(const char *name) | 2184 | static struct dso* |
2185 | dso__kernel_findnew(struct machine *machine, const char *name, | ||
2186 | const char *short_name, int dso_type) | ||
2174 | { | 2187 | { |
2175 | struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); | 2188 | /* |
2176 | 2189 | * The kernel dso could be created by build_id processing. | |
2177 | if (dso != NULL) { | 2190 | */ |
2178 | dso__set_short_name(dso, "[kernel]"); | 2191 | struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); |
2179 | dso->kernel = DSO_TYPE_KERNEL; | ||
2180 | } | ||
2181 | |||
2182 | return dso; | ||
2183 | } | ||
2184 | 2192 | ||
2185 | static struct dso *dso__new_guest_kernel(struct machine *machine, | 2193 | /* |
2186 | const char *name) | 2194 | * We need to run this in all cases, since during the build_id |
2187 | { | 2195 | * processing we had no idea this was the kernel dso. |
2188 | char bf[PATH_MAX]; | 2196 | */ |
2189 | struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, | ||
2190 | sizeof(bf))); | ||
2191 | if (dso != NULL) { | 2197 | if (dso != NULL) { |
2192 | dso__set_short_name(dso, "[guest.kernel]"); | 2198 | dso__set_short_name(dso, short_name); |
2193 | dso->kernel = DSO_TYPE_GUEST_KERNEL; | 2199 | dso->kernel = dso_type; |
2194 | } | 2200 | } |
2195 | 2201 | ||
2196 | return dso; | 2202 | return dso; |
@@ -2208,24 +2214,36 @@ void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) | |||
2208 | dso->has_build_id = true; | 2214 | dso->has_build_id = true; |
2209 | } | 2215 | } |
2210 | 2216 | ||
2211 | static struct dso *machine__create_kernel(struct machine *machine) | 2217 | static struct dso *machine__get_kernel(struct machine *machine) |
2212 | { | 2218 | { |
2213 | const char *vmlinux_name = NULL; | 2219 | const char *vmlinux_name = NULL; |
2214 | struct dso *kernel; | 2220 | struct dso *kernel; |
2215 | 2221 | ||
2216 | if (machine__is_host(machine)) { | 2222 | if (machine__is_host(machine)) { |
2217 | vmlinux_name = symbol_conf.vmlinux_name; | 2223 | vmlinux_name = symbol_conf.vmlinux_name; |
2218 | kernel = dso__new_kernel(vmlinux_name); | 2224 | if (!vmlinux_name) |
2225 | vmlinux_name = "[kernel.kallsyms]"; | ||
2226 | |||
2227 | kernel = dso__kernel_findnew(machine, vmlinux_name, | ||
2228 | "[kernel]", | ||
2229 | DSO_TYPE_KERNEL); | ||
2219 | } else { | 2230 | } else { |
2231 | char bf[PATH_MAX]; | ||
2232 | |||
2220 | if (machine__is_default_guest(machine)) | 2233 | if (machine__is_default_guest(machine)) |
2221 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; | 2234 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; |
2222 | kernel = dso__new_guest_kernel(machine, vmlinux_name); | 2235 | if (!vmlinux_name) |
2236 | vmlinux_name = machine__mmap_name(machine, bf, | ||
2237 | sizeof(bf)); | ||
2238 | |||
2239 | kernel = dso__kernel_findnew(machine, vmlinux_name, | ||
2240 | "[guest.kernel]", | ||
2241 | DSO_TYPE_GUEST_KERNEL); | ||
2223 | } | 2242 | } |
2224 | 2243 | ||
2225 | if (kernel != NULL) { | 2244 | if (kernel != NULL && (!kernel->has_build_id)) |
2226 | dso__read_running_kernel_build_id(kernel, machine); | 2245 | dso__read_running_kernel_build_id(kernel, machine); |
2227 | dsos__add(&machine->kernel_dsos, kernel); | 2246 | |
2228 | } | ||
2229 | return kernel; | 2247 | return kernel; |
2230 | } | 2248 | } |
2231 | 2249 | ||
@@ -2329,7 +2347,7 @@ void machine__destroy_kernel_maps(struct machine *machine) | |||
2329 | 2347 | ||
2330 | int machine__create_kernel_maps(struct machine *machine) | 2348 | int machine__create_kernel_maps(struct machine *machine) |
2331 | { | 2349 | { |
2332 | struct dso *kernel = machine__create_kernel(machine); | 2350 | struct dso *kernel = machine__get_kernel(machine); |
2333 | 2351 | ||
2334 | if (kernel == NULL || | 2352 | if (kernel == NULL || |
2335 | __machine__create_kernel_maps(machine, kernel) < 0) | 2353 | __machine__create_kernel_maps(machine, kernel) < 0) |
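The refactor above drops dso__new_kernel()/dsos__add() in favour of looking the kernel dso up in machine->kernel_dsos first, so an entry already created during build_id processing is reused rather than duplicated. The underlying find-or-create idiom, with the list and helper signatures assumed here purely for illustration:

/* Sketch of the find-or-create idiom behind __dsos__findnew(); the
 * dsos__find(), dso__new() and dsos__add() shapes are assumptions. */
static struct dso *dsos__findnew_sketch(struct list_head *head,
					const char *name)
{
	struct dso *dso = dsos__find(head, name);

	if (dso == NULL) {
		dso = dso__new(name);
		if (dso != NULL)
			dsos__add(head, dso);
	}

	return dso;
}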
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 325ee36a9d29..4f377d92e75a 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -155,7 +155,6 @@ struct dso { | |||
155 | }; | 155 | }; |
156 | 156 | ||
157 | struct dso *dso__new(const char *name); | 157 | struct dso *dso__new(const char *name); |
158 | struct dso *dso__new_kernel(const char *name); | ||
159 | void dso__delete(struct dso *dso); | 158 | void dso__delete(struct dso *dso); |
160 | 159 | ||
161 | int dso__name_len(const struct dso *dso); | 160 | int dso__name_len(const struct dso *dso); |
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c index 5a06538532af..88403cf8396a 100644 --- a/tools/perf/util/ui/browsers/top.c +++ b/tools/perf/util/ui/browsers/top.c | |||
@@ -208,6 +208,5 @@ int perf_top__tui_browser(struct perf_top *top) | |||
208 | }, | 208 | }, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | ui_helpline__push("Press <- or ESC to exit"); | ||
212 | return perf_top_browser__run(&browser); | 211 | return perf_top_browser__run(&browser); |
213 | } | 212 | } |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 94c2cf0a98b8..e8a03aceceb1 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | # Set the following to `true' to make an unstripped, unoptimized | 25 | # Set the following to `true' to make an unstripped, unoptimized |
26 | # binary. Leave this set to `false' for production use. | 26 | # binary. Leave this set to `false' for production use. |
27 | DEBUG ?= false | 27 | DEBUG ?= true |
28 | 28 | ||
29 | # make the build silent. Set this to something else to make it noisy again. | 29 | # make the build silent. Set this to something else to make it noisy again. |
30 | V ?= false | 30 | V ?= false |
@@ -35,7 +35,7 @@ NLS ?= true | |||
35 | 35 | ||
36 | # Set the following to 'true' to build/install the | 36 | # Set the following to 'true' to build/install the |
37 | # cpufreq-bench benchmarking tool | 37 | # cpufreq-bench benchmarking tool |
38 | CPUFRQ_BENCH ?= true | 38 | CPUFREQ_BENCH ?= true |
39 | 39 | ||
40 | # Prefix to the directories we're installing to | 40 | # Prefix to the directories we're installing to |
41 | DESTDIR ?= | 41 | DESTDIR ?= |
@@ -137,9 +137,10 @@ CFLAGS += -pipe | |||
137 | ifeq ($(strip $(NLS)),true) | 137 | ifeq ($(strip $(NLS)),true) |
138 | INSTALL_NLS += install-gmo | 138 | INSTALL_NLS += install-gmo |
139 | COMPILE_NLS += create-gmo | 139 | COMPILE_NLS += create-gmo |
140 | CFLAGS += -DNLS | ||
140 | endif | 141 | endif |
141 | 142 | ||
142 | ifeq ($(strip $(CPUFRQ_BENCH)),true) | 143 | ifeq ($(strip $(CPUFREQ_BENCH)),true) |
143 | INSTALL_BENCH += install-bench | 144 | INSTALL_BENCH += install-bench |
144 | COMPILE_BENCH += compile-bench | 145 | COMPILE_BENCH += compile-bench |
145 | endif | 146 | endif |
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile index dbf13998462a..3326217dd311 100644 --- a/tools/power/cpupower/debug/x86_64/Makefile +++ b/tools/power/cpupower/debug/x86_64/Makefile | |||
@@ -1,10 +1,10 @@ | |||
1 | default: all | 1 | default: all |
2 | 2 | ||
3 | centrino-decode: centrino-decode.c | 3 | centrino-decode: ../i386/centrino-decode.c |
4 | $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c | 4 | $(CC) $(CFLAGS) -o $@ $< |
5 | 5 | ||
6 | powernow-k8-decode: powernow-k8-decode.c | 6 | powernow-k8-decode: ../i386/powernow-k8-decode.c |
7 | $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c | 7 | $(CC) $(CFLAGS) -o $@ $< |
8 | 8 | ||
9 | all: centrino-decode powernow-k8-decode | 9 | all: centrino-decode powernow-k8-decode |
10 | 10 | ||
diff --git a/tools/power/cpupower/debug/x86_64/centrino-decode.c b/tools/power/cpupower/debug/x86_64/centrino-decode.c deleted file mode 120000 index 26fb3f1d8fc7..000000000000 --- a/tools/power/cpupower/debug/x86_64/centrino-decode.c +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../i386/centrino-decode.c \ No newline at end of file | ||
diff --git a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c b/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c deleted file mode 120000 index eb30c79cf9df..000000000000 --- a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../i386/powernow-k8-decode.c \ No newline at end of file | ||
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1 index 3194811d58f5..bb60a8d1e45a 100644 --- a/tools/power/cpupower/man/cpupower-frequency-info.1 +++ b/tools/power/cpupower/man/cpupower-frequency-info.1 | |||
@@ -1,10 +1,10 @@ | |||
1 | .TH "cpufreq-info" "1" "0.1" "Mattia Dongili" "" | 1 | .TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" "" |
2 | .SH "NAME" | 2 | .SH "NAME" |
3 | .LP | 3 | .LP |
4 | cpufreq\-info \- Utility to retrieve cpufreq kernel information | 4 | cpupower frequency\-info \- Utility to retrieve cpufreq kernel information |
5 | .SH "SYNTAX" | 5 | .SH "SYNTAX" |
6 | .LP | 6 | .LP |
7 | cpufreq\-info [\fIoptions\fP] | 7 | cpupower [ \-c cpulist ] frequency\-info [\fIoptions\fP] |
8 | .SH "DESCRIPTION" | 8 | .SH "DESCRIPTION" |
9 | .LP | 9 | .LP |
10 | A small tool which prints out cpufreq information helpful to developers and interested users. | 10 | A small tool which prints out cpufreq information helpful to developers and interested users. |
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1 index 26e3e13eee3b..685f469093ad 100644 --- a/tools/power/cpupower/man/cpupower-frequency-set.1 +++ b/tools/power/cpupower/man/cpupower-frequency-set.1 | |||
@@ -1,13 +1,13 @@ | |||
1 | .TH "cpufreq-set" "1" "0.1" "Mattia Dongili" "" | 1 | .TH "cpupower-freqency-set" "1" "0.1" "Mattia Dongili" "" |
2 | .SH "NAME" | 2 | .SH "NAME" |
3 | .LP | 3 | .LP |
4 | cpufreq\-set \- A small tool which allows to modify cpufreq settings. | 4 | cpupower frequency\-set \- A small tool which allows you to modify cpufreq settings. |
5 | .SH "SYNTAX" | 5 | .SH "SYNTAX" |
6 | .LP | 6 | .LP |
7 | cpufreq\-set [\fIoptions\fP] | 7 | cpupower [ \-c cpu ] frequency\-set [\fIoptions\fP] |
8 | .SH "DESCRIPTION" | 8 | .SH "DESCRIPTION" |
9 | .LP | 9 | .LP |
10 | cpufreq\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. | 10 | cpupower frequency\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. |
11 | .SH "OPTIONS" | 11 | .SH "OPTIONS" |
12 | .LP | 12 | .LP |
13 | .TP | 13 | .TP |
diff --git a/tools/power/cpupower/man/cpupower.1 b/tools/power/cpupower/man/cpupower.1 index 78c20feab85c..baf741d06e82 100644 --- a/tools/power/cpupower/man/cpupower.1 +++ b/tools/power/cpupower/man/cpupower.1 | |||
@@ -3,7 +3,7 @@ | |||
3 | cpupower \- Shows and sets processor power related values | 3 | cpupower \- Shows and sets processor power related values |
4 | .SH SYNOPSIS | 4 | .SH SYNOPSIS |
5 | .ft B | 5 | .ft B |
6 | .B cpupower [ \-c cpulist ] subcommand [ARGS] | 6 | .B cpupower [ \-c cpulist ] <command> [ARGS] |
7 | 7 | ||
8 | .B cpupower \-v|\-\-version | 8 | .B cpupower \-v|\-\-version |
9 | 9 | ||
@@ -13,24 +13,24 @@ cpupower \- Shows and sets processor power related values | |||
13 | \fBcpupower \fP is a collection of tools to examine and tune power saving | 13 | \fBcpupower \fP is a collection of tools to examine and tune power saving |
14 | related features of your processor. | 14 | related features of your processor. |
15 | 15 | ||
16 | The manpages of the subcommands (cpupower\-<subcommand>(1)) provide detailed | 16 | The manpages of the commands (cpupower\-<command>(1)) provide detailed |
17 | descriptions of supported features. Run \fBcpupower help\fP to get an overview | 17 | descriptions of supported features. Run \fBcpupower help\fP to get an overview |
18 | of supported subcommands. | 18 | of supported commands. |
19 | 19 | ||
20 | .SH Options | 20 | .SH Options |
21 | .PP | 21 | .PP |
22 | \-\-help, \-h | 22 | \-\-help, \-h |
23 | .RS 4 | 23 | .RS 4 |
24 | Shows supported subcommands and general usage. | 24 | Shows supported commands and general usage. |
25 | .RE | 25 | .RE |
26 | .PP | 26 | .PP |
27 | \-\-cpu cpulist, \-c cpulist | 27 | \-\-cpu cpulist, \-c cpulist |
28 | .RS 4 | 28 | .RS 4 |
29 | Only show or set values for specific cores. | 29 | Only show or set values for specific cores. |
30 | This option is not supported by all subcommands, details can be found in the | 30 | This option is not supported by all commands, details can be found in the |
31 | manpages of the subcommands. | 31 | manpages of the commands. |
32 | 32 | ||
33 | Some subcommands access all cores (typically the *\-set commands), some only | 33 | Some commands access all cores (typically the *\-set commands), some only |
34 | the first core (typically the *\-info commands) by default. | 34 | the first core (typically the *\-info commands) by default. |
35 | 35 | ||
36 | The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via | 36 | The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via |
diff --git a/tools/power/cpupower/utils/builtin.h b/tools/power/cpupower/utils/builtin.h index c870ffba5219..c10496fbe3c6 100644 --- a/tools/power/cpupower/utils/builtin.h +++ b/tools/power/cpupower/utils/builtin.h | |||
@@ -8,11 +8,4 @@ extern int cmd_freq_info(int argc, const char **argv); | |||
8 | extern int cmd_idle_info(int argc, const char **argv); | 8 | extern int cmd_idle_info(int argc, const char **argv); |
9 | extern int cmd_monitor(int argc, const char **argv); | 9 | extern int cmd_monitor(int argc, const char **argv); |
10 | 10 | ||
11 | extern void set_help(void); | ||
12 | extern void info_help(void); | ||
13 | extern void freq_set_help(void); | ||
14 | extern void freq_info_help(void); | ||
15 | extern void idle_info_help(void); | ||
16 | extern void monitor_help(void); | ||
17 | |||
18 | #endif | 11 | #endif |
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index 5a1d25f056b3..28953c9a7bd5 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c | |||
@@ -510,37 +510,6 @@ static int get_latency(unsigned int cpu, unsigned int human) | |||
510 | return 0; | 510 | return 0; |
511 | } | 511 | } |
512 | 512 | ||
513 | void freq_info_help(void) | ||
514 | { | ||
515 | printf(_("Usage: cpupower freqinfo [options]\n")); | ||
516 | printf(_("Options:\n")); | ||
517 | printf(_(" -e, --debug Prints out debug information [default]\n")); | ||
518 | printf(_(" -f, --freq Get frequency the CPU currently runs at, according\n" | ||
519 | " to the cpufreq core *\n")); | ||
520 | printf(_(" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n" | ||
521 | " it from hardware (only available to root) *\n")); | ||
522 | printf(_(" -l, --hwlimits Determine the minimum and maximum CPU frequency allowed *\n")); | ||
523 | printf(_(" -d, --driver Determines the used cpufreq kernel driver *\n")); | ||
524 | printf(_(" -p, --policy Gets the currently used cpufreq policy *\n")); | ||
525 | printf(_(" -g, --governors Determines available cpufreq governors *\n")); | ||
526 | printf(_(" -r, --related-cpus Determines which CPUs run at the same hardware frequency *\n")); | ||
527 | printf(_(" -a, --affected-cpus Determines which CPUs need to have their frequency\n" | ||
528 | " coordinated by software *\n")); | ||
529 | printf(_(" -s, --stats Shows cpufreq statistics if available\n")); | ||
530 | printf(_(" -y, --latency Determines the maximum latency on CPU frequency changes *\n")); | ||
531 | printf(_(" -b, --boost Checks for turbo or boost modes *\n")); | ||
532 | printf(_(" -o, --proc Prints out information like provided by the /proc/cpufreq\n" | ||
533 | " interface in 2.4. and early 2.6. kernels\n")); | ||
534 | printf(_(" -m, --human human-readable output for the -f, -w, -s and -y parameters\n")); | ||
535 | printf(_(" -h, --help Prints out this screen\n")); | ||
536 | |||
537 | printf("\n"); | ||
538 | printf(_("If no argument is given, full output about\n" | ||
539 | "cpufreq is printed which is useful e.g. for reporting bugs.\n\n")); | ||
540 | printf(_("By default info of CPU 0 is shown which can be overridden\n" | ||
541 | "with the cpupower --cpu main command option.\n")); | ||
542 | } | ||
543 | |||
544 | static struct option info_opts[] = { | 513 | static struct option info_opts[] = { |
545 | { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, | 514 | { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, |
546 | { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, | 515 | { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, |
@@ -556,7 +525,6 @@ static struct option info_opts[] = { | |||
556 | { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, | 525 | { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, |
557 | { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, | 526 | { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, |
558 | { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, | 527 | { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, |
559 | { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, | ||
560 | { }, | 528 | { }, |
561 | }; | 529 | }; |
562 | 530 | ||
@@ -570,16 +538,12 @@ int cmd_freq_info(int argc, char **argv) | |||
570 | int output_param = 0; | 538 | int output_param = 0; |
571 | 539 | ||
572 | do { | 540 | do { |
573 | ret = getopt_long(argc, argv, "hoefwldpgrasmyb", info_opts, NULL); | 541 | ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL); |
574 | switch (ret) { | 542 | switch (ret) { |
575 | case '?': | 543 | case '?': |
576 | output_param = '?'; | 544 | output_param = '?'; |
577 | cont = 0; | 545 | cont = 0; |
578 | break; | 546 | break; |
579 | case 'h': | ||
580 | output_param = 'h'; | ||
581 | cont = 0; | ||
582 | break; | ||
583 | case -1: | 547 | case -1: |
584 | cont = 0; | 548 | cont = 0; |
585 | break; | 549 | break; |
@@ -642,11 +606,7 @@ int cmd_freq_info(int argc, char **argv) | |||
642 | return -EINVAL; | 606 | return -EINVAL; |
643 | case '?': | 607 | case '?': |
644 | printf(_("invalid or unknown argument\n")); | 608 | printf(_("invalid or unknown argument\n")); |
645 | freq_info_help(); | ||
646 | return -EINVAL; | 609 | return -EINVAL; |
647 | case 'h': | ||
648 | freq_info_help(); | ||
649 | return EXIT_SUCCESS; | ||
650 | case 'o': | 610 | case 'o': |
651 | proc_cpufreq_output(); | 611 | proc_cpufreq_output(); |
652 | return EXIT_SUCCESS; | 612 | return EXIT_SUCCESS; |
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c index 5f783622bf31..dd1539eb8c63 100644 --- a/tools/power/cpupower/utils/cpufreq-set.c +++ b/tools/power/cpupower/utils/cpufreq-set.c | |||
@@ -20,34 +20,11 @@ | |||
20 | 20 | ||
21 | #define NORM_FREQ_LEN 32 | 21 | #define NORM_FREQ_LEN 32 |
22 | 22 | ||
23 | void freq_set_help(void) | ||
24 | { | ||
25 | printf(_("Usage: cpupower frequency-set [options]\n")); | ||
26 | printf(_("Options:\n")); | ||
27 | printf(_(" -d FREQ, --min FREQ new minimum CPU frequency the governor may select\n")); | ||
28 | printf(_(" -u FREQ, --max FREQ new maximum CPU frequency the governor may select\n")); | ||
29 | printf(_(" -g GOV, --governor GOV new cpufreq governor\n")); | ||
30 | printf(_(" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n" | ||
31 | " governor to be available and loaded\n")); | ||
32 | printf(_(" -r, --related Switches all hardware-related CPUs\n")); | ||
33 | printf(_(" -h, --help Prints out this screen\n")); | ||
34 | printf("\n"); | ||
35 | printf(_("Notes:\n" | ||
36 | "1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n")); | ||
37 | printf(_("2. The -f FREQ, --freq FREQ parameter cannot be combined with any other parameter\n" | ||
38 | " except the -c CPU, --cpu CPU parameter\n" | ||
39 | "3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n" | ||
40 | " by postfixing the value with the wanted unit name, without any space\n" | ||
41 | " (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n")); | ||
42 | |||
43 | } | ||
44 | |||
45 | static struct option set_opts[] = { | 23 | static struct option set_opts[] = { |
46 | { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, | 24 | { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, |
47 | { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, | 25 | { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, |
48 | { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, | 26 | { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, |
49 | { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, | 27 | { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, |
50 | { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, | ||
51 | { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, | 28 | { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, |
52 | { }, | 29 | { }, |
53 | }; | 30 | }; |
@@ -80,7 +57,6 @@ const struct freq_units def_units[] = { | |||
80 | static void print_unknown_arg(void) | 57 | static void print_unknown_arg(void) |
81 | { | 58 | { |
82 | printf(_("invalid or unknown argument\n")); | 59 | printf(_("invalid or unknown argument\n")); |
83 | freq_set_help(); | ||
84 | } | 60 | } |
85 | 61 | ||
86 | static unsigned long string_to_frequency(const char *str) | 62 | static unsigned long string_to_frequency(const char *str) |
@@ -231,14 +207,11 @@ int cmd_freq_set(int argc, char **argv) | |||
231 | 207 | ||
232 | /* parameter parsing */ | 208 | /* parameter parsing */ |
233 | do { | 209 | do { |
234 | ret = getopt_long(argc, argv, "d:u:g:f:hr", set_opts, NULL); | 210 | ret = getopt_long(argc, argv, "d:u:g:f:r", set_opts, NULL); |
235 | switch (ret) { | 211 | switch (ret) { |
236 | case '?': | 212 | case '?': |
237 | print_unknown_arg(); | 213 | print_unknown_arg(); |
238 | return -EINVAL; | 214 | return -EINVAL; |
239 | case 'h': | ||
240 | freq_set_help(); | ||
241 | return 0; | ||
242 | case -1: | 215 | case -1: |
243 | cont = 0; | 216 | cont = 0; |
244 | break; | 217 | break; |
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c index 70da3574f1e9..b028267c1376 100644 --- a/tools/power/cpupower/utils/cpuidle-info.c +++ b/tools/power/cpupower/utils/cpuidle-info.c | |||
@@ -139,30 +139,14 @@ static void proc_cpuidle_cpu_output(unsigned int cpu) | |||
139 | } | 139 | } |
140 | } | 140 | } |
141 | 141 | ||
142 | /* --freq / -f */ | ||
143 | |||
144 | void idle_info_help(void) | ||
145 | { | ||
146 | printf(_ ("Usage: cpupower idleinfo [options]\n")); | ||
147 | printf(_ ("Options:\n")); | ||
148 | printf(_ (" -s, --silent Only show general C-state information\n")); | ||
149 | printf(_ (" -o, --proc Prints out information like provided by the /proc/acpi/processor/*/power\n" | ||
150 | " interface in older kernels\n")); | ||
151 | printf(_ (" -h, --help Prints out this screen\n")); | ||
152 | |||
153 | printf("\n"); | ||
154 | } | ||
155 | |||
156 | static struct option info_opts[] = { | 142 | static struct option info_opts[] = { |
157 | { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, | 143 | { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, |
158 | { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, | 144 | { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, |
159 | { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, | ||
160 | { }, | 145 | { }, |
161 | }; | 146 | }; |
162 | 147 | ||
163 | static inline void cpuidle_exit(int fail) | 148 | static inline void cpuidle_exit(int fail) |
164 | { | 149 | { |
165 | idle_info_help(); | ||
166 | exit(EXIT_FAILURE); | 150 | exit(EXIT_FAILURE); |
167 | } | 151 | } |
168 | 152 | ||
@@ -174,7 +158,7 @@ int cmd_idle_info(int argc, char **argv) | |||
174 | unsigned int cpu = 0; | 158 | unsigned int cpu = 0; |
175 | 159 | ||
176 | do { | 160 | do { |
177 | ret = getopt_long(argc, argv, "hos", info_opts, NULL); | 161 | ret = getopt_long(argc, argv, "os", info_opts, NULL); |
178 | if (ret == -1) | 162 | if (ret == -1) |
179 | break; | 163 | break; |
180 | switch (ret) { | 164 | switch (ret) { |
@@ -182,10 +166,6 @@ int cmd_idle_info(int argc, char **argv) | |||
182 | output_param = '?'; | 166 | output_param = '?'; |
183 | cont = 0; | 167 | cont = 0; |
184 | break; | 168 | break; |
185 | case 'h': | ||
186 | output_param = 'h'; | ||
187 | cont = 0; | ||
188 | break; | ||
189 | case 's': | 169 | case 's': |
190 | verbose = 0; | 170 | verbose = 0; |
191 | break; | 171 | break; |
@@ -211,8 +191,6 @@ int cmd_idle_info(int argc, char **argv) | |||
211 | case '?': | 191 | case '?': |
212 | printf(_("invalid or unknown argument\n")); | 192 | printf(_("invalid or unknown argument\n")); |
213 | cpuidle_exit(EXIT_FAILURE); | 193 | cpuidle_exit(EXIT_FAILURE); |
214 | case 'h': | ||
215 | cpuidle_exit(EXIT_SUCCESS); | ||
216 | } | 194 | } |
217 | 195 | ||
218 | /* Default is: show output of CPU 0 only */ | 196 | /* Default is: show output of CPU 0 only */ |
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c index 85253cb7600e..3f68632c28c7 100644 --- a/tools/power/cpupower/utils/cpupower-info.c +++ b/tools/power/cpupower/utils/cpupower-info.c | |||
@@ -16,31 +16,16 @@ | |||
16 | #include "helpers/helpers.h" | 16 | #include "helpers/helpers.h" |
17 | #include "helpers/sysfs.h" | 17 | #include "helpers/sysfs.h" |
18 | 18 | ||
19 | void info_help(void) | ||
20 | { | ||
21 | printf(_("Usage: cpupower info [ -b ] [ -m ] [ -s ]\n")); | ||
22 | printf(_("Options:\n")); | ||
23 | printf(_(" -b, --perf-bias Gets CPU's power vs performance policy on some\n" | ||
24 | " Intel models [0-15], see manpage for details\n")); | ||
25 | printf(_(" -m, --sched-mc Gets the kernel's multi core scheduler policy.\n")); | ||
26 | printf(_(" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n")); | ||
27 | printf(_(" -h, --help Prints out this screen\n")); | ||
28 | printf(_("\nPassing no option will show all info, by default only on core 0\n")); | ||
29 | printf("\n"); | ||
30 | } | ||
31 | |||
32 | static struct option set_opts[] = { | 19 | static struct option set_opts[] = { |
33 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, | 20 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, |
34 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, | 21 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, |
35 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, | 22 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, |
36 | { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, | ||
37 | { }, | 23 | { }, |
38 | }; | 24 | }; |
39 | 25 | ||
40 | static void print_wrong_arg_exit(void) | 26 | static void print_wrong_arg_exit(void) |
41 | { | 27 | { |
42 | printf(_("invalid or unknown argument\n")); | 28 | printf(_("invalid or unknown argument\n")); |
43 | info_help(); | ||
44 | exit(EXIT_FAILURE); | 29 | exit(EXIT_FAILURE); |
45 | } | 30 | } |
46 | 31 | ||
@@ -64,11 +49,8 @@ int cmd_info(int argc, char **argv) | |||
64 | textdomain(PACKAGE); | 49 | textdomain(PACKAGE); |
65 | 50 | ||
66 | /* parameter parsing */ | 51 | /* parameter parsing */ |
67 | while ((ret = getopt_long(argc, argv, "msbh", set_opts, NULL)) != -1) { | 52 | while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) { |
68 | switch (ret) { | 53 | switch (ret) { |
69 | case 'h': | ||
70 | info_help(); | ||
71 | return 0; | ||
72 | case 'b': | 54 | case 'b': |
73 | if (params.perf_bias) | 55 | if (params.perf_bias) |
74 | print_wrong_arg_exit(); | 56 | print_wrong_arg_exit(); |
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index bc1b391e46f0..dc4de3762111 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c | |||
@@ -17,30 +17,16 @@ | |||
17 | #include "helpers/sysfs.h" | 17 | #include "helpers/sysfs.h" |
18 | #include "helpers/bitmask.h" | 18 | #include "helpers/bitmask.h" |
19 | 19 | ||
20 | void set_help(void) | ||
21 | { | ||
22 | printf(_("Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n")); | ||
23 | printf(_("Options:\n")); | ||
24 | printf(_(" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n" | ||
25 | " Intel models [0-15], see manpage for details\n")); | ||
26 | printf(_(" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n")); | ||
27 | printf(_(" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler policy.\n")); | ||
28 | printf(_(" -h, --help Prints out this screen\n")); | ||
29 | printf("\n"); | ||
30 | } | ||
31 | |||
32 | static struct option set_opts[] = { | 20 | static struct option set_opts[] = { |
33 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, | 21 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, |
34 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, | 22 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, |
35 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, | 23 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, |
36 | { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, | ||
37 | { }, | 24 | { }, |
38 | }; | 25 | }; |
39 | 26 | ||
40 | static void print_wrong_arg_exit(void) | 27 | static void print_wrong_arg_exit(void) |
41 | { | 28 | { |
42 | printf(_("invalid or unknown argument\n")); | 29 | printf(_("invalid or unknown argument\n")); |
43 | set_help(); | ||
44 | exit(EXIT_FAILURE); | 30 | exit(EXIT_FAILURE); |
45 | } | 31 | } |
46 | 32 | ||
@@ -66,12 +52,9 @@ int cmd_set(int argc, char **argv) | |||
66 | 52 | ||
67 | params.params = 0; | 53 | params.params = 0; |
68 | /* parameter parsing */ | 54 | /* parameter parsing */ |
69 | while ((ret = getopt_long(argc, argv, "m:s:b:h", | 55 | while ((ret = getopt_long(argc, argv, "m:s:b:", |
70 | set_opts, NULL)) != -1) { | 56 | set_opts, NULL)) != -1) { |
71 | switch (ret) { | 57 | switch (ret) { |
72 | case 'h': | ||
73 | set_help(); | ||
74 | return 0; | ||
75 | case 'b': | 58 | case 'b': |
76 | if (params.perf_bias) | 59 | if (params.perf_bias) |
77 | print_wrong_arg_exit(); | 60 | print_wrong_arg_exit(); |
@@ -110,10 +93,8 @@ int cmd_set(int argc, char **argv) | |||
110 | } | 93 | } |
111 | }; | 94 | }; |
112 | 95 | ||
113 | if (!params.params) { | 96 | if (!params.params) |
114 | set_help(); | 97 | print_wrong_arg_exit(); |
115 | return -EINVAL; | ||
116 | } | ||
117 | 98 | ||
118 | if (params.sched_mc) { | 99 | if (params.sched_mc) { |
119 | ret = sysfs_set_sched("mc", sched_mc); | 100 | ret = sysfs_set_sched("mc", sched_mc); |
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c index 5844ae0f786f..52bee591c1c5 100644 --- a/tools/power/cpupower/utils/cpupower.c +++ b/tools/power/cpupower/utils/cpupower.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <stdlib.h> | 11 | #include <stdlib.h> |
12 | #include <string.h> | 12 | #include <string.h> |
13 | #include <unistd.h> | 13 | #include <unistd.h> |
14 | #include <errno.h> | ||
14 | 15 | ||
15 | #include "builtin.h" | 16 | #include "builtin.h" |
16 | #include "helpers/helpers.h" | 17 | #include "helpers/helpers.h" |
@@ -19,13 +20,12 @@ | |||
19 | struct cmd_struct { | 20 | struct cmd_struct { |
20 | const char *cmd; | 21 | const char *cmd; |
21 | int (*main)(int, const char **); | 22 | int (*main)(int, const char **); |
22 | void (*usage)(void); | ||
23 | int needs_root; | 23 | int needs_root; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) | 26 | #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) |
27 | 27 | ||
28 | int cmd_help(int argc, const char **argv); | 28 | static int cmd_help(int argc, const char **argv); |
29 | 29 | ||
30 | /* Global cpu_info object available for all binaries | 30 | /* Global cpu_info object available for all binaries |
31 | * Info only retrieved from CPU 0 | 31 | * Info only retrieved from CPU 0 |
@@ -44,55 +44,66 @@ int be_verbose; | |||
44 | static void print_help(void); | 44 | static void print_help(void); |
45 | 45 | ||
46 | static struct cmd_struct commands[] = { | 46 | static struct cmd_struct commands[] = { |
47 | { "frequency-info", cmd_freq_info, freq_info_help, 0 }, | 47 | { "frequency-info", cmd_freq_info, 0 }, |
48 | { "frequency-set", cmd_freq_set, freq_set_help, 1 }, | 48 | { "frequency-set", cmd_freq_set, 1 }, |
49 | { "idle-info", cmd_idle_info, idle_info_help, 0 }, | 49 | { "idle-info", cmd_idle_info, 0 }, |
50 | { "set", cmd_set, set_help, 1 }, | 50 | { "set", cmd_set, 1 }, |
51 | { "info", cmd_info, info_help, 0 }, | 51 | { "info", cmd_info, 0 }, |
52 | { "monitor", cmd_monitor, monitor_help, 0 }, | 52 | { "monitor", cmd_monitor, 0 }, |
53 | { "help", cmd_help, print_help, 0 }, | 53 | { "help", cmd_help, 0 }, |
54 | /* { "bench", cmd_bench, NULL, 1 }, */ | 54 | /* { "bench", cmd_bench, 1 }, */ |
55 | }; | 55 | }; |
56 | 56 | ||
57 | int cmd_help(int argc, const char **argv) | ||
58 | { | ||
59 | unsigned int i; | ||
60 | |||
61 | if (argc > 1) { | ||
62 | for (i = 0; i < ARRAY_SIZE(commands); i++) { | ||
63 | struct cmd_struct *p = commands + i; | ||
64 | if (strcmp(p->cmd, argv[1])) | ||
65 | continue; | ||
66 | if (p->usage) { | ||
67 | p->usage(); | ||
68 | return EXIT_SUCCESS; | ||
69 | } | ||
70 | } | ||
71 | } | ||
72 | print_help(); | ||
73 | if (argc == 1) | ||
74 | return EXIT_SUCCESS; /* cpupower help */ | ||
75 | return EXIT_FAILURE; | ||
76 | } | ||
77 | |||
78 | static void print_help(void) | 57 | static void print_help(void) |
79 | { | 58 | { |
80 | unsigned int i; | 59 | unsigned int i; |
81 | 60 | ||
82 | #ifdef DEBUG | 61 | #ifdef DEBUG |
83 | printf(_("cpupower [ -d ][ -c cpulist ] subcommand [ARGS]\n")); | 62 | printf(_("Usage:\tcpupower [-d|--debug] [-c|--cpu cpulist ] <command> [<args>]\n")); |
84 | printf(_(" -d, --debug May increase output (stderr) on some subcommands\n")); | ||
85 | #else | 63 | #else |
86 | printf(_("cpupower [ -c cpulist ] subcommand [ARGS]\n")); | 64 | printf(_("Usage:\tcpupower [-c|--cpu cpulist ] <command> [<args>]\n")); |
87 | #endif | 65 | #endif |
88 | printf(_("cpupower --version\n")); | 66 | printf(_("Supported commands are:\n")); |
89 | printf(_("Supported subcommands are:\n")); | ||
90 | for (i = 0; i < ARRAY_SIZE(commands); i++) | 67 | for (i = 0; i < ARRAY_SIZE(commands); i++) |
91 | printf("\t%s\n", commands[i].cmd); | 68 | printf("\t%s\n", commands[i].cmd); |
92 | printf(_("\nSome subcommands can make use of the -c cpulist option.\n")); | 69 | printf(_("\nNot all commands can make use of the -c cpulist option.\n")); |
93 | printf(_("Look at the general cpupower manpage how to use it\n")); | 70 | printf(_("\nUse 'cpupower help <command>' for getting help for above commands.\n")); |
94 | printf(_("and read up the subcommand's manpage whether it is supported.\n")); | 71 | } |
95 | printf(_("\nUse cpupower help subcommand for getting help for above subcommands.\n")); | 72 | |
73 | static int print_man_page(const char *subpage) | ||
74 | { | ||
75 | int len; | ||
76 | char *page; | ||
77 | |||
78 | len = 10; /* enough for "cpupower-" */ | ||
79 | if (subpage != NULL) | ||
80 | len += strlen(subpage); | ||
81 | |||
82 | page = malloc(len); | ||
83 | if (!page) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | sprintf(page, "cpupower"); | ||
87 | if ((subpage != NULL) && strcmp(subpage, "help")) { | ||
88 | strcat(page, "-"); | ||
89 | strcat(page, subpage); | ||
90 | } | ||
91 | |||
92 | execlp("man", "man", page, NULL); | ||
93 | |||
94 | /* should not be reached */ | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | static int cmd_help(int argc, const char **argv) | ||
99 | { | ||
100 | if (argc > 1) { | ||
101 | print_man_page(argv[1]); /* exits within execlp() */ | ||
102 | return EXIT_FAILURE; | ||
103 | } | ||
104 | |||
105 | print_help(); | ||
106 | return EXIT_SUCCESS; | ||
96 | } | 107 | } |
97 | 108 | ||
98 | static void print_version(void) | 109 | static void print_version(void) |
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 592ee362b877..2747e738efb0 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h | |||
@@ -16,11 +16,20 @@ | |||
16 | #include "helpers/bitmask.h" | 16 | #include "helpers/bitmask.h" |
17 | 17 | ||
18 | /* Internationalization ****************************/ | 18 | /* Internationalization ****************************/ |
19 | #ifdef NLS | ||
20 | |||
19 | #define _(String) gettext(String) | 21 | #define _(String) gettext(String) |
20 | #ifndef gettext_noop | 22 | #ifndef gettext_noop |
21 | #define gettext_noop(String) String | 23 | #define gettext_noop(String) String |
22 | #endif | 24 | #endif |
23 | #define N_(String) gettext_noop(String) | 25 | #define N_(String) gettext_noop(String) |
26 | |||
27 | #else /* !NLS */ | ||
28 | |||
29 | #define _(String) String | ||
30 | #define N_(String) String | ||
31 | |||
32 | #endif | ||
24 | /* Internationalization ****************************/ | 33 | /* Internationalization ****************************/ |
25 | 34 | ||
26 | extern int run_as_root; | 35 | extern int run_as_root; |
@@ -96,6 +105,9 @@ struct cpupower_topology { | |||
96 | int pkg; | 105 | int pkg; |
97 | int core; | 106 | int core; |
98 | int cpu; | 107 | int cpu; |
108 | |||
109 | /* flags */ | ||
110 | unsigned int is_online:1; | ||
99 | } *core_info; | 111 | } *core_info; |
100 | }; | 112 | }; |
101 | 113 | ||
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 55e2466674c6..c6343024a611 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c | |||
@@ -56,6 +56,56 @@ static unsigned int sysfs_write_file(const char *path, | |||
56 | return (unsigned int) numwrite; | 56 | return (unsigned int) numwrite; |
57 | } | 57 | } |
58 | 58 | ||
59 | /* | ||
60 | * Detect whether a CPU is online | ||
61 | * | ||
62 | * Returns: | ||
63 | * 1 -> if CPU is online | ||
64 | * 0 -> if CPU is offline | ||
65 | * negative errno values in error case | ||
66 | */ | ||
67 | int sysfs_is_cpu_online(unsigned int cpu) | ||
68 | { | ||
69 | char path[SYSFS_PATH_MAX]; | ||
70 | int fd; | ||
71 | ssize_t numread; | ||
72 | unsigned long long value; | ||
73 | char linebuf[MAX_LINE_LEN]; | ||
74 | char *endp; | ||
75 | struct stat statbuf; | ||
76 | |||
77 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); | ||
78 | |||
79 | if (stat(path, &statbuf) != 0) | ||
80 | return 0; | ||
81 | |||
82 | /* | ||
83 | * kernel without CONFIG_HOTPLUG_CPU | ||
84 | * -> cpuX directory exists, but not cpuX/online file | ||
85 | */ | ||
86 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); | ||
87 | if (stat(path, &statbuf) != 0) | ||
88 | return 1; | ||
89 | |||
90 | fd = open(path, O_RDONLY); | ||
91 | if (fd == -1) | ||
92 | return -errno; | ||
93 | |||
94 | numread = read(fd, linebuf, MAX_LINE_LEN - 1); | ||
95 | if (numread < 1) { | ||
96 | close(fd); | ||
97 | return -EIO; | ||
98 | } | ||
99 | linebuf[numread] = '\0'; | ||
100 | close(fd); | ||
101 | |||
102 | value = strtoull(linebuf, &endp, 0); | ||
103 | if (value > 1 || value < 0) | ||
104 | return -EINVAL; | ||
105 | |||
106 | return value; | ||
107 | } | ||
108 | |||
59 | /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ | 109 | /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ |
60 | 110 | ||
61 | /* | 111 | /* |
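Usage sketch for the new helper: a caller walking all possible CPUs can treat any return other than 1 as "skip this CPU", since 0 means offline and negative values are errno codes. The sysconf() call for the CPU count is an assumption made for the example; cpupower derives the count from its own topology code.

#include <unistd.h>

#include "helpers/sysfs.h"

/* Sketch: run fn() only on CPUs the helper above reports as online (1);
 * offline CPUs (0) and error returns (negative errno) are skipped alike. */
static void for_each_online_cpu(void (*fn)(unsigned int cpu))
{
	long cpus = sysconf(_SC_NPROCESSORS_CONF);
	unsigned int cpu;

	if (cpus < 1)
		return;

	for (cpu = 0; cpu < (unsigned int)cpus; cpu++) {
		if (sysfs_is_cpu_online(cpu) != 1)
			continue;
		fn(cpu);
	}
}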
diff --git a/tools/power/cpupower/utils/helpers/sysfs.h b/tools/power/cpupower/utils/helpers/sysfs.h index f9373e090637..8cb797bbceb0 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.h +++ b/tools/power/cpupower/utils/helpers/sysfs.h | |||
@@ -7,6 +7,8 @@ | |||
7 | 7 | ||
8 | extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); | 8 | extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); |
9 | 9 | ||
10 | extern int sysfs_is_cpu_online(unsigned int cpu); | ||
11 | |||
10 | extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, | 12 | extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, |
11 | unsigned int idlestate); | 13 | unsigned int idlestate); |
12 | extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu, | 14 | extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu, |
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 385ee5c7570c..4eae2c47ba48 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ b/tools/power/cpupower/utils/helpers/topology.c | |||
@@ -41,6 +41,8 @@ struct cpuid_core_info { | |||
41 | unsigned int pkg; | 41 | unsigned int pkg; |
42 | unsigned int thread; | 42 | unsigned int thread; |
43 | unsigned int cpu; | 43 | unsigned int cpu; |
44 | /* flags */ | ||
45 | unsigned int is_online:1; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | static int __compare(const void *t1, const void *t2) | 48 | static int __compare(const void *t1, const void *t2) |
@@ -78,6 +80,8 @@ int get_cpu_topology(struct cpupower_topology *cpu_top) | |||
78 | return -ENOMEM; | 80 | return -ENOMEM; |
79 | cpu_top->pkgs = cpu_top->cores = 0; | 81 | cpu_top->pkgs = cpu_top->cores = 0; |
80 | for (cpu = 0; cpu < cpus; cpu++) { | 82 | for (cpu = 0; cpu < cpus; cpu++) { |
83 | cpu_top->core_info[cpu].cpu = cpu; | ||
84 | cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); | ||
81 | cpu_top->core_info[cpu].pkg = | 85 | cpu_top->core_info[cpu].pkg = |
82 | sysfs_topology_read_file(cpu, "physical_package_id"); | 86 | sysfs_topology_read_file(cpu, "physical_package_id"); |
83 | if ((int)cpu_top->core_info[cpu].pkg != -1 && | 87 | if ((int)cpu_top->core_info[cpu].pkg != -1 && |
@@ -85,7 +89,6 @@ int get_cpu_topology(struct cpupower_topology *cpu_top) | |||
85 | cpu_top->pkgs = cpu_top->core_info[cpu].pkg; | 89 | cpu_top->pkgs = cpu_top->core_info[cpu].pkg; |
86 | cpu_top->core_info[cpu].core = | 90 | cpu_top->core_info[cpu].core = |
87 | sysfs_topology_read_file(cpu, "core_id"); | 91 | sysfs_topology_read_file(cpu, "core_id"); |
88 | cpu_top->core_info[cpu].cpu = cpu; | ||
89 | } | 92 | } |
90 | cpu_top->pkgs++; | 93 | cpu_top->pkgs++; |
91 | 94 | ||
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c index d048b96a6155..bcd22a1a3970 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c | |||
@@ -134,7 +134,7 @@ static struct cpuidle_monitor *cpuidle_register(void) | |||
134 | /* Assume idle state count is the same for all CPUs */ | 134 | /* Assume idle state count is the same for all CPUs */ |
135 | cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0); | 135 | cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0); |
136 | 136 | ||
137 | if (cpuidle_sysfs_monitor.hw_states_num == 0) | 137 | if (cpuidle_sysfs_monitor.hw_states_num <= 0) |
138 | return NULL; | 138 | return NULL; |
139 | 139 | ||
140 | for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { | 140 | for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { |
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index ba4bf068380d..0d6571e418db 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | |||
@@ -43,6 +43,12 @@ static struct cpupower_topology cpu_top; | |||
43 | /* ToDo: Document this in the manpage */ | 43 | /* ToDo: Document this in the manpage */ |
44 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; | 44 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; |
45 | 45 | ||
46 | static void print_wrong_arg_exit(void) | ||
47 | { | ||
48 | printf(_("invalid or unknown argument\n")); | ||
49 | exit(EXIT_FAILURE); | ||
50 | } | ||
51 | |||
46 | long long timespec_diff_us(struct timespec start, struct timespec end) | 52 | long long timespec_diff_us(struct timespec start, struct timespec end) |
47 | { | 53 | { |
48 | struct timespec temp; | 54 | struct timespec temp; |
@@ -56,21 +62,6 @@ long long timespec_diff_us(struct timespec start, struct timespec end) | |||
56 | return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); | 62 | return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); |
57 | } | 63 | } |
58 | 64 | ||
59 | void monitor_help(void) | ||
60 | { | ||
61 | printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] command\n")); | ||
62 | printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] [ -i interval_sec ]\n")); | ||
63 | printf(_("cpupower monitor: -l\n")); | ||
64 | printf(_("\t command: pass an arbitrary command to measure specific workload\n")); | ||
65 | printf(_("\t -i: time intervall to measure for in seconds (default 1)\n")); | ||
66 | printf(_("\t -l: list available CPU sleep monitors (for use with -m)\n")); | ||
67 | printf(_("\t -m: show specific CPU sleep monitors only (in same order)\n")); | ||
68 | printf(_("\t -h: print this help\n")); | ||
69 | printf("\n"); | ||
70 | printf(_("only one of: -l, -m are allowed\nIf none of them is passed,")); | ||
71 | printf(_(" all supported monitors are shown\n")); | ||
72 | } | ||
73 | |||
74 | void print_n_spaces(int n) | 65 | void print_n_spaces(int n) |
75 | { | 66 | { |
76 | int x; | 67 | int x; |
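The sysfs-based maximum-frequency path added to mperf_monitor.c further down divides counter deltas by the wall-clock interval, so timespec_diff_us() becomes part of the measurement itself. Only its return statement is visible in the context above; a plausible full body handling the nanosecond borrow is sketched here (a reconstruction from the visible line, not necessarily the file's exact code):

	#include <stdio.h>
	#include <time.h>

	/* Microseconds between start and end, handling tv_nsec underflow. */
	long long timespec_diff_us(struct timespec start, struct timespec end)
	{
		struct timespec temp;

		if ((end.tv_nsec - start.tv_nsec) < 0) {
			temp.tv_sec = end.tv_sec - start.tv_sec - 1;
			temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
		} else {
			temp.tv_sec = end.tv_sec - start.tv_sec;
			temp.tv_nsec = end.tv_nsec - start.tv_nsec;
		}
		return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000);
	}

	int main(void)
	{
		struct timespec a = { .tv_sec = 1, .tv_nsec = 900000000 };
		struct timespec b = { .tv_sec = 3, .tv_nsec = 100000000 };

		/* 1.2 seconds -> 1200000 us */
		printf("%lld us\n", timespec_diff_us(a, b));
		return 0;
	}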
@@ -149,6 +140,10 @@ void print_results(int topology_depth, int cpu) | |||
149 | unsigned long long result; | 140 | unsigned long long result; |
150 | cstate_t s; | 141 | cstate_t s; |
151 | 142 | ||
143 | /* Careful: CPUs may have been re-sorted by pkg value, do not just use cpu */ | ||
144 | if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) | ||
145 | return; | ||
146 | |||
152 | if (topology_depth > 2) | 147 | if (topology_depth > 2) |
153 | printf("%4d|", cpu_top.core_info[cpu].pkg); | 148 | printf("%4d|", cpu_top.core_info[cpu].pkg); |
154 | if (topology_depth > 1) | 149 | if (topology_depth > 1) |
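The new early return keys the CPU selection off core_info[cpu].cpu rather than the loop index, because the array may have been re-sorted by package id. The idea can be shown with a plain 64-bit mask; cpupower's real cpus_chosen bitmask and bitmask_isbitset() behave analogously but scale to any CPU count (the code below is only an illustration, not those helpers):

	/* Illustrative stand-in for cpupower's bitmask helpers (<= 64 CPUs). */
	#include <stdio.h>
	#include <stdint.h>

	struct core_info { int pkg, core, cpu, is_online; };

	static int mask_isbitset(uint64_t mask, unsigned int cpu)
	{
		return (mask >> cpu) & 1;
	}

	int main(void)
	{
		/* Array re-sorted by pkg: the index no longer equals the CPU number. */
		struct core_info core_info[] = {
			{ 0, 0, 0, 1 }, { 0, 1, 2, 1 }, { 1, 0, 1, 1 }, { 1, 1, 3, 1 },
		};
		uint64_t cpus_chosen = (1ULL << 1) | (1ULL << 3); /* user asked for CPUs 1 and 3 */
		unsigned int i;

		for (i = 0; i < 4; i++) {
			if (!mask_isbitset(cpus_chosen, core_info[i].cpu))
				continue;	/* mirrors the early return in print_results() */
			printf("pkg %d core %d cpu %d\n",
			       core_info[i].pkg, core_info[i].core, core_info[i].cpu);
		}
		return 0;
	}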
@@ -190,9 +185,13 @@ void print_results(int topology_depth, int cpu) | |||
190 | } | 185 | } |
191 | } | 186 | } |
192 | } | 187 | } |
193 | /* cpu offline */ | 188 | /* |
194 | if (cpu_top.core_info[cpu].pkg == -1 || | 189 | * The monitor could still provide useful data, for example |
195 | cpu_top.core_info[cpu].core == -1) { | 190 | * AMD HW counters partly sit in PCI config space. |
191 | * It is up to the monitor plug-in to check .is_online; this message | ||
192 | * is just additional information. | ||
193 | */ | ||
194 | if (!cpu_top.core_info[cpu].is_online) { | ||
196 | printf(_(" *is offline\n")); | 195 | printf(_(" *is offline\n")); |
197 | return; | 196 | return; |
198 | } else | 197 | } else |
@@ -238,7 +237,6 @@ static void parse_monitor_param(char *param) | |||
238 | if (hits == 0) { | 237 | if (hits == 0) { |
239 | printf(_("No matching monitor found in %s, " | 238 | printf(_("No matching monitor found in %s, " |
240 | "try -l option\n"), param); | 239 | "try -l option\n"), param); |
241 | monitor_help(); | ||
242 | exit(EXIT_FAILURE); | 240 | exit(EXIT_FAILURE); |
243 | } | 241 | } |
244 | /* Override detected/registered monitors array with requested one */ | 242 | /* Override detected/registered monitors array with requested one */ |
@@ -335,37 +333,27 @@ static void cmdline(int argc, char *argv[]) | |||
335 | int opt; | 333 | int opt; |
336 | progname = basename(argv[0]); | 334 | progname = basename(argv[0]); |
337 | 335 | ||
338 | while ((opt = getopt(argc, argv, "+hli:m:")) != -1) { | 336 | while ((opt = getopt(argc, argv, "+li:m:")) != -1) { |
339 | switch (opt) { | 337 | switch (opt) { |
340 | case 'h': | ||
341 | monitor_help(); | ||
342 | exit(EXIT_SUCCESS); | ||
343 | case 'l': | 338 | case 'l': |
344 | if (mode) { | 339 | if (mode) |
345 | monitor_help(); | 340 | print_wrong_arg_exit(); |
346 | exit(EXIT_FAILURE); | ||
347 | } | ||
348 | mode = list; | 341 | mode = list; |
349 | break; | 342 | break; |
350 | case 'i': | 343 | case 'i': |
351 | /* only allow -i with -m or no option */ | 344 | /* only allow -i with -m or no option */ |
352 | if (mode && mode != show) { | 345 | if (mode && mode != show) |
353 | monitor_help(); | 346 | print_wrong_arg_exit(); |
354 | exit(EXIT_FAILURE); | ||
355 | } | ||
356 | interval = atoi(optarg); | 347 | interval = atoi(optarg); |
357 | break; | 348 | break; |
358 | case 'm': | 349 | case 'm': |
359 | if (mode) { | 350 | if (mode) |
360 | monitor_help(); | 351 | print_wrong_arg_exit(); |
361 | exit(EXIT_FAILURE); | ||
362 | } | ||
363 | mode = show; | 352 | mode = show; |
364 | show_monitors_param = optarg; | 353 | show_monitors_param = optarg; |
365 | break; | 354 | break; |
366 | default: | 355 | default: |
367 | monitor_help(); | 356 | print_wrong_arg_exit(); |
368 | exit(EXIT_FAILURE); | ||
369 | } | 357 | } |
370 | } | 358 | } |
371 | if (!mode) | 359 | if (!mode) |
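After this hunk every invalid option combination funnels into the single print_wrong_arg_exit() helper instead of printing the monitor-specific help, and -h now falls through to the default case. A stand-alone sketch of the simplified parsing with the same "+li:m:" optstring; names not visible in the hunk (such as the show_all default) are assumptions:

	/* Minimal sketch of the simplified option handling. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	enum operation_mode_e { list = 1, show, show_all };

	static void print_wrong_arg_exit(void)
	{
		printf("invalid or unknown argument\n");
		exit(EXIT_FAILURE);
	}

	int main(int argc, char *argv[])
	{
		int mode = 0, interval = 1, opt;
		char *show_monitors_param = NULL;

		while ((opt = getopt(argc, argv, "+li:m:")) != -1) {
			switch (opt) {
			case 'l':
				if (mode)
					print_wrong_arg_exit();
				mode = list;
				break;
			case 'i':
				/* only allow -i with -m or no option */
				if (mode && mode != show)
					print_wrong_arg_exit();
				interval = atoi(optarg);
				break;
			case 'm':
				if (mode)
					print_wrong_arg_exit();
				mode = show;
				show_monitors_param = optarg;
				break;
			default:
				print_wrong_arg_exit();
			}
		}
		if (!mode)
			mode = show_all;
		printf("mode=%d interval=%d monitors=%s\n", mode, interval,
		       show_monitors_param ? show_monitors_param : "(all)");
		return 0;
	}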
@@ -385,6 +373,10 @@ int cmd_monitor(int argc, char **argv) | |||
385 | return EXIT_FAILURE; | 373 | return EXIT_FAILURE; |
386 | } | 374 | } |
387 | 375 | ||
376 | /* Default is: monitor all CPUs */ | ||
377 | if (bitmask_isallclear(cpus_chosen)) | ||
378 | bitmask_setall(cpus_chosen); | ||
379 | |||
388 | dprint("System has up to %d CPU cores\n", cpu_count); | 380 | dprint("System has up to %d CPU cores\n", cpu_count); |
389 | 381 | ||
390 | for (num = 0; all_monitors[num]; num++) { | 382 | for (num = 0; all_monitors[num]; num++) { |
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c index 63ca87a05e5f..5650ab5a2c20 100644 --- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c | |||
@@ -22,12 +22,15 @@ | |||
22 | 22 | ||
23 | #define MSR_TSC 0x10 | 23 | #define MSR_TSC 0x10 |
24 | 24 | ||
25 | #define MSR_AMD_HWCR 0xc0010015 | ||
26 | |||
25 | enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; | 27 | enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; |
26 | 28 | ||
27 | static int mperf_get_count_percent(unsigned int self_id, double *percent, | 29 | static int mperf_get_count_percent(unsigned int self_id, double *percent, |
28 | unsigned int cpu); | 30 | unsigned int cpu); |
29 | static int mperf_get_count_freq(unsigned int id, unsigned long long *count, | 31 | static int mperf_get_count_freq(unsigned int id, unsigned long long *count, |
30 | unsigned int cpu); | 32 | unsigned int cpu); |
33 | static struct timespec time_start, time_end; | ||
31 | 34 | ||
32 | static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { | 35 | static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { |
33 | { | 36 | { |
@@ -54,19 +57,33 @@ static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { | |||
54 | }, | 57 | }, |
55 | }; | 58 | }; |
56 | 59 | ||
60 | enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF }; | ||
61 | static int max_freq_mode; | ||
62 | /* | ||
63 | * The max frequency mperf is ticking at (in C0), retrieved either: | ||
64 | * 1) by calculation after the measurement, if the TSC is known to tick at the mperf/P0 frequency | ||
65 | * 2) from cpufreq's /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time | ||
66 | * Option 1 is preferred as it also works without the cpufreq subsystem (e.g. on Xen) | ||
67 | */ | ||
68 | static unsigned long max_frequency; | ||
69 | |||
57 | static unsigned long long tsc_at_measure_start; | 70 | static unsigned long long tsc_at_measure_start; |
58 | static unsigned long long tsc_at_measure_end; | 71 | static unsigned long long tsc_at_measure_end; |
59 | static unsigned long max_frequency; | ||
60 | static unsigned long long *mperf_previous_count; | 72 | static unsigned long long *mperf_previous_count; |
61 | static unsigned long long *aperf_previous_count; | 73 | static unsigned long long *aperf_previous_count; |
62 | static unsigned long long *mperf_current_count; | 74 | static unsigned long long *mperf_current_count; |
63 | static unsigned long long *aperf_current_count; | 75 | static unsigned long long *aperf_current_count; |
76 | |||
64 | /* valid flag for all CPUs. If a MSR read failed it will be zero */ | 77 | /* valid flag for all CPUs. If a MSR read failed it will be zero */ |
65 | static int *is_valid; | 78 | static int *is_valid; |
66 | 79 | ||
67 | static int mperf_get_tsc(unsigned long long *tsc) | 80 | static int mperf_get_tsc(unsigned long long *tsc) |
68 | { | 81 | { |
69 | return read_msr(0, MSR_TSC, tsc); | 82 | int ret; |
83 | ret = read_msr(0, MSR_TSC, tsc); | ||
84 | if (ret) | ||
85 | dprint("Reading TSC MSR failed, returning %llu\n", *tsc); | ||
86 | return ret; | ||
70 | } | 87 | } |
71 | 88 | ||
72 | static int mperf_init_stats(unsigned int cpu) | 89 | static int mperf_init_stats(unsigned int cpu) |
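mperf_get_tsc() now reports failed reads via dprint(), but the read itself still goes through read_msr(). On Linux, userspace MSR access of this kind is normally done through the msr driver's per-CPU character device, roughly as below (an illustration of the mechanism, requiring the msr module and root privileges, not cpupower's exact helper):

	/* Illustrative MSR read via /dev/cpu/N/msr (needs the msr driver + root). */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_TSC 0x10

	static int msr_read(unsigned int cpu, uint32_t reg, uint64_t *val)
	{
		char path[32];
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%u/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		/* Each MSR is 8 bytes wide; the MSR number is the file offset. */
		if (pread(fd, val, sizeof(*val), reg) != sizeof(*val)) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		uint64_t tsc;

		if (msr_read(0, MSR_TSC, &tsc))
			perror("reading MSR_TSC on cpu0");
		else
			printf("TSC on cpu0: %llu\n", (unsigned long long)tsc);
		return 0;
	}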
@@ -97,36 +114,11 @@ static int mperf_measure_stats(unsigned int cpu) | |||
97 | return 0; | 114 | return 0; |
98 | } | 115 | } |
99 | 116 | ||
100 | /* | ||
101 | * get_average_perf() | ||
102 | * | ||
103 | * Returns the average performance (also considers boosted frequencies) | ||
104 | * | ||
105 | * Input: | ||
106 | * aperf_diff: Difference of the aperf register over a time period | ||
107 | * mperf_diff: Difference of the mperf register over the same time period | ||
108 | * max_freq: Maximum frequency (P0) | ||
109 | * | ||
110 | * Returns: | ||
111 | * Average performance over the time period | ||
112 | */ | ||
113 | static unsigned long get_average_perf(unsigned long long aperf_diff, | ||
114 | unsigned long long mperf_diff) | ||
115 | { | ||
116 | unsigned int perf_percent = 0; | ||
117 | if (((unsigned long)(-1) / 100) < aperf_diff) { | ||
118 | int shift_count = 7; | ||
119 | aperf_diff >>= shift_count; | ||
120 | mperf_diff >>= shift_count; | ||
121 | } | ||
122 | perf_percent = (aperf_diff * 100) / mperf_diff; | ||
123 | return (max_frequency * perf_percent) / 100; | ||
124 | } | ||
125 | |||
126 | static int mperf_get_count_percent(unsigned int id, double *percent, | 117 | static int mperf_get_count_percent(unsigned int id, double *percent, |
127 | unsigned int cpu) | 118 | unsigned int cpu) |
128 | { | 119 | { |
129 | unsigned long long aperf_diff, mperf_diff, tsc_diff; | 120 | unsigned long long aperf_diff, mperf_diff, tsc_diff; |
121 | unsigned long long timediff; | ||
130 | 122 | ||
131 | if (!is_valid[cpu]) | 123 | if (!is_valid[cpu]) |
132 | return -1; | 124 | return -1; |
@@ -136,11 +128,19 @@ static int mperf_get_count_percent(unsigned int id, double *percent, | |||
136 | 128 | ||
137 | mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; | 129 | mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; |
138 | aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; | 130 | aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; |
139 | tsc_diff = tsc_at_measure_end - tsc_at_measure_start; | ||
140 | 131 | ||
141 | *percent = 100.0 * mperf_diff / tsc_diff; | 132 | if (max_freq_mode == MAX_FREQ_TSC_REF) { |
142 | dprint("%s: mperf_diff: %llu, tsc_diff: %llu\n", | 133 | tsc_diff = tsc_at_measure_end - tsc_at_measure_start; |
143 | mperf_cstates[id].name, mperf_diff, tsc_diff); | 134 | *percent = 100.0 * mperf_diff / tsc_diff; |
135 | dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n", | ||
136 | mperf_cstates[id].name, mperf_diff, tsc_diff); | ||
137 | } else if (max_freq_mode == MAX_FREQ_SYSFS) { | ||
138 | timediff = timespec_diff_us(time_start, time_end); | ||
139 | *percent = 100.0 * mperf_diff / timediff; | ||
140 | dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n", | ||
141 | mperf_cstates[id].name, mperf_diff, timediff); | ||
142 | } else | ||
143 | return -1; | ||
144 | 144 | ||
145 | if (id == Cx) | 145 | if (id == Cx) |
146 | *percent = 100.0 - *percent; | 146 | *percent = 100.0 - *percent; |
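In TSC-reference mode the arithmetic is direct: mperf and the TSC both tick at the P0 rate, so mperf_diff / tsc_diff is the fraction of time spent in C0, and Cx is the remainder (the MAX_FREQ_SYSFS branch uses the measured wall-clock interval instead). A tiny worked example with invented counter values:

	#include <stdio.h>

	int main(void)
	{
		/* Invented deltas over one measurement interval. */
		unsigned long long mperf_diff = 600000000ULL;	/* mperf only ticks in C0   */
		unsigned long long tsc_diff   = 2400000000ULL;	/* TSC ticks the whole time */

		double c0 = 100.0 * mperf_diff / tsc_diff;	/* 25% of the time in C0   */
		double cx = 100.0 - c0;				/* 75% in some sleep state */

		printf("C0: %.1f%%  Cx: %.1f%%\n", c0, cx);
		return 0;
	}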
@@ -154,7 +154,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent, | |||
154 | static int mperf_get_count_freq(unsigned int id, unsigned long long *count, | 154 | static int mperf_get_count_freq(unsigned int id, unsigned long long *count, |
155 | unsigned int cpu) | 155 | unsigned int cpu) |
156 | { | 156 | { |
157 | unsigned long long aperf_diff, mperf_diff; | 157 | unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff; |
158 | 158 | ||
159 | if (id != AVG_FREQ) | 159 | if (id != AVG_FREQ) |
160 | return 1; | 160 | return 1; |
@@ -165,11 +165,21 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count, | |||
165 | mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; | 165 | mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; |
166 | aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; | 166 | aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; |
167 | 167 | ||
168 | /* Return MHz for now, might want to return KHz if column width is more | 168 | if (max_freq_mode == MAX_FREQ_TSC_REF) { |
169 | generic */ | 169 | /* Calculate max_freq from TSC count */ |
170 | *count = get_average_perf(aperf_diff, mperf_diff) / 1000; | 170 | tsc_diff = tsc_at_measure_end - tsc_at_measure_start; |
171 | dprint("%s: %llu\n", mperf_cstates[id].name, *count); | 171 | time_diff = timespec_diff_us(time_start, time_end); |
172 | max_frequency = tsc_diff / time_diff; | ||
173 | } | ||
172 | 174 | ||
175 | *count = max_frequency * ((double)aperf_diff / mperf_diff); | ||
176 | dprint("%s: Average freq based on %s maximum frequency:\n", | ||
177 | mperf_cstates[id].name, | ||
178 | (max_freq_mode == MAX_FREQ_TSC_REF) ? "TSC calculated" : "sysfs read"); | ||
179 | dprint("%max_frequency: %lu", max_frequency); | ||
180 | dprint("aperf_diff: %llu\n", aperf_diff); | ||
181 | dprint("mperf_diff: %llu\n", mperf_diff); | ||
182 | dprint("avg freq: %llu\n", *count); | ||
173 | return 0; | 183 | return 0; |
174 | } | 184 | } |
175 | 185 | ||
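The average frequency follows from the APERF/MPERF ratio: APERF advances at the frequency actually delivered while the CPU runs, MPERF at the P0 reference, so count = max_frequency * aperf_diff / mperf_diff. In TSC-reference mode max_frequency itself falls out as tsc_diff / time_diff, i.e. TSC ticks per microsecond, which is MHz. A small numeric sketch with invented values:

	#include <stdio.h>

	int main(void)
	{
		/* Invented values: 2 GHz P0 machine, measured for 1 second. */
		unsigned long long tsc_diff   = 2000000000ULL;	/* TSC ticks over the interval */
		unsigned long long time_diff  = 1000000ULL;	/* interval in microseconds    */
		unsigned long long mperf_diff = 500000000ULL;	/* P0-rate ticks spent in C0   */
		unsigned long long aperf_diff = 750000000ULL;	/* actual-rate ticks in C0     */

		unsigned long max_frequency = tsc_diff / time_diff;	/* 2000 MHz */
		unsigned long long avg = max_frequency * ((double)aperf_diff / mperf_diff);

		/* aperf > mperf means boosting: 3000 MHz average while running. */
		printf("max %lu MHz, average %llu MHz\n", max_frequency, avg);
		return 0;
	}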
@@ -178,6 +188,7 @@ static int mperf_start(void) | |||
178 | int cpu; | 188 | int cpu; |
179 | unsigned long long dbg; | 189 | unsigned long long dbg; |
180 | 190 | ||
191 | clock_gettime(CLOCK_REALTIME, &time_start); | ||
181 | mperf_get_tsc(&tsc_at_measure_start); | 192 | mperf_get_tsc(&tsc_at_measure_start); |
182 | 193 | ||
183 | for (cpu = 0; cpu < cpu_count; cpu++) | 194 | for (cpu = 0; cpu < cpu_count; cpu++) |
@@ -193,32 +204,104 @@ static int mperf_stop(void) | |||
193 | unsigned long long dbg; | 204 | unsigned long long dbg; |
194 | int cpu; | 205 | int cpu; |
195 | 206 | ||
196 | mperf_get_tsc(&tsc_at_measure_end); | ||
197 | |||
198 | for (cpu = 0; cpu < cpu_count; cpu++) | 207 | for (cpu = 0; cpu < cpu_count; cpu++) |
199 | mperf_measure_stats(cpu); | 208 | mperf_measure_stats(cpu); |
200 | 209 | ||
210 | mperf_get_tsc(&tsc_at_measure_end); | ||
211 | clock_gettime(CLOCK_REALTIME, &time_end); | ||
212 | |||
201 | mperf_get_tsc(&dbg); | 213 | mperf_get_tsc(&dbg); |
202 | dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); | 214 | dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); |
203 | 215 | ||
204 | return 0; | 216 | return 0; |
205 | } | 217 | } |
206 | 218 | ||
207 | struct cpuidle_monitor mperf_monitor; | 219 | /* |
208 | 220 | * Mperf register is defined to tick at P0 (maximum) frequency | |
209 | struct cpuidle_monitor *mperf_register(void) | 221 | * |
222 | * Instead of reading out P0, which can be tricky to obtain from HW, | ||
223 | * we use TSC counter if it reliably ticks at P0/mperf frequency. | ||
224 | * | ||
225 | * Still try to fall back to: | ||
226 | * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq | ||
227 | * on older Intel HW without invariant TSC feature. | ||
228 | * Or on AMD machines where the TSC does not tick at P0 (none exist yet, | ||
229 | * but this is still double-checked via MSR_AMD_HWCR). | ||
230 | * | ||
231 | * On these machines the user would still get useful mperf | ||
232 | * stats when acpi-cpufreq driver is loaded. | ||
233 | */ | ||
234 | static int init_maxfreq_mode(void) | ||
210 | { | 235 | { |
236 | int ret; | ||
237 | unsigned long long hwcr; | ||
211 | unsigned long min; | 238 | unsigned long min; |
212 | 239 | ||
213 | if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) | 240 | if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC)) |
214 | return NULL; | 241 | goto use_sysfs; |
215 | 242 | ||
216 | /* Assume min/max all the same on all cores */ | 243 | if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) { |
244 | /* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf | ||
245 | * freq. | ||
246 | * A test whether hwcr is accessible/available would be: | ||
247 | * (cpupower_cpu_info.family > 0x10 || | ||
248 | * cpupower_cpu_info.family == 0x10 && | ||
249 | * cpupower_cpu_info.model >= 0x2)) | ||
250 | * This should be the case for all aperf/mperf | ||
251 | * capable AMD machines and is therefore safe to test here. | ||
252 | * Compare with Linus kernel git commit: acf01734b1747b1ec4 | ||
253 | */ | ||
254 | ret = read_msr(0, MSR_AMD_HWCR, &hwcr); | ||
255 | /* | ||
256 | * If the MSR read failed, assume a Xen system that did | ||
257 | * not explicitly provide access to it and assume TSC works | ||
258 | */ | ||
259 | if (ret != 0) { | ||
260 | dprint("TSC read 0x%x failed - assume TSC working\n", | ||
261 | MSR_AMD_HWCR); | ||
262 | return 0; | ||
263 | } else if (1 & (hwcr >> 24)) { | ||
264 | max_freq_mode = MAX_FREQ_TSC_REF; | ||
265 | return 0; | ||
266 | } else { /* Use sysfs max frequency if available */ } | ||
267 | } else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) { | ||
268 | /* | ||
269 | * On Intel we assume mperf (in C0) is ticking at same | ||
270 | * rate than TSC | ||
271 | */ | ||
272 | max_freq_mode = MAX_FREQ_TSC_REF; | ||
273 | return 0; | ||
274 | } | ||
275 | use_sysfs: | ||
217 | if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { | 276 | if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { |
218 | dprint("Cannot retrieve max freq from cpufreq kernel " | 277 | dprint("Cannot retrieve max freq from cpufreq kernel " |
219 | "subsystem\n"); | 278 | "subsystem\n"); |
220 | return NULL; | 279 | return -1; |
221 | } | 280 | } |
281 | max_freq_mode = MAX_FREQ_SYSFS; | ||
282 | return 0; | ||
283 | } | ||
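The AMD branch of init_maxfreq_mode() hinges on bit 24 of MSR_AMD_HWCR (the TscFreqSel bit): if set, the TSC increments at the P0 frequency and TSC-reference mode is safe; if the MSR cannot be read at all (e.g. under Xen), the code assumes the same. The decision tree, pulled out into a standalone sketch (helper and parameter names here are invented for illustration):

	/* Standalone sketch of the max-frequency mode decision, not the real helper. */
	#include <stdint.h>
	#include <stdio.h>

	enum max_freq_mode_e { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF };

	/* hwcr_valid/hwcr stand in for the MSR_AMD_HWCR read shown in the hunk. */
	static enum max_freq_mode_e pick_max_freq_mode(int is_amd, int is_intel,
						       int has_inv_tsc,
						       int hwcr_valid, uint64_t hwcr)
	{
		if (!has_inv_tsc)
			return MAX_FREQ_SYSFS;		/* TSC stops or varies: use cpufreq sysfs */
		if (is_amd) {
			if (!hwcr_valid)
				return MAX_FREQ_TSC_REF; /* e.g. Xen hides the MSR: assume TSC is fine */
			if ((hwcr >> 24) & 1)		 /* TscFreqSel: TSC ticks at P0 */
				return MAX_FREQ_TSC_REF;
			return MAX_FREQ_SYSFS;
		}
		if (is_intel)
			return MAX_FREQ_TSC_REF;	/* invariant TSC ticks at the mperf rate */
		return MAX_FREQ_SYSFS;
	}

	int main(void)
	{
		printf("AMD, HWCR[24]=1 -> %s\n",
		       pick_max_freq_mode(1, 0, 1, 1, 1ULL << 24) == MAX_FREQ_TSC_REF ?
		       "MAX_FREQ_TSC_REF" : "MAX_FREQ_SYSFS");
		return 0;
	}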
284 | |||
285 | /* | ||
286 | * This monitor provides: | ||
287 | * | ||
288 | * 1) Average frequency a CPU resided in | ||
289 | * This always works if the CPU has aperf/mperf capabilities | ||
290 | * | ||
291 | * 2) C0 and Cx (any sleep state) time a CPU resided in | ||
292 | * Works if mperf timer stops ticking in sleep states which | ||
293 | * seem to be the case on all current HW. | ||
294 | * Both is directly retrieved from HW registers and is independent | ||
295 | * from kernel statistics. | ||
296 | */ | ||
297 | struct cpuidle_monitor mperf_monitor; | ||
298 | struct cpuidle_monitor *mperf_register(void) | ||
299 | { | ||
300 | if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) | ||
301 | return NULL; | ||
302 | |||
303 | if (init_maxfreq_mode()) | ||
304 | return NULL; | ||
222 | 305 | ||
223 | /* Free this at program termination */ | 306 | /* Free this at program termination */ |
224 | is_valid = calloc(cpu_count, sizeof(int)); | 307 | is_valid = calloc(cpu_count, sizeof(int)); |