109 files changed, 2510 insertions, 1985 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index 5210a51c90fd..a3c5a6685036 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci | |||
@@ -70,18 +70,15 @@ Date: September, 2011 | |||
70 | Contact: Neil Horman <nhorman@tuxdriver.com> | 70 | Contact: Neil Horman <nhorman@tuxdriver.com> |
71 | Description: | 71 | Description: |
72 | The /sys/devices/.../msi_irqs directory contains a variable set | 72 | The /sys/devices/.../msi_irqs directory contains a variable set |
73 | of sub-directories, with each sub-directory being named after a | 73 | of files, with each file being named after a corresponding msi |
74 | corresponding msi irq vector allocated to that device. Each | 74 | irq vector allocated to that device. |
75 | numbered sub-directory N contains attributes of that irq. | ||
76 | Note that this directory is not created for device drivers which | ||
77 | do not support msi irqs | ||
78 | 75 | ||
79 | What: /sys/bus/pci/devices/.../msi_irqs/<N>/mode | 76 | What: /sys/bus/pci/devices/.../msi_irqs/<N> |
80 | Date: September 2011 | 77 | Date: September 2011 |
81 | Contact: Neil Horman <nhorman@tuxdriver.com> | 78 | Contact: Neil Horman <nhorman@tuxdriver.com> |
82 | Description: | 79 | Description: |
83 | This attribute indicates the mode that the irq vector named by | 80 | This attribute indicates the mode that the irq vector named by |
84 | the parent directory is in (msi vs. msix) | 81 | the file is in (msi vs. msix) |
85 | 82 | ||
86 | What: /sys/bus/pci/devices/.../remove | 83 | What: /sys/bus/pci/devices/.../remove |
87 | Date: January 2009 | 84 | Date: January 2009 |
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX index 812b17fe3ed0..147231f1613e 100644 --- a/Documentation/PCI/00-INDEX +++ b/Documentation/PCI/00-INDEX | |||
@@ -2,12 +2,12 @@ | |||
2 | - this file | 2 | - this file |
3 | MSI-HOWTO.txt | 3 | MSI-HOWTO.txt |
4 | - the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ. | 4 | - the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ. |
5 | PCI-DMA-mapping.txt | ||
6 | - info for PCI drivers using DMA portably across all platforms | ||
7 | PCIEBUS-HOWTO.txt | 5 | PCIEBUS-HOWTO.txt |
8 | - a guide describing the PCI Express Port Bus driver | 6 | - a guide describing the PCI Express Port Bus driver |
9 | pci-error-recovery.txt | 7 | pci-error-recovery.txt |
10 | - info on PCI error recovery | 8 | - info on PCI error recovery |
9 | pci-iov-howto.txt | ||
10 | - the PCI Express I/O Virtualization HOWTO | ||
11 | pci.txt | 11 | pci.txt |
12 | - info on the PCI subsystem for device driver authors | 12 | - info on the PCI subsystem for device driver authors |
13 | pcieaer-howto.txt | 13 | pcieaer-howto.txt |
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index a09178086c30..a8d01005f480 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt | |||
@@ -82,93 +82,111 @@ Most of the hard work is done for the driver in the PCI layer. It simply | |||
82 | has to request that the PCI layer set up the MSI capability for this | 82 | has to request that the PCI layer set up the MSI capability for this |
83 | device. | 83 | device. |
84 | 84 | ||
85 | 4.2.1 pci_enable_msi | 85 | 4.2.1 pci_enable_msi_range |
86 | 86 | ||
87 | int pci_enable_msi(struct pci_dev *dev) | 87 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) |
88 | 88 | ||
89 | A successful call allocates ONE interrupt to the device, regardless | 89 | This function allows a device driver to request any number of MSI |
90 | of how many MSIs the device supports. The device is switched from | 90 | interrupts within the specified range from 'minvec' to 'maxvec'. |
91 | pin-based interrupt mode to MSI mode. The dev->irq number is changed | ||
92 | to a new number which represents the message signaled interrupt; | ||
93 | consequently, this function should be called before the driver calls | ||
94 | request_irq(), because an MSI is delivered via a vector that is | ||
95 | different from the vector of a pin-based interrupt. | ||
96 | 91 | ||
97 | 4.2.2 pci_enable_msi_block | 92 | If this function returns a positive number, it indicates the number of |
93 | MSI interrupts that have been successfully allocated. In this case | ||
94 | the device is switched from pin-based interrupt mode to MSI mode and | ||
95 | dev->irq is updated to the lowest of the new interrupts assigned to it. | ||
96 | The other interrupts assigned to the device are in the range dev->irq | ||
97 | to dev->irq + returned value - 1. The device driver can use the returned | ||
98 | number of successfully allocated MSI interrupts to further allocate | ||
99 | and initialize device resources. | ||
98 | 100 | ||
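As an illustration only, a minimal sketch of a driver acting on the returned count and registering a handler for each allocated vector - the foo_* names, the foo_adapter fields and the 1..8 range are assumptions made up for this example:

static int foo_driver_setup_msi(struct foo_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, j, rc, nvec;

	/* accept anything between 1 and 8 vectors (example policy) */
	nvec = pci_enable_msi_range(pdev, 1, 8);
	if (nvec < 0)
		return nvec;

	/* the vectors are pdev->irq .. pdev->irq + nvec - 1 */
	for (i = 0; i < nvec; i++) {
		rc = request_irq(pdev->irq + i, foo_interrupt, 0,
				 "foo", adapter);
		if (rc) {
			for (j = 0; j < i; j++)
				free_irq(pdev->irq + j, adapter);
			pci_disable_msi(pdev);
			return rc;
		}
	}

	adapter->nvec = nvec;
	return 0;
}
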
99 | int pci_enable_msi_block(struct pci_dev *dev, int count) | 101 | If this function returns a negative number, it indicates an error and |
102 | the driver should not attempt to request any more MSI interrupts for | ||
103 | this device. | ||
100 | 104 | ||
101 | This variation on the above call allows a device driver to request multiple | 105 | This function should be called before the driver calls request_irq(), |
102 | MSIs. The MSI specification only allows interrupts to be allocated in | 106 | because MSI interrupts are delivered via vectors that are different |
103 | powers of two, up to a maximum of 2^5 (32). | 107 | from the vector of a pin-based interrupt. |
104 | 108 | ||
105 | If this function returns 0, it has succeeded in allocating at least as many | 109 | It is ideal if drivers can cope with a variable number of MSI interrupts; |
106 | interrupts as the driver requested (it may have allocated more in order | 110 | there are many reasons why the platform may not be able to provide the |
107 | to satisfy the power-of-two requirement). In this case, the function | 111 | exact number that a driver asks for. |
108 | enables MSI on this device and updates dev->irq to be the lowest of | ||
109 | the new interrupts assigned to it. The other interrupts assigned to | ||
110 | the device are in the range dev->irq to dev->irq + count - 1. | ||
111 | 112 | ||
112 | If this function returns a negative number, it indicates an error and | 113 | There could be devices that cannot operate with just any number of MSI |
113 | the driver should not attempt to request any more MSI interrupts for | 114 | interrupts within a range. See section 4.3.1.3 for an idea of how to |
114 | this device. If this function returns a positive number, it is | 115 | handle such devices for MSI-X - the same logic applies to MSI. |
115 | less than 'count' and indicates the number of interrupts that could have | ||
116 | been allocated. In neither case is the irq value updated or the device | ||
117 | switched into MSI mode. | ||
118 | |||
119 | The device driver must decide what action to take if | ||
120 | pci_enable_msi_block() returns a value less than the number requested. | ||
121 | For instance, the driver could still make use of fewer interrupts; | ||
122 | in this case the driver should call pci_enable_msi_block() | ||
123 | again. Note that it is not guaranteed to succeed, even when the | ||
124 | 'count' has been reduced to the value returned from a previous call to | ||
125 | pci_enable_msi_block(). This is because there are multiple constraints | ||
126 | on the number of vectors that can be allocated; pci_enable_msi_block() | ||
127 | returns as soon as it finds any constraint that doesn't allow the | ||
128 | call to succeed. | ||
129 | |||
130 | 4.2.3 pci_enable_msi_block_auto | ||
131 | |||
132 | int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *count) | ||
133 | |||
134 | This variation on pci_enable_msi() call allows a device driver to request | ||
135 | the maximum possible number of MSIs. The MSI specification only allows | ||
136 | interrupts to be allocated in powers of two, up to a maximum of 2^5 (32). | ||
137 | |||
138 | If this function returns a positive number, it indicates that it has | ||
139 | succeeded and the returned value is the number of allocated interrupts. In | ||
140 | this case, the function enables MSI on this device and updates dev->irq to | ||
141 | be the lowest of the new interrupts assigned to it. The other interrupts | ||
142 | assigned to the device are in the range dev->irq to dev->irq + returned | ||
143 | value - 1. | ||
144 | 116 | ||
145 | If this function returns a negative number, it indicates an error and | 117 | 4.2.1.1 Maximum possible number of MSI interrupts |
146 | the driver should not attempt to request any more MSI interrupts for | 118 | |
147 | this device. | 119 | The typical usage of MSI interrupts is to allocate as many vectors as |
120 | possible, likely up to the limit returned by the pci_msi_vec_count() function: | ||
121 | |||
122 | static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec) | ||
123 | { | ||
124 | return pci_enable_msi_range(pdev, 1, nvec); | ||
125 | } | ||
126 | |||
127 | Note that the value of the 'minvec' parameter is 1. As 'minvec' is inclusive, | ||
128 | the value of 0 would be meaningless and could result in an error. | ||
148 | 129 | ||
149 | If the device driver needs to know the number of interrupts the device | 130 | Some devices have a minimum limit on the number of MSI interrupts. |
150 | supports it can pass the pointer count where that number is stored. The | 131 | In this case the function could look like this: |
151 | device driver must decide what action to take if pci_enable_msi_block_auto() | ||
152 | succeeds, but returns a value less than the number of interrupts supported. | ||
153 | If the device driver does not need to know the number of interrupts | ||
154 | supported, it can set the pointer count to NULL. | ||
155 | 132 | ||
156 | 4.2.4 pci_disable_msi | 133 | static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec) |
134 | { | ||
135 | return pci_enable_msi_range(pdev, FOO_DRIVER_MINIMUM_NVEC, nvec); | ||
136 | } | ||
137 | |||
138 | 4.2.1.2 Exact number of MSI interrupts | ||
139 | |||
140 | If a driver is unable or unwilling to deal with a variable number of MSI | ||
141 | interrupts, it can request a particular number of interrupts by passing | ||
142 | that number to the pci_enable_msi_range() function as both 'minvec' and 'maxvec' | ||
143 | parameters: | ||
144 | |||
145 | static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec) | ||
146 | { | ||
147 | return pci_enable_msi_range(pdev, nvec, nvec); | ||
148 | } | ||
149 | |||
150 | 4.2.1.3 Single MSI mode | ||
151 | |||
152 | The most common example of the request type described above is | ||
153 | enabling single MSI mode for a device. It can be done by passing | ||
154 | two 1s as 'minvec' and 'maxvec': | ||
155 | |||
156 | static int foo_driver_enable_single_msi(struct pci_dev *pdev) | ||
157 | { | ||
158 | return pci_enable_msi_range(pdev, 1, 1); | ||
159 | } | ||
160 | |||
161 | 4.2.2 pci_disable_msi | ||
157 | 162 | ||
158 | void pci_disable_msi(struct pci_dev *dev) | 163 | void pci_disable_msi(struct pci_dev *dev) |
159 | 164 | ||
160 | This function should be used to undo the effect of pci_enable_msi() or | 165 | This function should be used to undo the effect of pci_enable_msi_range(). |
161 | pci_enable_msi_block() or pci_enable_msi_block_auto(). Calling it restores | 166 | Calling it restores dev->irq to the pin-based interrupt number and frees |
162 | dev->irq to the pin-based interrupt number and frees the previously | 167 | the previously allocated MSIs. The interrupts may subsequently be assigned |
163 | allocated message signaled interrupt(s). The interrupt may subsequently be | 168 | to another device, so drivers should not cache the value of dev->irq. |
164 | assigned to another device, so drivers should not cache the value of | ||
165 | dev->irq. | ||
166 | 169 | ||
167 | Before calling this function, a device driver must always call free_irq() | 170 | Before calling this function, a device driver must always call free_irq() |
168 | on any interrupt for which it previously called request_irq(). | 171 | on any interrupt for which it previously called request_irq(). |
169 | Failure to do so results in a BUG_ON(), leaving the device with | 172 | Failure to do so results in a BUG_ON(), leaving the device with |
170 | MSI enabled and thus leaking its vector. | 173 | MSI enabled and thus leaking its vector. |
171 | 174 | ||
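A matching teardown sketch, assuming the hypothetical foo_adapter fields used in the setup example above:

static void foo_driver_teardown_msi(struct foo_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i;

	/* free_irq() must be called before pci_disable_msi() */
	for (i = 0; i < adapter->nvec; i++)
		free_irq(pdev->irq + i, adapter);

	pci_disable_msi(pdev);
}
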
175 | 4.2.3 pci_msi_vec_count | ||
176 | |||
177 | int pci_msi_vec_count(struct pci_dev *dev) | ||
178 | |||
179 | This function could be used to retrieve the number of MSI vectors the | ||
180 | device requested (via the Multiple Message Capable register). The MSI | ||
181 | specification only allows the returned value to be a power of two, | ||
182 | up to a maximum of 2^5 (32). | ||
183 | |||
184 | If this function returns a negative number, it indicates the device is | ||
185 | not capable of sending MSIs. | ||
186 | |||
187 | If this function returns a positive number, it indicates the maximum | ||
188 | number of MSI interrupt vectors that could be allocated. | ||
189 | |||
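For example, a driver that wants as many MSI vectors as the device advertises could combine the two calls like this (a sketch that assumes the device copes with any count of at least one):

static int foo_driver_enable_all_msi(struct pci_dev *pdev)
{
	int nvec = pci_msi_vec_count(pdev);

	if (nvec < 0)
		return nvec;

	return pci_enable_msi_range(pdev, 1, nvec);
}
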
172 | 4.3 Using MSI-X | 190 | 4.3 Using MSI-X |
173 | 191 | ||
174 | The MSI-X capability is much more flexible than the MSI capability. | 192 | The MSI-X capability is much more flexible than the MSI capability. |
@@ -188,26 +206,31 @@ in each element of the array to indicate for which entries the kernel | |||
188 | should assign interrupts; it is invalid to fill in two entries with the | 206 | should assign interrupts; it is invalid to fill in two entries with the |
189 | same number. | 207 | same number. |
190 | 208 | ||
191 | 4.3.1 pci_enable_msix | 209 | 4.3.1 pci_enable_msix_range |
192 | 210 | ||
193 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) | 211 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, |
212 | int minvec, int maxvec) | ||
194 | 213 | ||
195 | Calling this function asks the PCI subsystem to allocate 'nvec' MSIs. | 214 | Calling this function asks the PCI subsystem to allocate any number of |
215 | MSI-X interrupts within the specified range from 'minvec' to 'maxvec'. | ||
196 | The 'entries' argument is a pointer to an array of msix_entry structs | 216 | The 'entries' argument is a pointer to an array of msix_entry structs |
197 | which should be at least 'nvec' entries in size. On success, the | 217 | which should be at least 'maxvec' entries in size. |
198 | device is switched into MSI-X mode and the function returns 0. | 218 | |
199 | The 'vector' member in each entry is populated with the interrupt number; | 219 | On success, the device is switched into MSI-X mode and the function |
220 | returns the number of MSI-X interrupts that have been successfully | ||
221 | allocated. In this case the 'vector' member in entries numbered from | ||
222 | 0 to the returned value - 1 is populated with the interrupt number; | ||
200 | the driver should then call request_irq() for each 'vector' that it | 223 | the driver should then call request_irq() for each 'vector' that it |
201 | decides to use. The device driver is responsible for keeping track of the | 224 | decides to use. The device driver is responsible for keeping track of the |
202 | interrupts assigned to the MSI-X vectors so it can free them again later. | 225 | interrupts assigned to the MSI-X vectors so it can free them again later. |
226 | The device driver can use the returned number of successfully allocated MSI-X | ||
227 | interrupts to further allocate and initialize device resources. | ||
203 | 228 | ||
204 | If this function returns a negative number, it indicates an error and | 229 | If this function returns a negative number, it indicates an error and |
205 | the driver should not attempt to allocate any more MSI-X interrupts for | 230 | the driver should not attempt to allocate any more MSI-X interrupts for |
206 | this device. If it returns a positive number, it indicates the maximum | 231 | this device. |
207 | number of interrupt vectors that could have been allocated. See example | ||
208 | below. | ||
209 | 232 | ||
210 | This function, in contrast with pci_enable_msi(), does not adjust | 233 | This function, in contrast with pci_enable_msi_range(), does not adjust |
211 | dev->irq. The device will not generate interrupts for this interrupt | 234 | dev->irq. The device will not generate interrupts for this interrupt |
212 | number once MSI-X is enabled. | 235 | number once MSI-X is enabled. |
213 | 236 | ||
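Putting the above together, a minimal sketch of MSI-X setup - FOO_NVEC, the foo_* names and the foo_adapter fields are assumptions for illustration:

static int foo_driver_setup_msix(struct foo_adapter *adapter)
{
	int i, rc, nvec;

	/* tell the kernel which MSI-X table entries should be used */
	for (i = 0; i < FOO_NVEC; i++)
		adapter->msix_entries[i].entry = i;

	nvec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				     1, FOO_NVEC);
	if (nvec < 0)
		return nvec;

	/* the 'vector' members of entries 0 .. nvec - 1 are now valid */
	for (i = 0; i < nvec; i++) {
		rc = request_irq(adapter->msix_entries[i].vector,
				 foo_msix_interrupt, 0, "foo", adapter);
		if (rc)
			goto err;
	}

	adapter->nvec = nvec;
	return 0;

err:
	while (--i >= 0)
		free_irq(adapter->msix_entries[i].vector, adapter);
	pci_disable_msix(adapter->pdev);
	return rc;
}
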
@@ -218,28 +241,103 @@ It is ideal if drivers can cope with a variable number of MSI-X interrupts; | |||
218 | there are many reasons why the platform may not be able to provide the | 241 | there are many reasons why the platform may not be able to provide the |
219 | exact number that a driver asks for. | 242 | exact number that a driver asks for. |
220 | 243 | ||
221 | A request loop to achieve that might look like: | 244 | There could be devices that cannot operate with just any number of MSI-X |
245 | interrupts within a range. For example, a network adapter might need | ||
246 | four vectors per queue it provides, so the number of MSI-X interrupts | ||
247 | allocated should be a multiple of four. In this case the | ||
248 | pci_enable_msix_range() interface cannot be used alone to request MSI-X | ||
249 | interrupts (since it can allocate any number within the range, with no | ||
250 | notion of the multiple of four) and the device driver should implement | ||
251 | custom logic to request the required number of MSI-X interrupts. | ||
252 | |||
253 | 4.3.1.1 Maximum possible number of MSI-X interrupts | ||
254 | |||
255 | The typical usage of MSI-X interrupts is to allocate as many vectors as | ||
256 | possible, likely up to the limit returned by the pci_msix_vec_count() function: | ||
222 | 257 | ||
223 | static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) | 258 | static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) |
224 | { | 259 | { |
225 | while (nvec >= FOO_DRIVER_MINIMUM_NVEC) { | 260 | return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, |
226 | rc = pci_enable_msix(adapter->pdev, | 261 | 1, nvec); |
227 | adapter->msix_entries, nvec); | 262 | } |
228 | if (rc > 0) | 263 | |
229 | nvec = rc; | 264 | Note that the value of the 'minvec' parameter is 1. As 'minvec' is inclusive, |
230 | else | 265 | the value of 0 would be meaningless and could result in an error. |
231 | return rc; | 266 | |
267 | Some devices have a minimum limit on the number of MSI-X interrupts. | ||
268 | In this case the function could look like this: | ||
269 | |||
270 | static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) | ||
271 | { | ||
272 | return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, | ||
273 | FOO_DRIVER_MINIMUM_NVEC, nvec); | ||
274 | } | ||
275 | |||
276 | 4.3.1.2 Exact number of MSI-X interrupts | ||
277 | |||
278 | If a driver is unable or unwilling to deal with a variable number of MSI-X | ||
279 | interrupts, it can request a particular number of interrupts by passing | ||
280 | that number to the pci_enable_msix_range() function as both 'minvec' and 'maxvec' | ||
281 | parameters: | ||
282 | |||
283 | static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) | ||
284 | { | ||
285 | return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, | ||
286 | nvec, nvec); | ||
287 | } | ||
288 | |||
289 | 4.3.1.3 Specific requirements to the number of MSI-X interrupts | ||
290 | |||
291 | As noted above, there could be devices that cannot operate with just any | ||
292 | number of MSI-X interrupts within a range. For example, assume a device that | ||
293 | is only capable of sending a number of MSI-X interrupts which is a power of | ||
294 | two. A routine that enables MSI-X mode for such a device might look like this: | ||
295 | |||
296 | /* | ||
297 | * Assume 'minvec' and 'maxvec' are non-zero | ||
298 | */ | ||
299 | static int foo_driver_enable_msix(struct foo_adapter *adapter, | ||
300 | int minvec, int maxvec) | ||
301 | { | ||
302 | int rc; | ||
303 | |||
304 | minvec = roundup_pow_of_two(minvec); | ||
305 | maxvec = rounddown_pow_of_two(maxvec); | ||
306 | |||
307 | if (minvec > maxvec) | ||
308 | return -ERANGE; | ||
309 | |||
310 | retry: | ||
311 | rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, | ||
312 | maxvec, maxvec); | ||
313 | /* | ||
314 | * -ENOSPC is the only error code that warrants a retry | ||
315 | */ | ||
316 | if (rc == -ENOSPC) { | ||
317 | if (maxvec == 1) | ||
318 | return -ENOSPC; | ||
319 | |||
320 | maxvec /= 2; | ||
321 | |||
322 | if (minvec > maxvec) | ||
323 | return -ENOSPC; | ||
324 | |||
325 | goto retry; | ||
232 | } | 326 | } |
233 | 327 | ||
234 | return -ENOSPC; | 328 | return rc; |
235 | } | 329 | } |
236 | 330 | ||
331 | Note how the pci_enable_msix_range() return value is analyzed for a fallback - | ||
332 | any error code other than -ENOSPC indicates a fatal error and should not | ||
333 | be retried. | ||
334 | |||
237 | 4.3.2 pci_disable_msix | 335 | 4.3.2 pci_disable_msix |
238 | 336 | ||
239 | void pci_disable_msix(struct pci_dev *dev) | 337 | void pci_disable_msix(struct pci_dev *dev) |
240 | 338 | ||
241 | This function should be used to undo the effect of pci_enable_msix(). It frees | 339 | This function should be used to undo the effect of pci_enable_msix_range(). |
242 | the previously allocated message signaled interrupts. The interrupts may | 340 | It frees the previously allocated MSI-X interrupts. The interrupts may |
243 | subsequently be assigned to another device, so drivers should not cache | 341 | subsequently be assigned to another device, so drivers should not cache |
244 | the value of the 'vector' elements over a call to pci_disable_msix(). | 342 | the value of the 'vector' elements over a call to pci_disable_msix(). |
245 | 343 | ||
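A matching teardown sketch (same hypothetical foo_adapter fields as above):

static void foo_driver_teardown_msix(struct foo_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->nvec; i++)
		free_irq(adapter->msix_entries[i].vector, adapter);

	pci_disable_msix(adapter->pdev);
}
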
@@ -255,18 +353,32 @@ MSI-X Table. This address is mapped by the PCI subsystem, and should not | |||
255 | be accessed directly by the device driver. If the driver wishes to | 353 | be accessed directly by the device driver. If the driver wishes to |
256 | mask or unmask an interrupt, it should call disable_irq() / enable_irq(). | 354 | mask or unmask an interrupt, it should call disable_irq() / enable_irq(). |
257 | 355 | ||
356 | 4.3.4 pci_msix_vec_count | ||
357 | |||
358 | int pci_msix_vec_count(struct pci_dev *dev) | ||
359 | |||
360 | This function could be used to retrieve the number of entries in the device | ||
361 | MSI-X table. | ||
362 | |||
363 | If this function returns a negative number, it indicates the device is | ||
364 | not capable of sending MSI-X interrupts. | ||
365 | |||
366 | If this function returns a positive number, it indicates the maximum | ||
367 | number of MSI-X interrupt vectors that could be allocated. | ||
368 | |||
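For example, a driver might size its msix_entry array from this value - a sketch in which the kcalloc() usage and the foo_adapter fields are illustrative assumptions:

static int foo_driver_alloc_msix_entries(struct foo_adapter *adapter)
{
	int nvec = pci_msix_vec_count(adapter->pdev);

	if (nvec < 0)
		return nvec;

	adapter->msix_entries = kcalloc(nvec, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	adapter->msix_nvec_max = nvec;
	return 0;
}
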
258 | 4.4 Handling devices implementing both MSI and MSI-X capabilities | 369 | 4.4 Handling devices implementing both MSI and MSI-X capabilities |
259 | 370 | ||
260 | If a device implements both MSI and MSI-X capabilities, it can | 371 | If a device implements both MSI and MSI-X capabilities, it can |
261 | run in either MSI mode or MSI-X mode, but not both simultaneously. | 372 | run in either MSI mode or MSI-X mode, but not both simultaneously. |
262 | This is a requirement of the PCI spec, and it is enforced by the | 373 | This is a requirement of the PCI spec, and it is enforced by the |
263 | PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or | 374 | PCI layer. Calling pci_enable_msi_range() when MSI-X is already |
264 | pci_enable_msix() when MSI is already enabled results in an error. | 375 | enabled or pci_enable_msix_range() when MSI is already enabled |
265 | If a device driver wishes to switch between MSI and MSI-X at runtime, | 376 | results in an error. If a device driver wishes to switch between MSI |
266 | it must first quiesce the device, then switch it back to pin-interrupt | 377 | and MSI-X at runtime, it must first quiesce the device, then switch |
267 | mode, before calling pci_enable_msi() or pci_enable_msix() and resuming | 378 | it back to pin-interrupt mode, before calling pci_enable_msi_range() |
268 | operation. This is not expected to be a common operation but may be | 379 | or pci_enable_msix_range() and resuming operation. This is not expected |
269 | useful for debugging or testing during development. | 380 | to be a common operation but may be useful for debugging or testing |
381 | during development. | ||
270 | 382 | ||
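A sketch of such a switch, going from MSI-X back to single MSI on a device that has already been quiesced (the foo_* names and adapter fields are assumptions):

static int foo_driver_switch_msix_to_msi(struct foo_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i;

	/* the caller has already quiesced the device */
	for (i = 0; i < adapter->nvec; i++)
		free_irq(adapter->msix_entries[i].vector, adapter);
	pci_disable_msix(pdev);		/* back to pin-based interrupts */

	return pci_enable_msi_range(pdev, 1, 1);
}
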
271 | 4.5 Considerations when using MSIs | 383 | 4.5 Considerations when using MSIs |
272 | 384 | ||
@@ -381,5 +493,5 @@ or disabled (0). If 0 is found in any of the msi_bus files belonging | |||
381 | to bridges between the PCI root and the device, MSIs are disabled. | 493 | to bridges between the PCI root and the device, MSIs are disabled. |
382 | 494 | ||
383 | It is also worth checking the device driver to see whether it supports MSIs. | 495 | It is also worth checking the device driver to see whether it supports MSIs. |
384 | For example, it may contain calls to pci_enable_msi(), pci_enable_msix() or | 496 | For example, it may contain calls to pci_enable_msi_range() or |
385 | pci_enable_msi_block(). | 497 | pci_enable_msix_range(). |
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt index 6f458564d625..9518006f6675 100644 --- a/Documentation/PCI/pci.txt +++ b/Documentation/PCI/pci.txt | |||
@@ -123,8 +123,10 @@ initialization with a pointer to a structure describing the driver | |||
123 | 123 | ||
124 | 124 | ||
125 | The ID table is an array of struct pci_device_id entries ending with an | 125 | The ID table is an array of struct pci_device_id entries ending with an |
126 | all-zero entry; use of the macro DEFINE_PCI_DEVICE_TABLE is the preferred | 126 | all-zero entry. Definitions with static const are generally preferred. |
127 | method of declaring the table. Each entry consists of: | 127 | Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided. |
128 | |||
129 | Each entry consists of: | ||
128 | 130 | ||
129 | vendor,device Vendor and device ID to match (or PCI_ANY_ID) | 131 | vendor,device Vendor and device ID to match (or PCI_ANY_ID) |
130 | 132 | ||
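A sketch of the preferred style - the device ID and foo_ names below are made up for illustration:

static const struct pci_device_id foo_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* hypothetical ID */
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, foo_pci_ids);
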
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index d5d26d443693..d6fae13ff062 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt | |||
@@ -19,6 +19,8 @@ Required properties: | |||
19 | to define the mapping of the PCIe interface to interrupt | 19 | to define the mapping of the PCIe interface to interrupt |
20 | numbers. | 20 | numbers. |
21 | - num-lanes: number of lanes to use | 21 | - num-lanes: number of lanes to use |
22 | |||
23 | Optional properties: | ||
22 | - reset-gpio: gpio pin number of power good signal | 24 | - reset-gpio: gpio pin number of power good signal |
23 | 25 | ||
24 | Optional properties for fsl,imx6q-pcie | 26 | Optional properties for fsl,imx6q-pcie |
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c index 2b183b0d3207..99e8d4796c96 100644 --- a/arch/alpha/kernel/pci-sysfs.c +++ b/arch/alpha/kernel/pci-sysfs.c | |||
@@ -83,7 +83,7 @@ static int pci_mmap_resource(struct kobject *kobj, | |||
83 | if (iomem_is_exclusive(res->start)) | 83 | if (iomem_is_exclusive(res->start)) |
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | 85 | ||
86 | pcibios_resource_to_bus(pdev, &bar, res); | 86 | pcibios_resource_to_bus(pdev->bus, &bar, res); |
87 | vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); | 87 | vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); |
88 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; | 88 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; |
89 | 89 | ||
@@ -139,7 +139,7 @@ static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) | |||
139 | long dense_offset; | 139 | long dense_offset; |
140 | unsigned long sparse_size; | 140 | unsigned long sparse_size; |
141 | 141 | ||
142 | pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]); | 142 | pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); |
143 | 143 | ||
144 | /* All core logic chips have 4G sparse address space, except | 144 | /* All core logic chips have 4G sparse address space, except |
145 | CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM | 145 | CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM |
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index a21d0ab3b19e..eddee7720343 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c | |||
@@ -325,7 +325,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, | |||
325 | /* Helper for generic DMA-mapping functions. */ | 325 | /* Helper for generic DMA-mapping functions. */ |
326 | static struct pci_dev *alpha_gendev_to_pci(struct device *dev) | 326 | static struct pci_dev *alpha_gendev_to_pci(struct device *dev) |
327 | { | 327 | { |
328 | if (dev && dev->bus == &pci_bus_type) | 328 | if (dev && dev_is_pci(dev)) |
329 | return to_pci_dev(dev); | 329 | return to_pci_dev(dev); |
330 | 330 | ||
331 | /* Assume that non-PCI devices asking for DMA are either ISA or EISA, | 331 | /* Assume that non-PCI devices asking for DMA are either ISA or EISA, |
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 001f4913799c..5114b68e99d5 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -257,7 +257,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s | |||
257 | */ | 257 | */ |
258 | static int it8152_pci_platform_notify(struct device *dev) | 258 | static int it8152_pci_platform_notify(struct device *dev) |
259 | { | 259 | { |
260 | if (dev->bus == &pci_bus_type) { | 260 | if (dev_is_pci(dev)) { |
261 | if (dev->dma_mask) | 261 | if (dev->dma_mask) |
262 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 262 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
263 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 263 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
@@ -268,7 +268,7 @@ static int it8152_pci_platform_notify(struct device *dev) | |||
268 | 268 | ||
269 | static int it8152_pci_platform_notify_remove(struct device *dev) | 269 | static int it8152_pci_platform_notify_remove(struct device *dev) |
270 | { | 270 | { |
271 | if (dev->bus == &pci_bus_type) | 271 | if (dev_is_pci(dev)) |
272 | dmabounce_unregister_dev(dev); | 272 | dmabounce_unregister_dev(dev); |
273 | 273 | ||
274 | return 0; | 274 | return 0; |
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 6d6bde3e15fa..200970d56f6d 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
@@ -326,7 +326,7 @@ static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s | |||
326 | */ | 326 | */ |
327 | static int ixp4xx_pci_platform_notify(struct device *dev) | 327 | static int ixp4xx_pci_platform_notify(struct device *dev) |
328 | { | 328 | { |
329 | if(dev->bus == &pci_bus_type) { | 329 | if (dev_is_pci(dev)) { |
330 | *dev->dma_mask = SZ_64M - 1; | 330 | *dev->dma_mask = SZ_64M - 1; |
331 | dev->coherent_dma_mask = SZ_64M - 1; | 331 | dev->coherent_dma_mask = SZ_64M - 1; |
332 | dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); | 332 | dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); |
@@ -336,9 +336,9 @@ static int ixp4xx_pci_platform_notify(struct device *dev) | |||
336 | 336 | ||
337 | static int ixp4xx_pci_platform_notify_remove(struct device *dev) | 337 | static int ixp4xx_pci_platform_notify_remove(struct device *dev) |
338 | { | 338 | { |
339 | if(dev->bus == &pci_bus_type) { | 339 | if (dev_is_pci(dev)) |
340 | dmabounce_unregister_dev(dev); | 340 | dmabounce_unregister_dev(dev); |
341 | } | 341 | |
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 4c530a82fc46..8e858b593e4f 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -255,7 +255,7 @@ static u64 prefetch_spill_page; | |||
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | #ifdef CONFIG_PCI | 257 | #ifdef CONFIG_PCI |
258 | # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ | 258 | # define GET_IOC(dev) ((dev_is_pci(dev)) \ |
259 | ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) | 259 | ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) |
260 | #else | 260 | #else |
261 | # define GET_IOC(dev) NULL | 261 | # define GET_IOC(dev) NULL |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 3290d6e00c31..d0853e8e8623 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | static int sn_dma_supported(struct device *dev, u64 mask) | 35 | static int sn_dma_supported(struct device *dev, u64 mask) |
36 | { | 36 | { |
37 | BUG_ON(dev->bus != &pci_bus_type); | 37 | BUG_ON(!dev_is_pci(dev)); |
38 | 38 | ||
39 | if (mask < 0x7fffffff) | 39 | if (mask < 0x7fffffff) |
40 | return 0; | 40 | return 0; |
@@ -50,7 +50,7 @@ static int sn_dma_supported(struct device *dev, u64 mask) | |||
50 | */ | 50 | */ |
51 | int sn_dma_set_mask(struct device *dev, u64 dma_mask) | 51 | int sn_dma_set_mask(struct device *dev, u64 dma_mask) |
52 | { | 52 | { |
53 | BUG_ON(dev->bus != &pci_bus_type); | 53 | BUG_ON(!dev_is_pci(dev)); |
54 | 54 | ||
55 | if (!sn_dma_supported(dev, dma_mask)) | 55 | if (!sn_dma_supported(dev, dma_mask)) |
56 | return 0; | 56 | return 0; |
@@ -85,7 +85,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
85 | struct pci_dev *pdev = to_pci_dev(dev); | 85 | struct pci_dev *pdev = to_pci_dev(dev); |
86 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 86 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
87 | 87 | ||
88 | BUG_ON(dev->bus != &pci_bus_type); | 88 | BUG_ON(!dev_is_pci(dev)); |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Allocate the memory. | 91 | * Allocate the memory. |
@@ -143,7 +143,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr | |||
143 | struct pci_dev *pdev = to_pci_dev(dev); | 143 | struct pci_dev *pdev = to_pci_dev(dev); |
144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
145 | 145 | ||
146 | BUG_ON(dev->bus != &pci_bus_type); | 146 | BUG_ON(!dev_is_pci(dev)); |
147 | 147 | ||
148 | provider->dma_unmap(pdev, dma_handle, 0); | 148 | provider->dma_unmap(pdev, dma_handle, 0); |
149 | free_pages((unsigned long)cpu_addr, get_order(size)); | 149 | free_pages((unsigned long)cpu_addr, get_order(size)); |
@@ -187,7 +187,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | |||
187 | 187 | ||
188 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | 188 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); |
189 | 189 | ||
190 | BUG_ON(dev->bus != &pci_bus_type); | 190 | BUG_ON(!dev_is_pci(dev)); |
191 | 191 | ||
192 | phys_addr = __pa(cpu_addr); | 192 | phys_addr = __pa(cpu_addr); |
193 | if (dmabarr) | 193 | if (dmabarr) |
@@ -223,7 +223,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
223 | struct pci_dev *pdev = to_pci_dev(dev); | 223 | struct pci_dev *pdev = to_pci_dev(dev); |
224 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 224 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
225 | 225 | ||
226 | BUG_ON(dev->bus != &pci_bus_type); | 226 | BUG_ON(!dev_is_pci(dev)); |
227 | 227 | ||
228 | provider->dma_unmap(pdev, dma_addr, dir); | 228 | provider->dma_unmap(pdev, dma_addr, dir); |
229 | } | 229 | } |
@@ -247,7 +247,7 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |||
247 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 247 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
248 | struct scatterlist *sg; | 248 | struct scatterlist *sg; |
249 | 249 | ||
250 | BUG_ON(dev->bus != &pci_bus_type); | 250 | BUG_ON(!dev_is_pci(dev)); |
251 | 251 | ||
252 | for_each_sg(sgl, sg, nhwentries, i) { | 252 | for_each_sg(sgl, sg, nhwentries, i) { |
253 | provider->dma_unmap(pdev, sg->dma_address, dir); | 253 | provider->dma_unmap(pdev, sg->dma_address, dir); |
@@ -284,7 +284,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | |||
284 | 284 | ||
285 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | 285 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); |
286 | 286 | ||
287 | BUG_ON(dev->bus != &pci_bus_type); | 287 | BUG_ON(!dev_is_pci(dev)); |
288 | 288 | ||
289 | /* | 289 | /* |
290 | * Setup a DMA address for each entry in the scatterlist. | 290 | * Setup a DMA address for each entry in the scatterlist. |
@@ -323,26 +323,26 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | |||
323 | static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 323 | static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
324 | size_t size, enum dma_data_direction dir) | 324 | size_t size, enum dma_data_direction dir) |
325 | { | 325 | { |
326 | BUG_ON(dev->bus != &pci_bus_type); | 326 | BUG_ON(!dev_is_pci(dev)); |
327 | } | 327 | } |
328 | 328 | ||
329 | static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 329 | static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, |
330 | size_t size, | 330 | size_t size, |
331 | enum dma_data_direction dir) | 331 | enum dma_data_direction dir) |
332 | { | 332 | { |
333 | BUG_ON(dev->bus != &pci_bus_type); | 333 | BUG_ON(!dev_is_pci(dev)); |
334 | } | 334 | } |
335 | 335 | ||
336 | static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 336 | static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
337 | int nelems, enum dma_data_direction dir) | 337 | int nelems, enum dma_data_direction dir) |
338 | { | 338 | { |
339 | BUG_ON(dev->bus != &pci_bus_type); | 339 | BUG_ON(!dev_is_pci(dev)); |
340 | } | 340 | } |
341 | 341 | ||
342 | static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 342 | static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
343 | int nelems, enum dma_data_direction dir) | 343 | int nelems, enum dma_data_direction dir) |
344 | { | 344 | { |
345 | BUG_ON(dev->bus != &pci_bus_type); | 345 | BUG_ON(!dev_is_pci(dev)); |
346 | } | 346 | } |
347 | 347 | ||
348 | static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 348 | static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 14285caec71a..dba508fe1683 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c | |||
@@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type) | |||
282 | return NULL; | 282 | return NULL; |
283 | } | 283 | } |
284 | 284 | ||
285 | #ifdef CONFIG_PCI | ||
286 | static inline int is_pci_dev(struct device *dev) | ||
287 | { | ||
288 | return dev->bus == &pci_bus_type; | ||
289 | } | ||
290 | #else | ||
291 | static inline int is_pci_dev(struct device *dev) | ||
292 | { | ||
293 | return 0; | ||
294 | } | ||
295 | #endif | ||
296 | |||
297 | /* | 285 | /* |
298 | * get_node_path fills in @path with the firmware path to the device. | 286 | * get_node_path fills in @path with the firmware path to the device. |
299 | * Note that if @node is a parisc device, we don't fill in the 'mod' field. | 287 | * Note that if @node is a parisc device, we don't fill in the 'mod' field. |
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path) | |||
306 | int i = 5; | 294 | int i = 5; |
307 | memset(&path->bc, -1, 6); | 295 | memset(&path->bc, -1, 6); |
308 | 296 | ||
309 | if (is_pci_dev(dev)) { | 297 | if (dev_is_pci(dev)) { |
310 | unsigned int devfn = to_pci_dev(dev)->devfn; | 298 | unsigned int devfn = to_pci_dev(dev)->devfn; |
311 | path->mod = PCI_FUNC(devfn); | 299 | path->mod = PCI_FUNC(devfn); |
312 | path->bc[i--] = PCI_SLOT(devfn); | 300 | path->bc[i--] = PCI_SLOT(devfn); |
@@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path) | |||
314 | } | 302 | } |
315 | 303 | ||
316 | while (dev != &root) { | 304 | while (dev != &root) { |
317 | if (is_pci_dev(dev)) { | 305 | if (dev_is_pci(dev)) { |
318 | unsigned int devfn = to_pci_dev(dev)->devfn; | 306 | unsigned int devfn = to_pci_dev(dev)->devfn; |
319 | path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); | 307 | path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); |
320 | } else if (dev->bus == &parisc_bus_type) { | 308 | } else if (dev->bus == &parisc_bus_type) { |
@@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data) | |||
695 | if (dev->bus == &parisc_bus_type) { | 683 | if (dev->bus == &parisc_bus_type) { |
696 | if (match_parisc_device(dev, d->index, d->modpath)) | 684 | if (match_parisc_device(dev, d->index, d->modpath)) |
697 | d->dev = dev; | 685 | d->dev = dev; |
698 | } else if (is_pci_dev(dev)) { | 686 | } else if (dev_is_pci(dev)) { |
699 | if (match_pci_device(dev, d->index, d->modpath)) | 687 | if (match_pci_device(dev, d->index, d->modpath)) |
700 | d->dev = dev; | 688 | d->dev = dev; |
701 | } else if (dev->bus == NULL) { | 689 | } else if (dev->bus == NULL) { |
@@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath) | |||
753 | if (!parent) | 741 | if (!parent) |
754 | return NULL; | 742 | return NULL; |
755 | } | 743 | } |
756 | if (is_pci_dev(parent)) /* pci devices already parse MOD */ | 744 | if (dev_is_pci(parent)) /* pci devices already parse MOD */ |
757 | return parent; | 745 | return parent; |
758 | else | 746 | else |
759 | return parse_tree_node(parent, 6, modpath); | 747 | return parse_tree_node(parent, 6, modpath); |
@@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path) | |||
772 | padev = to_parisc_device(dev); | 760 | padev = to_parisc_device(dev); |
773 | get_node_path(dev->parent, path); | 761 | get_node_path(dev->parent, path); |
774 | path->mod = padev->hw_path; | 762 | path->mod = padev->hw_path; |
775 | } else if (is_pci_dev(dev)) { | 763 | } else if (dev_is_pci(dev)) { |
776 | get_node_path(dev, path); | 764 | get_node_path(dev, path); |
777 | } | 765 | } |
778 | } | 766 | } |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 36bed5a12750..c17f90d0f73c 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -369,7 +369,9 @@ static void *eeh_rmv_device(void *data, void *userdata) | |||
369 | edev->mode |= EEH_DEV_DISCONNECTED; | 369 | edev->mode |= EEH_DEV_DISCONNECTED; |
370 | (*removed)++; | 370 | (*removed)++; |
371 | 371 | ||
372 | pci_lock_rescan_remove(); | ||
372 | pci_stop_and_remove_bus_device(dev); | 373 | pci_stop_and_remove_bus_device(dev); |
374 | pci_unlock_rescan_remove(); | ||
373 | 375 | ||
374 | return NULL; | 376 | return NULL; |
375 | } | 377 | } |
@@ -416,10 +418,13 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
416 | * into pcibios_add_pci_devices(). | 418 | * into pcibios_add_pci_devices(). |
417 | */ | 419 | */ |
418 | eeh_pe_state_mark(pe, EEH_PE_KEEP); | 420 | eeh_pe_state_mark(pe, EEH_PE_KEEP); |
419 | if (bus) | 421 | if (bus) { |
422 | pci_lock_rescan_remove(); | ||
420 | pcibios_remove_pci_devices(bus); | 423 | pcibios_remove_pci_devices(bus); |
421 | else if (frozen_bus) | 424 | pci_unlock_rescan_remove(); |
425 | } else if (frozen_bus) { | ||
422 | eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed); | 426 | eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed); |
427 | } | ||
423 | 428 | ||
424 | /* Reset the pci controller. (Asserts RST#; resets config space). | 429 | /* Reset the pci controller. (Asserts RST#; resets config space). |
425 | * Reconfigure bridges and devices. Don't try to bring the system | 430 | * Reconfigure bridges and devices. Don't try to bring the system |
@@ -429,6 +434,8 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
429 | if (rc) | 434 | if (rc) |
430 | return rc; | 435 | return rc; |
431 | 436 | ||
437 | pci_lock_rescan_remove(); | ||
438 | |||
432 | /* Restore PE */ | 439 | /* Restore PE */ |
433 | eeh_ops->configure_bridge(pe); | 440 | eeh_ops->configure_bridge(pe); |
434 | eeh_pe_restore_bars(pe); | 441 | eeh_pe_restore_bars(pe); |
@@ -462,6 +469,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
462 | pe->tstamp = tstamp; | 469 | pe->tstamp = tstamp; |
463 | pe->freeze_count = cnt; | 470 | pe->freeze_count = cnt; |
464 | 471 | ||
472 | pci_unlock_rescan_remove(); | ||
465 | return 0; | 473 | return 0; |
466 | } | 474 | } |
467 | 475 | ||
@@ -618,8 +626,11 @@ perm_error: | |||
618 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); | 626 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); |
619 | 627 | ||
620 | /* Shut down the device drivers for good. */ | 628 | /* Shut down the device drivers for good. */ |
621 | if (frozen_bus) | 629 | if (frozen_bus) { |
630 | pci_lock_rescan_remove(); | ||
622 | pcibios_remove_pci_devices(frozen_bus); | 631 | pcibios_remove_pci_devices(frozen_bus); |
632 | pci_unlock_rescan_remove(); | ||
633 | } | ||
623 | } | 634 | } |
624 | 635 | ||
625 | static void eeh_handle_special_event(void) | 636 | static void eeh_handle_special_event(void) |
@@ -692,6 +703,7 @@ static void eeh_handle_special_event(void) | |||
692 | if (rc == 2 || rc == 1) | 703 | if (rc == 2 || rc == 1) |
693 | eeh_handle_normal_event(pe); | 704 | eeh_handle_normal_event(pe); |
694 | else { | 705 | else { |
706 | pci_lock_rescan_remove(); | ||
695 | list_for_each_entry_safe(hose, tmp, | 707 | list_for_each_entry_safe(hose, tmp, |
696 | &hose_list, list_node) { | 708 | &hose_list, list_node) { |
697 | phb_pe = eeh_phb_pe_get(hose); | 709 | phb_pe = eeh_phb_pe_get(hose); |
@@ -703,6 +715,7 @@ static void eeh_handle_special_event(void) | |||
703 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); | 715 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); |
704 | pcibios_remove_pci_devices(bus); | 716 | pcibios_remove_pci_devices(bus); |
705 | } | 717 | } |
718 | pci_unlock_rescan_remove(); | ||
706 | } | 719 | } |
707 | } | 720 | } |
708 | 721 | ||
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a1e3e40ca3fd..d9476c1fc959 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -835,7 +835,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev) | |||
835 | * at 0 as unset as well, except if PCI_PROBE_ONLY is also set | 835 | * at 0 as unset as well, except if PCI_PROBE_ONLY is also set |
836 | * since in that case, we don't want to re-assign anything | 836 | * since in that case, we don't want to re-assign anything |
837 | */ | 837 | */ |
838 | pcibios_resource_to_bus(dev, ®, res); | 838 | pcibios_resource_to_bus(dev->bus, ®, res); |
839 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || | 839 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || |
840 | (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { | 840 | (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { |
841 | /* Only print message if not re-assigning */ | 841 | /* Only print message if not re-assigning */ |
@@ -886,7 +886,7 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, | |||
886 | 886 | ||
887 | /* Job is a bit different between memory and IO */ | 887 | /* Job is a bit different between memory and IO */ |
888 | if (res->flags & IORESOURCE_MEM) { | 888 | if (res->flags & IORESOURCE_MEM) { |
889 | pcibios_resource_to_bus(dev, ®ion, res); | 889 | pcibios_resource_to_bus(dev->bus, ®ion, res); |
890 | 890 | ||
891 | /* If the BAR is non-0 then it's probably been initialized */ | 891 | /* If the BAR is non-0 then it's probably been initialized */ |
892 | if (region.start != 0) | 892 | if (region.start != 0) |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index ac0b034f9ae0..83c26d829991 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -111,7 +111,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) | |||
111 | res->name = pci_name(dev); | 111 | res->name = pci_name(dev); |
112 | region.start = base; | 112 | region.start = base; |
113 | region.end = base + size - 1; | 113 | region.end = base + size - 1; |
114 | pcibios_bus_to_resource(dev, res, ®ion); | 114 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
115 | } | 115 | } |
116 | } | 116 | } |
117 | 117 | ||
@@ -280,7 +280,7 @@ void of_scan_pci_bridge(struct pci_dev *dev) | |||
280 | res->flags = flags; | 280 | res->flags = flags; |
281 | region.start = of_read_number(&ranges[1], 2); | 281 | region.start = of_read_number(&ranges[1], 2); |
282 | region.end = region.start + size - 1; | 282 | region.end = region.start + size - 1; |
283 | pcibios_bus_to_resource(dev, res, ®ion); | 283 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
284 | } | 284 | } |
285 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), | 285 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), |
286 | bus->number); | 286 | bus->number); |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 0820362c7b0f..66670ff262a0 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -407,8 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
407 | struct msi_msg msg; | 407 | struct msi_msg msg; |
408 | int rc; | 408 | int rc; |
409 | 409 | ||
410 | if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) | 410 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
411 | return -EINVAL; | 411 | return 1; |
412 | msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); | 412 | msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); |
413 | msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI); | 413 | msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI); |
414 | 414 | ||
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index cb021453de2a..7de8d1f590b7 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -392,7 +392,7 @@ static void apb_fake_ranges(struct pci_dev *dev, | |||
392 | res->flags = IORESOURCE_IO; | 392 | res->flags = IORESOURCE_IO; |
393 | region.start = (first << 21); | 393 | region.start = (first << 21); |
394 | region.end = (last << 21) + ((1 << 21) - 1); | 394 | region.end = (last << 21) + ((1 << 21) - 1); |
395 | pcibios_bus_to_resource(dev, res, ®ion); | 395 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
396 | 396 | ||
397 | pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); | 397 | pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); |
398 | apb_calc_first_last(map, &first, &last); | 398 | apb_calc_first_last(map, &first, &last); |
@@ -400,7 +400,7 @@ static void apb_fake_ranges(struct pci_dev *dev, | |||
400 | res->flags = IORESOURCE_MEM; | 400 | res->flags = IORESOURCE_MEM; |
401 | region.start = (first << 29); | 401 | region.start = (first << 29); |
402 | region.end = (last << 29) + ((1 << 29) - 1); | 402 | region.end = (last << 29) + ((1 << 29) - 1); |
403 | pcibios_bus_to_resource(dev, res, ®ion); | 403 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
404 | } | 404 | } |
405 | 405 | ||
406 | static void pci_of_scan_bus(struct pci_pbm_info *pbm, | 406 | static void pci_of_scan_bus(struct pci_pbm_info *pbm, |
@@ -491,7 +491,7 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, | |||
491 | res->flags = flags; | 491 | res->flags = flags; |
492 | region.start = GET_64BIT(ranges, 1); | 492 | region.start = GET_64BIT(ranges, 1); |
493 | region.end = region.start + size - 1; | 493 | region.end = region.start + size - 1; |
494 | pcibios_bus_to_resource(dev, res, ®ion); | 494 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
495 | } | 495 | } |
496 | after_ranges: | 496 | after_ranges: |
497 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), | 497 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 947b5c417e83..1ac6114c9ea5 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void); | |||
104 | struct msi_desc; | 104 | struct msi_desc; |
105 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 105 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
106 | void native_teardown_msi_irq(unsigned int irq); | 106 | void native_teardown_msi_irq(unsigned int irq); |
107 | void native_restore_msi_irqs(struct pci_dev *dev, int irq); | 107 | void native_restore_msi_irqs(struct pci_dev *dev); |
108 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 108 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
109 | unsigned int irq_base, unsigned int irq_offset); | 109 | unsigned int irq_base, unsigned int irq_offset); |
110 | #else | 110 | #else |
@@ -125,7 +125,6 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
125 | 125 | ||
126 | /* generic pci stuff */ | 126 | /* generic pci stuff */ |
127 | #include <asm-generic/pci.h> | 127 | #include <asm-generic/pci.h> |
128 | #define PCIBIOS_MAX_MEM_32 0xffffffff | ||
129 | 128 | ||
130 | #ifdef CONFIG_NUMA | 129 | #ifdef CONFIG_NUMA |
131 | /* Returns the node based on pci bus */ | 130 | /* Returns the node based on pci bus */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 0f1be11e43d2..e45e4da96bf1 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -181,7 +181,7 @@ struct x86_msi_ops { | |||
181 | u8 hpet_id); | 181 | u8 hpet_id); |
182 | void (*teardown_msi_irq)(unsigned int irq); | 182 | void (*teardown_msi_irq)(unsigned int irq); |
183 | void (*teardown_msi_irqs)(struct pci_dev *dev); | 183 | void (*teardown_msi_irqs)(struct pci_dev *dev); |
184 | void (*restore_msi_irqs)(struct pci_dev *dev, int irq); | 184 | void (*restore_msi_irqs)(struct pci_dev *dev); |
185 | int (*setup_hpet_msi)(unsigned int irq, unsigned int id); | 185 | int (*setup_hpet_msi)(unsigned int irq, unsigned int id); |
186 | u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); | 186 | u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); |
187 | u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); | 187 | u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6c0b43bd024b..d359d0fffa50 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1034,9 +1034,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, | |||
1034 | 1034 | ||
1035 | if (!acpi_ioapic) | 1035 | if (!acpi_ioapic) |
1036 | return 0; | 1036 | return 0; |
1037 | if (!dev) | 1037 | if (!dev || !dev_is_pci(dev)) |
1038 | return 0; | ||
1039 | if (dev->bus != &pci_bus_type) | ||
1040 | return 0; | 1038 | return 0; |
1041 | 1039 | ||
1042 | pdev = to_pci_dev(dev); | 1040 | pdev = to_pci_dev(dev); |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 021783b1f46a..e48b674639cc 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
136 | x86_msi.teardown_msi_irq(irq); | 136 | x86_msi.teardown_msi_irq(irq); |
137 | } | 137 | } |
138 | 138 | ||
139 | void arch_restore_msi_irqs(struct pci_dev *dev, int irq) | 139 | void arch_restore_msi_irqs(struct pci_dev *dev) |
140 | { | 140 | { |
141 | x86_msi.restore_msi_irqs(dev, irq); | 141 | x86_msi.restore_msi_irqs(dev); |
142 | } | 142 | } |
143 | u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | 143 | u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
144 | { | 144 | { |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 5eee4959785d..103e702ec5a7 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -337,7 +337,7 @@ out: | |||
337 | return ret; | 337 | return ret; |
338 | } | 338 | } |
339 | 339 | ||
340 | static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq) | 340 | static void xen_initdom_restore_msi_irqs(struct pci_dev *dev) |
341 | { | 341 | { |
342 | int ret = 0; | 342 | int ret = 0; |
343 | 343 | ||
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 20360e480bd8..5b01bd6d5ea0 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -599,7 +599,9 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
599 | pci_assign_unassigned_root_bus_resources(root->bus); | 599 | pci_assign_unassigned_root_bus_resources(root->bus); |
600 | } | 600 | } |
601 | 601 | ||
602 | pci_lock_rescan_remove(); | ||
602 | pci_bus_add_devices(root->bus); | 603 | pci_bus_add_devices(root->bus); |
604 | pci_unlock_rescan_remove(); | ||
603 | return 1; | 605 | return 1; |
604 | 606 | ||
605 | end: | 607 | end: |
@@ -611,6 +613,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
611 | { | 613 | { |
612 | struct acpi_pci_root *root = acpi_driver_data(device); | 614 | struct acpi_pci_root *root = acpi_driver_data(device); |
613 | 615 | ||
616 | pci_lock_rescan_remove(); | ||
617 | |||
614 | pci_stop_root_bus(root->bus); | 618 | pci_stop_root_bus(root->bus); |
615 | 619 | ||
616 | device_set_run_wake(root->bus->bridge, false); | 620 | device_set_run_wake(root->bus->bridge, false); |
@@ -618,6 +622,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
618 | 622 | ||
619 | pci_remove_root_bus(root->bus); | 623 | pci_remove_root_bus(root->bus); |
620 | 624 | ||
625 | pci_unlock_rescan_remove(); | ||
626 | |||
621 | kfree(root); | 627 | kfree(root); |
622 | } | 628 | } |
623 | 629 | ||
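
The acpi_pci_root hunks above bracket bus add and removal with pci_lock_rescan_remove()/pci_unlock_rescan_remove(), which serialize PCI hot-add and hot-remove paths against each other. A sketch of the pattern such paths are expected to follow; the wrapper below is illustrative, not taken from the patch:

#include <linux/pci.h>

/* Illustrative wrapper: bracket any bus add/remove work with the
 * global rescan/remove lock used throughout these changes. */
static void example_rescan_bus(struct pci_bus *bus)
{
	pci_lock_rescan_remove();
	pci_rescan_bus(bus);
	pci_unlock_rescan_remove();
}
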
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 74911c2cb1dd..dc2756fb6f33 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -1148,26 +1148,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) | |||
1148 | {} | 1148 | {} |
1149 | #endif | 1149 | #endif |
1150 | 1150 | ||
1151 | static int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv) | 1151 | static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, |
1152 | struct ahci_host_priv *hpriv) | ||
1152 | { | 1153 | { |
1153 | int rc; | 1154 | int rc, nvec; |
1154 | unsigned int maxvec; | ||
1155 | 1155 | ||
1156 | if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) { | 1156 | if (hpriv->flags & AHCI_HFLAG_NO_MSI) |
1157 | rc = pci_enable_msi_block_auto(pdev, &maxvec); | 1157 | goto intx; |
1158 | if (rc > 0) { | 1158 | |
1159 | if ((rc == maxvec) || (rc == 1)) | 1159 | rc = pci_msi_vec_count(pdev); |
1160 | return rc; | 1160 | if (rc < 0) |
1161 | /* | 1161 | goto intx; |
1162 | * Assume that advantage of multipe MSIs is negated, | 1162 | |
1163 | * so fallback to single MSI mode to save resources | 1163 | /* |
1164 | */ | 1164 | * If number of MSIs is less than number of ports then Sharing Last |
1165 | pci_disable_msi(pdev); | 1165 | * Message mode could be enforced. In this case assume that advantage |
1166 | if (!pci_enable_msi(pdev)) | 1166 | * of multipe MSIs is negated and use single MSI mode instead. |
1167 | return 1; | 1167 | */ |
1168 | } | 1168 | if (rc < n_ports) |
1169 | } | 1169 | goto single_msi; |
1170 | |||
1171 | nvec = rc; | ||
1172 | rc = pci_enable_msi_block(pdev, nvec); | ||
1173 | if (rc) | ||
1174 | goto intx; | ||
1170 | 1175 | ||
1176 | return nvec; | ||
1177 | |||
1178 | single_msi: | ||
1179 | rc = pci_enable_msi(pdev); | ||
1180 | if (rc) | ||
1181 | goto intx; | ||
1182 | return 1; | ||
1183 | |||
1184 | intx: | ||
1171 | pci_intx(pdev, 1); | 1185 | pci_intx(pdev, 1); |
1172 | return 0; | 1186 | return 0; |
1173 | } | 1187 | } |
@@ -1328,10 +1342,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1328 | 1342 | ||
1329 | hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; | 1343 | hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; |
1330 | 1344 | ||
1331 | n_msis = ahci_init_interrupts(pdev, hpriv); | ||
1332 | if (n_msis > 1) | ||
1333 | hpriv->flags |= AHCI_HFLAG_MULTI_MSI; | ||
1334 | |||
1335 | /* save initial config */ | 1345 | /* save initial config */ |
1336 | ahci_pci_save_initial_config(pdev, hpriv); | 1346 | ahci_pci_save_initial_config(pdev, hpriv); |
1337 | 1347 | ||
@@ -1386,6 +1396,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1386 | */ | 1396 | */ |
1387 | n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); | 1397 | n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); |
1388 | 1398 | ||
1399 | n_msis = ahci_init_interrupts(pdev, n_ports, hpriv); | ||
1400 | if (n_msis > 1) | ||
1401 | hpriv->flags |= AHCI_HFLAG_MULTI_MSI; | ||
1402 | |||
1389 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); | 1403 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); |
1390 | if (!host) | 1404 | if (!host) |
1391 | return -ENOMEM; | 1405 | return -ENOMEM; |
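
The reworked ahci_init_interrupts() above replaces pci_enable_msi_block_auto() with an explicit ladder: query pci_msi_vec_count(), enable one vector per port when possible, otherwise fall back to a single MSI and finally to INTx. A condensed sketch of that ladder for a generic driver; the name and the n_ports policy are illustrative:

#include <linux/pci.h>

/* Condensed fallback ladder: multi-MSI if we can get one vector per
 * port, else a single MSI, else the legacy INTx line. */
static int example_init_interrupts(struct pci_dev *pdev, int n_ports)
{
	int nvec = pci_msi_vec_count(pdev);	/* < 0 if MSI unsupported */

	if (nvec >= n_ports && pci_enable_msi_block(pdev, nvec) == 0)
		return nvec;
	if (pci_enable_msi(pdev) == 0)
		return 1;
	pci_intx(pdev, 1);
	return 0;
}
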
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 923f99df4f1c..b709749c8639 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h | |||
@@ -239,6 +239,7 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | |||
239 | 239 | ||
240 | /* Chipset independent registers (from AGP Spec) */ | 240 | /* Chipset independent registers (from AGP Spec) */ |
241 | #define AGP_APBASE 0x10 | 241 | #define AGP_APBASE 0x10 |
242 | #define AGP_APERTURE_BAR 0 | ||
242 | 243 | ||
243 | #define AGPSTAT 0x4 | 244 | #define AGPSTAT 0x4 |
244 | #define AGPCMD 0x8 | 245 | #define AGPCMD 0x8 |
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 443cd6751ca2..19db03667650 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c | |||
@@ -85,8 +85,8 @@ static int ali_configure(void) | |||
85 | pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); | 85 | pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); |
86 | 86 | ||
87 | /* address to map to */ | 87 | /* address to map to */ |
88 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 88 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
89 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 89 | AGP_APERTURE_BAR); |
90 | 90 | ||
91 | #if 0 | 91 | #if 0 |
92 | if (agp_bridge->type == ALI_M1541) { | 92 | if (agp_bridge->type == ALI_M1541) { |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 779f0ab845a9..3661a51e93e2 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include "agp.h" | 12 | #include "agp.h" |
13 | 13 | ||
14 | #define AMD_MMBASE 0x14 | 14 | #define AMD_MMBASE_BAR 1 |
15 | #define AMD_APSIZE 0xac | 15 | #define AMD_APSIZE 0xac |
16 | #define AMD_MODECNTL 0xb0 | 16 | #define AMD_MODECNTL 0xb0 |
17 | #define AMD_MODECNTL2 0xb2 | 17 | #define AMD_MODECNTL2 0xb2 |
@@ -126,7 +126,6 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
126 | unsigned long __iomem *cur_gatt; | 126 | unsigned long __iomem *cur_gatt; |
127 | unsigned long addr; | 127 | unsigned long addr; |
128 | int retval; | 128 | int retval; |
129 | u32 temp; | ||
130 | int i; | 129 | int i; |
131 | 130 | ||
132 | value = A_SIZE_LVL2(agp_bridge->current_size); | 131 | value = A_SIZE_LVL2(agp_bridge->current_size); |
@@ -149,8 +148,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
149 | * used to program the agp master not the cpu | 148 | * used to program the agp master not the cpu |
150 | */ | 149 | */ |
151 | 150 | ||
152 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 151 | addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); |
153 | addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
154 | agp_bridge->gart_bus_addr = addr; | 152 | agp_bridge->gart_bus_addr = addr; |
155 | 153 | ||
156 | /* Calculate the agp offset */ | 154 | /* Calculate the agp offset */ |
@@ -207,6 +205,7 @@ static int amd_irongate_fetch_size(void) | |||
207 | static int amd_irongate_configure(void) | 205 | static int amd_irongate_configure(void) |
208 | { | 206 | { |
209 | struct aper_size_info_lvl2 *current_size; | 207 | struct aper_size_info_lvl2 *current_size; |
208 | phys_addr_t reg; | ||
210 | u32 temp; | 209 | u32 temp; |
211 | u16 enable_reg; | 210 | u16 enable_reg; |
212 | 211 | ||
@@ -214,9 +213,8 @@ static int amd_irongate_configure(void) | |||
214 | 213 | ||
215 | if (!amd_irongate_private.registers) { | 214 | if (!amd_irongate_private.registers) { |
216 | /* Get the memory mapped registers */ | 215 | /* Get the memory mapped registers */ |
217 | pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp); | 216 | reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR); |
218 | temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 217 | amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); |
219 | amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); | ||
220 | if (!amd_irongate_private.registers) | 218 | if (!amd_irongate_private.registers) |
221 | return -ENOMEM; | 219 | return -ENOMEM; |
222 | } | 220 | } |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 896413b59aae..3b47ed0310e1 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -269,7 +269,6 @@ static int agp_aperture_valid(u64 aper, u32 size) | |||
269 | */ | 269 | */ |
270 | static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) | 270 | static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) |
271 | { | 271 | { |
272 | u32 aper_low, aper_hi; | ||
273 | u64 aper, nb_aper; | 272 | u64 aper, nb_aper; |
274 | int order = 0; | 273 | int order = 0; |
275 | u32 nb_order, nb_base; | 274 | u32 nb_order, nb_base; |
@@ -295,9 +294,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) | |||
295 | apsize |= 0xf00; | 294 | apsize |= 0xf00; |
296 | order = 7 - hweight16(apsize); | 295 | order = 7 - hweight16(apsize); |
297 | 296 | ||
298 | pci_read_config_dword(agp, 0x10, &aper_low); | 297 | aper = pci_bus_address(agp, AGP_APERTURE_BAR); |
299 | pci_read_config_dword(agp, 0x14, &aper_hi); | ||
300 | aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); | ||
301 | 298 | ||
302 | /* | 299 | /* |
303 | * On some sick chips APSIZE is 0. This means it wants 4G | 300 | * On some sick chips APSIZE is 0. This means it wants 4G |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 03c1dc1ab552..18a7a6baa304 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/agp.h> | 12 | #include <asm/agp.h> |
13 | #include "agp.h" | 13 | #include "agp.h" |
14 | 14 | ||
15 | #define ATI_GART_MMBASE_ADDR 0x14 | 15 | #define ATI_GART_MMBASE_BAR 1 |
16 | #define ATI_RS100_APSIZE 0xac | 16 | #define ATI_RS100_APSIZE 0xac |
17 | #define ATI_RS100_IG_AGPMODE 0xb0 | 17 | #define ATI_RS100_IG_AGPMODE 0xb0 |
18 | #define ATI_RS300_APSIZE 0xf8 | 18 | #define ATI_RS300_APSIZE 0xf8 |
@@ -196,12 +196,12 @@ static void ati_cleanup(void) | |||
196 | 196 | ||
197 | static int ati_configure(void) | 197 | static int ati_configure(void) |
198 | { | 198 | { |
199 | phys_addr_t reg; | ||
199 | u32 temp; | 200 | u32 temp; |
200 | 201 | ||
201 | /* Get the memory mapped registers */ | 202 | /* Get the memory mapped registers */ |
202 | pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp); | 203 | reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR); |
203 | temp = (temp & 0xfffff000); | 204 | ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); |
204 | ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); | ||
205 | 205 | ||
206 | if (!ati_generic_private.registers) | 206 | if (!ati_generic_private.registers) |
207 | return -ENOMEM; | 207 | return -ENOMEM; |
@@ -211,18 +211,18 @@ static int ati_configure(void) | |||
211 | else | 211 | else |
212 | pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000); | 212 | pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000); |
213 | 213 | ||
214 | /* address to map too */ | 214 | /* address to map to */ |
215 | /* | 215 | /* |
216 | pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp); | 216 | agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev, |
217 | agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 217 | AGP_APERTURE_BAR); |
218 | printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); | 218 | printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); |
219 | */ | 219 | */ |
220 | writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID); | 220 | writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID); |
221 | readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/ | 221 | readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/ |
222 | 222 | ||
223 | /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ | 223 | /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ |
224 | pci_read_config_dword(agp_bridge->dev, 4, &temp); | 224 | pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp); |
225 | pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14)); | 225 | pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14)); |
226 | 226 | ||
227 | /* Write out the address of the gatt table */ | 227 | /* Write out the address of the gatt table */ |
228 | writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE); | 228 | writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE); |
@@ -385,8 +385,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) | |||
385 | * This is a bus address even on the alpha, b/c its | 385 | * This is a bus address even on the alpha, b/c its |
386 | * used to program the agp master not the cpu | 386 | * used to program the agp master not the cpu |
387 | */ | 387 | */ |
388 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 388 | addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); |
389 | addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
390 | agp_bridge->gart_bus_addr = addr; | 389 | agp_bridge->gart_bus_addr = addr; |
391 | 390 | ||
392 | /* Calculate the agp offset */ | 391 | /* Calculate the agp offset */ |
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 6974d5032053..533cb6d229b8 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c | |||
@@ -128,7 +128,6 @@ static void efficeon_cleanup(void) | |||
128 | 128 | ||
129 | static int efficeon_configure(void) | 129 | static int efficeon_configure(void) |
130 | { | 130 | { |
131 | u32 temp; | ||
132 | u16 temp2; | 131 | u16 temp2; |
133 | struct aper_size_info_lvl2 *current_size; | 132 | struct aper_size_info_lvl2 *current_size; |
134 | 133 | ||
@@ -141,8 +140,8 @@ static int efficeon_configure(void) | |||
141 | current_size->size_value); | 140 | current_size->size_value); |
142 | 141 | ||
143 | /* address to map to */ | 142 | /* address to map to */ |
144 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 143 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
145 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 144 | AGP_APERTURE_BAR); |
146 | 145 | ||
147 | /* agpctrl */ | 146 | /* agpctrl */ |
148 | pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); | 147 | pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index a0df182f6f7d..f39437addb58 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
@@ -1396,8 +1396,8 @@ int agp3_generic_configure(void) | |||
1396 | 1396 | ||
1397 | current_size = A_SIZE_16(agp_bridge->current_size); | 1397 | current_size = A_SIZE_16(agp_bridge->current_size); |
1398 | 1398 | ||
1399 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 1399 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
1400 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 1400 | AGP_APERTURE_BAR); |
1401 | 1401 | ||
1402 | /* set aperture size */ | 1402 | /* set aperture size */ |
1403 | pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); | 1403 | pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); |
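
The AGP conversions in this series all follow the same shape: instead of reading the aperture BAR with pci_read_config_dword() and masking PCI_BASE_ADDRESS_MEM_MASK by hand, they ask the PCI core for the BAR's bus address, which also copes with 64-bit BARs and host bridges that translate addresses. A minimal sketch; the helper name is made up, and AGP_APERTURE_BAR is the new constant for BAR 0 added to agp.h above:

#include <linux/pci.h>

#define AGP_APERTURE_BAR	0

/* Illustrative helper: the bus address of the aperture BAR, suitable
 * for programming into the AGP master rather than the CPU. */
static dma_addr_t example_aperture_bus_addr(struct pci_dev *bridge)
{
	return pci_bus_address(bridge, AGP_APERTURE_BAR);
}
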
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index a426ee1f57a6..a7c276585a9f 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -118,7 +118,6 @@ static void intel_8xx_cleanup(void) | |||
118 | 118 | ||
119 | static int intel_configure(void) | 119 | static int intel_configure(void) |
120 | { | 120 | { |
121 | u32 temp; | ||
122 | u16 temp2; | 121 | u16 temp2; |
123 | struct aper_size_info_16 *current_size; | 122 | struct aper_size_info_16 *current_size; |
124 | 123 | ||
@@ -128,8 +127,8 @@ static int intel_configure(void) | |||
128 | pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 127 | pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
129 | 128 | ||
130 | /* address to map to */ | 129 | /* address to map to */ |
131 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 130 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
132 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 131 | AGP_APERTURE_BAR); |
133 | 132 | ||
134 | /* attbase - aperture base */ | 133 | /* attbase - aperture base */ |
135 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 134 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -148,7 +147,7 @@ static int intel_configure(void) | |||
148 | 147 | ||
149 | static int intel_815_configure(void) | 148 | static int intel_815_configure(void) |
150 | { | 149 | { |
151 | u32 temp, addr; | 150 | u32 addr; |
152 | u8 temp2; | 151 | u8 temp2; |
153 | struct aper_size_info_8 *current_size; | 152 | struct aper_size_info_8 *current_size; |
154 | 153 | ||
@@ -167,8 +166,8 @@ static int intel_815_configure(void) | |||
167 | current_size->size_value); | 166 | current_size->size_value); |
168 | 167 | ||
169 | /* address to map to */ | 168 | /* address to map to */ |
170 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 169 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
171 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 170 | AGP_APERTURE_BAR); |
172 | 171 | ||
173 | pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); | 172 | pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); |
174 | addr &= INTEL_815_ATTBASE_MASK; | 173 | addr &= INTEL_815_ATTBASE_MASK; |
@@ -208,7 +207,6 @@ static void intel_820_cleanup(void) | |||
208 | 207 | ||
209 | static int intel_820_configure(void) | 208 | static int intel_820_configure(void) |
210 | { | 209 | { |
211 | u32 temp; | ||
212 | u8 temp2; | 210 | u8 temp2; |
213 | struct aper_size_info_8 *current_size; | 211 | struct aper_size_info_8 *current_size; |
214 | 212 | ||
@@ -218,8 +216,8 @@ static int intel_820_configure(void) | |||
218 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 216 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
219 | 217 | ||
220 | /* address to map to */ | 218 | /* address to map to */ |
221 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 219 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
222 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 220 | AGP_APERTURE_BAR); |
223 | 221 | ||
224 | /* attbase - aperture base */ | 222 | /* attbase - aperture base */ |
225 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 223 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -239,7 +237,6 @@ static int intel_820_configure(void) | |||
239 | 237 | ||
240 | static int intel_840_configure(void) | 238 | static int intel_840_configure(void) |
241 | { | 239 | { |
242 | u32 temp; | ||
243 | u16 temp2; | 240 | u16 temp2; |
244 | struct aper_size_info_8 *current_size; | 241 | struct aper_size_info_8 *current_size; |
245 | 242 | ||
@@ -249,8 +246,8 @@ static int intel_840_configure(void) | |||
249 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 246 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
250 | 247 | ||
251 | /* address to map to */ | 248 | /* address to map to */ |
252 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 249 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
253 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 250 | AGP_APERTURE_BAR); |
254 | 251 | ||
255 | /* attbase - aperture base */ | 252 | /* attbase - aperture base */ |
256 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 253 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -268,7 +265,6 @@ static int intel_840_configure(void) | |||
268 | 265 | ||
269 | static int intel_845_configure(void) | 266 | static int intel_845_configure(void) |
270 | { | 267 | { |
271 | u32 temp; | ||
272 | u8 temp2; | 268 | u8 temp2; |
273 | struct aper_size_info_8 *current_size; | 269 | struct aper_size_info_8 *current_size; |
274 | 270 | ||
@@ -282,9 +278,9 @@ static int intel_845_configure(void) | |||
282 | agp_bridge->apbase_config); | 278 | agp_bridge->apbase_config); |
283 | } else { | 279 | } else { |
284 | /* address to map to */ | 280 | /* address to map to */ |
285 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 281 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
286 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 282 | AGP_APERTURE_BAR); |
287 | agp_bridge->apbase_config = temp; | 283 | agp_bridge->apbase_config = agp_bridge->gart_bus_addr; |
288 | } | 284 | } |
289 | 285 | ||
290 | /* attbase - aperture base */ | 286 | /* attbase - aperture base */ |
@@ -303,7 +299,6 @@ static int intel_845_configure(void) | |||
303 | 299 | ||
304 | static int intel_850_configure(void) | 300 | static int intel_850_configure(void) |
305 | { | 301 | { |
306 | u32 temp; | ||
307 | u16 temp2; | 302 | u16 temp2; |
308 | struct aper_size_info_8 *current_size; | 303 | struct aper_size_info_8 *current_size; |
309 | 304 | ||
@@ -313,8 +308,8 @@ static int intel_850_configure(void) | |||
313 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 308 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
314 | 309 | ||
315 | /* address to map to */ | 310 | /* address to map to */ |
316 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 311 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
317 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 312 | AGP_APERTURE_BAR); |
318 | 313 | ||
319 | /* attbase - aperture base */ | 314 | /* attbase - aperture base */ |
320 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 315 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -332,7 +327,6 @@ static int intel_850_configure(void) | |||
332 | 327 | ||
333 | static int intel_860_configure(void) | 328 | static int intel_860_configure(void) |
334 | { | 329 | { |
335 | u32 temp; | ||
336 | u16 temp2; | 330 | u16 temp2; |
337 | struct aper_size_info_8 *current_size; | 331 | struct aper_size_info_8 *current_size; |
338 | 332 | ||
@@ -342,8 +336,8 @@ static int intel_860_configure(void) | |||
342 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 336 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
343 | 337 | ||
344 | /* address to map to */ | 338 | /* address to map to */ |
345 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 339 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
346 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 340 | AGP_APERTURE_BAR); |
347 | 341 | ||
348 | /* attbase - aperture base */ | 342 | /* attbase - aperture base */ |
349 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 343 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -361,7 +355,6 @@ static int intel_860_configure(void) | |||
361 | 355 | ||
362 | static int intel_830mp_configure(void) | 356 | static int intel_830mp_configure(void) |
363 | { | 357 | { |
364 | u32 temp; | ||
365 | u16 temp2; | 358 | u16 temp2; |
366 | struct aper_size_info_8 *current_size; | 359 | struct aper_size_info_8 *current_size; |
367 | 360 | ||
@@ -371,8 +364,8 @@ static int intel_830mp_configure(void) | |||
371 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 364 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
372 | 365 | ||
373 | /* address to map to */ | 366 | /* address to map to */ |
374 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 367 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
375 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 368 | AGP_APERTURE_BAR); |
376 | 369 | ||
377 | /* attbase - aperture base */ | 370 | /* attbase - aperture base */ |
378 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 371 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
@@ -390,7 +383,6 @@ static int intel_830mp_configure(void) | |||
390 | 383 | ||
391 | static int intel_7505_configure(void) | 384 | static int intel_7505_configure(void) |
392 | { | 385 | { |
393 | u32 temp; | ||
394 | u16 temp2; | 386 | u16 temp2; |
395 | struct aper_size_info_8 *current_size; | 387 | struct aper_size_info_8 *current_size; |
396 | 388 | ||
@@ -400,8 +392,8 @@ static int intel_7505_configure(void) | |||
400 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); | 392 | pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); |
401 | 393 | ||
402 | /* address to map to */ | 394 | /* address to map to */ |
403 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 395 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
404 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 396 | AGP_APERTURE_BAR); |
405 | 397 | ||
406 | /* attbase - aperture base */ | 398 | /* attbase - aperture base */ |
407 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); | 399 | pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 1042c1b90376..fda073dcd967 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -55,8 +55,8 @@ | |||
55 | #define INTEL_I860_ERRSTS 0xc8 | 55 | #define INTEL_I860_ERRSTS 0xc8 |
56 | 56 | ||
57 | /* Intel i810 registers */ | 57 | /* Intel i810 registers */ |
58 | #define I810_GMADDR 0x10 | 58 | #define I810_GMADR_BAR 0 |
59 | #define I810_MMADDR 0x14 | 59 | #define I810_MMADR_BAR 1 |
60 | #define I810_PTE_BASE 0x10000 | 60 | #define I810_PTE_BASE 0x10000 |
61 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | 61 | #define I810_PTE_MAIN_UNCACHED 0x00000000 |
62 | #define I810_PTE_LOCAL 0x00000002 | 62 | #define I810_PTE_LOCAL 0x00000002 |
@@ -113,9 +113,9 @@ | |||
113 | #define INTEL_I850_ERRSTS 0xc8 | 113 | #define INTEL_I850_ERRSTS 0xc8 |
114 | 114 | ||
115 | /* intel 915G registers */ | 115 | /* intel 915G registers */ |
116 | #define I915_GMADDR 0x18 | 116 | #define I915_GMADR_BAR 2 |
117 | #define I915_MMADDR 0x10 | 117 | #define I915_MMADR_BAR 0 |
118 | #define I915_PTEADDR 0x1C | 118 | #define I915_PTE_BAR 3 |
119 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | 119 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) |
120 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | 120 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) |
121 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | 121 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index b8e2014cb9cb..ad5da1ffcbe9 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -64,7 +64,7 @@ static struct _intel_private { | |||
64 | struct pci_dev *pcidev; /* device one */ | 64 | struct pci_dev *pcidev; /* device one */ |
65 | struct pci_dev *bridge_dev; | 65 | struct pci_dev *bridge_dev; |
66 | u8 __iomem *registers; | 66 | u8 __iomem *registers; |
67 | phys_addr_t gtt_bus_addr; | 67 | phys_addr_t gtt_phys_addr; |
68 | u32 PGETBL_save; | 68 | u32 PGETBL_save; |
69 | u32 __iomem *gtt; /* I915G */ | 69 | u32 __iomem *gtt; /* I915G */ |
70 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ | 70 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
@@ -172,7 +172,7 @@ static void i8xx_destroy_pages(struct page *page) | |||
172 | #define I810_GTT_ORDER 4 | 172 | #define I810_GTT_ORDER 4 |
173 | static int i810_setup(void) | 173 | static int i810_setup(void) |
174 | { | 174 | { |
175 | u32 reg_addr; | 175 | phys_addr_t reg_addr; |
176 | char *gtt_table; | 176 | char *gtt_table; |
177 | 177 | ||
178 | /* i81x does not preallocate the gtt. It's always 64kb in size. */ | 178 | /* i81x does not preallocate the gtt. It's always 64kb in size. */ |
@@ -181,8 +181,7 @@ static int i810_setup(void) | |||
181 | return -ENOMEM; | 181 | return -ENOMEM; |
182 | intel_private.i81x_gtt_table = gtt_table; | 182 | intel_private.i81x_gtt_table = gtt_table; |
183 | 183 | ||
184 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, ®_addr); | 184 | reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); |
185 | reg_addr &= 0xfff80000; | ||
186 | 185 | ||
187 | intel_private.registers = ioremap(reg_addr, KB(64)); | 186 | intel_private.registers = ioremap(reg_addr, KB(64)); |
188 | if (!intel_private.registers) | 187 | if (!intel_private.registers) |
@@ -191,7 +190,7 @@ static int i810_setup(void) | |||
191 | writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, | 190 | writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, |
192 | intel_private.registers+I810_PGETBL_CTL); | 191 | intel_private.registers+I810_PGETBL_CTL); |
193 | 192 | ||
194 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; | 193 | intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; |
195 | 194 | ||
196 | if ((readl(intel_private.registers+I810_DRAM_CTL) | 195 | if ((readl(intel_private.registers+I810_DRAM_CTL) |
197 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { | 196 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { |
@@ -608,9 +607,8 @@ static bool intel_gtt_can_wc(void) | |||
608 | 607 | ||
609 | static int intel_gtt_init(void) | 608 | static int intel_gtt_init(void) |
610 | { | 609 | { |
611 | u32 gma_addr; | ||
612 | u32 gtt_map_size; | 610 | u32 gtt_map_size; |
613 | int ret; | 611 | int ret, bar; |
614 | 612 | ||
615 | ret = intel_private.driver->setup(); | 613 | ret = intel_private.driver->setup(); |
616 | if (ret != 0) | 614 | if (ret != 0) |
@@ -636,10 +634,10 @@ static int intel_gtt_init(void) | |||
636 | 634 | ||
637 | intel_private.gtt = NULL; | 635 | intel_private.gtt = NULL; |
638 | if (intel_gtt_can_wc()) | 636 | if (intel_gtt_can_wc()) |
639 | intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, | 637 | intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr, |
640 | gtt_map_size); | 638 | gtt_map_size); |
641 | if (intel_private.gtt == NULL) | 639 | if (intel_private.gtt == NULL) |
642 | intel_private.gtt = ioremap(intel_private.gtt_bus_addr, | 640 | intel_private.gtt = ioremap(intel_private.gtt_phys_addr, |
643 | gtt_map_size); | 641 | gtt_map_size); |
644 | if (intel_private.gtt == NULL) { | 642 | if (intel_private.gtt == NULL) { |
645 | intel_private.driver->cleanup(); | 643 | intel_private.driver->cleanup(); |
@@ -660,14 +658,11 @@ static int intel_gtt_init(void) | |||
660 | } | 658 | } |
661 | 659 | ||
662 | if (INTEL_GTT_GEN <= 2) | 660 | if (INTEL_GTT_GEN <= 2) |
663 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, | 661 | bar = I810_GMADR_BAR; |
664 | &gma_addr); | ||
665 | else | 662 | else |
666 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, | 663 | bar = I915_GMADR_BAR; |
667 | &gma_addr); | ||
668 | |||
669 | intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); | ||
670 | 664 | ||
665 | intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar); | ||
671 | return 0; | 666 | return 0; |
672 | } | 667 | } |
673 | 668 | ||
@@ -787,16 +782,15 @@ EXPORT_SYMBOL(intel_enable_gtt); | |||
787 | 782 | ||
788 | static int i830_setup(void) | 783 | static int i830_setup(void) |
789 | { | 784 | { |
790 | u32 reg_addr; | 785 | phys_addr_t reg_addr; |
791 | 786 | ||
792 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, ®_addr); | 787 | reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); |
793 | reg_addr &= 0xfff80000; | ||
794 | 788 | ||
795 | intel_private.registers = ioremap(reg_addr, KB(64)); | 789 | intel_private.registers = ioremap(reg_addr, KB(64)); |
796 | if (!intel_private.registers) | 790 | if (!intel_private.registers) |
797 | return -ENOMEM; | 791 | return -ENOMEM; |
798 | 792 | ||
799 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; | 793 | intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; |
800 | 794 | ||
801 | return 0; | 795 | return 0; |
802 | } | 796 | } |
@@ -1108,12 +1102,10 @@ static void i965_write_entry(dma_addr_t addr, | |||
1108 | 1102 | ||
1109 | static int i9xx_setup(void) | 1103 | static int i9xx_setup(void) |
1110 | { | 1104 | { |
1111 | u32 reg_addr, gtt_addr; | 1105 | phys_addr_t reg_addr; |
1112 | int size = KB(512); | 1106 | int size = KB(512); |
1113 | 1107 | ||
1114 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); | 1108 | reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR); |
1115 | |||
1116 | reg_addr &= 0xfff80000; | ||
1117 | 1109 | ||
1118 | intel_private.registers = ioremap(reg_addr, size); | 1110 | intel_private.registers = ioremap(reg_addr, size); |
1119 | if (!intel_private.registers) | 1111 | if (!intel_private.registers) |
@@ -1121,15 +1113,14 @@ static int i9xx_setup(void) | |||
1121 | 1113 | ||
1122 | switch (INTEL_GTT_GEN) { | 1114 | switch (INTEL_GTT_GEN) { |
1123 | case 3: | 1115 | case 3: |
1124 | pci_read_config_dword(intel_private.pcidev, | 1116 | intel_private.gtt_phys_addr = |
1125 | I915_PTEADDR, >t_addr); | 1117 | pci_resource_start(intel_private.pcidev, I915_PTE_BAR); |
1126 | intel_private.gtt_bus_addr = gtt_addr; | ||
1127 | break; | 1118 | break; |
1128 | case 5: | 1119 | case 5: |
1129 | intel_private.gtt_bus_addr = reg_addr + MB(2); | 1120 | intel_private.gtt_phys_addr = reg_addr + MB(2); |
1130 | break; | 1121 | break; |
1131 | default: | 1122 | default: |
1132 | intel_private.gtt_bus_addr = reg_addr + KB(512); | 1123 | intel_private.gtt_phys_addr = reg_addr + KB(512); |
1133 | break; | 1124 | break; |
1134 | } | 1125 | } |
1135 | 1126 | ||
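
The intel-gtt rename from gtt_bus_addr to gtt_phys_addr makes the address-space split explicit: ioremap() wants a CPU physical address (pci_resource_start()), while values handed to the device, such as gma_bus_addr, should be bus addresses (pci_bus_address()). A small sketch of the two lookups side by side; the helper is illustrative only:

#include <linux/io.h>
#include <linux/pci.h>

/* Illustrative helper: map a BAR for the CPU and report the address
 * the device itself should be programmed with. */
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar,
				     dma_addr_t *bus_addr)
{
	*bus_addr = pci_bus_address(pdev, bar);		/* device's view */
	return ioremap(pci_resource_start(pdev, bar),	/* CPU's view */
		       pci_resource_len(pdev, bar));
}
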
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index be42a2312dc9..a1861b75eb31 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c | |||
@@ -106,6 +106,7 @@ static int nvidia_configure(void) | |||
106 | { | 106 | { |
107 | int i, rc, num_dirs; | 107 | int i, rc, num_dirs; |
108 | u32 apbase, aplimit; | 108 | u32 apbase, aplimit; |
109 | phys_addr_t apbase_phys; | ||
109 | struct aper_size_info_8 *current_size; | 110 | struct aper_size_info_8 *current_size; |
110 | u32 temp; | 111 | u32 temp; |
111 | 112 | ||
@@ -115,9 +116,8 @@ static int nvidia_configure(void) | |||
115 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, | 116 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, |
116 | current_size->size_value); | 117 | current_size->size_value); |
117 | 118 | ||
118 | /* address to map to */ | 119 | /* address to map to */ |
119 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase); | 120 | apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); |
120 | apbase &= PCI_BASE_ADDRESS_MEM_MASK; | ||
121 | agp_bridge->gart_bus_addr = apbase; | 121 | agp_bridge->gart_bus_addr = apbase; |
122 | aplimit = apbase + (current_size->size * 1024 * 1024) - 1; | 122 | aplimit = apbase + (current_size->size * 1024 * 1024) - 1; |
123 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); | 123 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); |
@@ -153,8 +153,9 @@ static int nvidia_configure(void) | |||
153 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); | 153 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); |
154 | 154 | ||
155 | /* map aperture */ | 155 | /* map aperture */ |
156 | apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR); | ||
156 | nvidia_private.aperture = | 157 | nvidia_private.aperture = |
157 | (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE); | 158 | (volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE); |
158 | 159 | ||
159 | if (!nvidia_private.aperture) | 160 | if (!nvidia_private.aperture) |
160 | return -ENOMEM; | 161 | return -ENOMEM; |
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 79c838c434bc..2c74038da459 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -50,13 +50,12 @@ static void sis_tlbflush(struct agp_memory *mem) | |||
50 | 50 | ||
51 | static int sis_configure(void) | 51 | static int sis_configure(void) |
52 | { | 52 | { |
53 | u32 temp; | ||
54 | struct aper_size_info_8 *current_size; | 53 | struct aper_size_info_8 *current_size; |
55 | 54 | ||
56 | current_size = A_SIZE_8(agp_bridge->current_size); | 55 | current_size = A_SIZE_8(agp_bridge->current_size); |
57 | pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); | 56 | pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); |
58 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 57 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
59 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 58 | AGP_APERTURE_BAR); |
60 | pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, | 59 | pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, |
61 | agp_bridge->gatt_bus_addr); | 60 | agp_bridge->gatt_bus_addr); |
62 | pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, | 61 | pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, |
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index 74d3aa3773bf..228f20cddc05 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c | |||
@@ -43,16 +43,15 @@ static int via_fetch_size(void) | |||
43 | 43 | ||
44 | static int via_configure(void) | 44 | static int via_configure(void) |
45 | { | 45 | { |
46 | u32 temp; | ||
47 | struct aper_size_info_8 *current_size; | 46 | struct aper_size_info_8 *current_size; |
48 | 47 | ||
49 | current_size = A_SIZE_8(agp_bridge->current_size); | 48 | current_size = A_SIZE_8(agp_bridge->current_size); |
50 | /* aperture size */ | 49 | /* aperture size */ |
51 | pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, | 50 | pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, |
52 | current_size->size_value); | 51 | current_size->size_value); |
53 | /* address to map too */ | 52 | /* address to map to */ |
54 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 53 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
55 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 54 | AGP_APERTURE_BAR); |
56 | 55 | ||
57 | /* GART control register */ | 56 | /* GART control register */ |
58 | pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f); | 57 | pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f); |
@@ -132,9 +131,9 @@ static int via_configure_agp3(void) | |||
132 | 131 | ||
133 | current_size = A_SIZE_16(agp_bridge->current_size); | 132 | current_size = A_SIZE_16(agp_bridge->current_size); |
134 | 133 | ||
135 | /* address to map too */ | 134 | /* address to map to */ |
136 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); | 135 | agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, |
137 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 136 | AGP_APERTURE_BAR); |
138 | 137 | ||
139 | /* attbase - aperture GATT base */ | 138 | /* attbase - aperture GATT base */ |
140 | pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE, | 139 | pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE, |
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c index 272a3ec35957..612afeaec3cb 100644 --- a/drivers/eisa/eisa-bus.c +++ b/drivers/eisa/eisa-bus.c | |||
@@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root, | |||
232 | static int __init eisa_register_device(struct eisa_device *edev) | 232 | static int __init eisa_register_device(struct eisa_device *edev) |
233 | { | 233 | { |
234 | int rc = device_register(&edev->dev); | 234 | int rc = device_register(&edev->dev); |
235 | if (rc) | 235 | if (rc) { |
236 | put_device(&edev->dev); | ||
236 | return rc; | 237 | return rc; |
238 | } | ||
237 | 239 | ||
238 | rc = device_create_file(&edev->dev, &dev_attr_signature); | 240 | rc = device_create_file(&edev->dev, &dev_attr_signature); |
239 | if (rc) | 241 | if (rc) |
@@ -275,18 +277,19 @@ static int __init eisa_request_resources(struct eisa_root_device *root, | |||
275 | } | 277 | } |
276 | 278 | ||
277 | if (slot) { | 279 | if (slot) { |
280 | edev->res[i].name = NULL; | ||
278 | edev->res[i].start = SLOT_ADDRESS(root, slot) | 281 | edev->res[i].start = SLOT_ADDRESS(root, slot) |
279 | + (i * 0x400); | 282 | + (i * 0x400); |
280 | edev->res[i].end = edev->res[i].start + 0xff; | 283 | edev->res[i].end = edev->res[i].start + 0xff; |
281 | edev->res[i].flags = IORESOURCE_IO; | 284 | edev->res[i].flags = IORESOURCE_IO; |
282 | } else { | 285 | } else { |
286 | edev->res[i].name = NULL; | ||
283 | edev->res[i].start = SLOT_ADDRESS(root, slot) | 287 | edev->res[i].start = SLOT_ADDRESS(root, slot) |
284 | + EISA_VENDOR_ID_OFFSET; | 288 | + EISA_VENDOR_ID_OFFSET; |
285 | edev->res[i].end = edev->res[i].start + 3; | 289 | edev->res[i].end = edev->res[i].start + 3; |
286 | edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY; | 290 | edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY; |
287 | } | 291 | } |
288 | 292 | ||
289 | dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]); | ||
290 | if (request_resource(root->res, &edev->res[i])) | 293 | if (request_resource(root->res, &edev->res[i])) |
291 | goto failed; | 294 | goto failed; |
292 | } | 295 | } |
@@ -326,19 +329,20 @@ static int __init eisa_probe(struct eisa_root_device *root) | |||
326 | return -ENOMEM; | 329 | return -ENOMEM; |
327 | } | 330 | } |
328 | 331 | ||
329 | if (eisa_init_device(root, edev, 0)) { | 332 | if (eisa_request_resources(root, edev, 0)) { |
333 | dev_warn(root->dev, | ||
334 | "EISA: Cannot allocate resource for mainboard\n"); | ||
330 | kfree(edev); | 335 | kfree(edev); |
331 | if (!root->force_probe) | 336 | if (!root->force_probe) |
332 | return -ENODEV; | 337 | return -EBUSY; |
333 | goto force_probe; | 338 | goto force_probe; |
334 | } | 339 | } |
335 | 340 | ||
336 | if (eisa_request_resources(root, edev, 0)) { | 341 | if (eisa_init_device(root, edev, 0)) { |
337 | dev_warn(root->dev, | 342 | eisa_release_resources(edev); |
338 | "EISA: Cannot allocate resource for mainboard\n"); | ||
339 | kfree(edev); | 343 | kfree(edev); |
340 | if (!root->force_probe) | 344 | if (!root->force_probe) |
341 | return -EBUSY; | 345 | return -ENODEV; |
342 | goto force_probe; | 346 | goto force_probe; |
343 | } | 347 | } |
344 | 348 | ||
@@ -361,11 +365,6 @@ static int __init eisa_probe(struct eisa_root_device *root) | |||
361 | continue; | 365 | continue; |
362 | } | 366 | } |
363 | 367 | ||
364 | if (eisa_init_device(root, edev, i)) { | ||
365 | kfree(edev); | ||
366 | continue; | ||
367 | } | ||
368 | |||
369 | if (eisa_request_resources(root, edev, i)) { | 368 | if (eisa_request_resources(root, edev, i)) { |
370 | dev_warn(root->dev, | 369 | dev_warn(root->dev, |
371 | "Cannot allocate resource for EISA slot %d\n", | 370 | "Cannot allocate resource for EISA slot %d\n", |
@@ -374,6 +373,12 @@ static int __init eisa_probe(struct eisa_root_device *root) | |||
374 | continue; | 373 | continue; |
375 | } | 374 | } |
376 | 375 | ||
376 | if (eisa_init_device(root, edev, i)) { | ||
377 | eisa_release_resources(edev); | ||
378 | kfree(edev); | ||
379 | continue; | ||
380 | } | ||
381 | |||
377 | if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED)) | 382 | if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED)) |
378 | enabled_str = " (forced enabled)"; | 383 | enabled_str = " (forced enabled)"; |
379 | else if (edev->state == EISA_CONFIG_FORCED) | 384 | else if (edev->state == EISA_CONFIG_FORCED) |
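
Two things change in eisa-bus.c above: device_register() failures now drop the reference with put_device() instead of leaking it, and resources are requested before eisa_init_device()/registration so each failure path can unwind cleanly. The put_device() rule is the generic driver-core pattern, sketched below with an illustrative function:

#include <linux/device.h>

/* Generic driver-core pattern: once device_register() has run, the
 * embedded kobject holds a reference even on failure, so the error
 * path must put_device() and let the release() callback free it. */
static int example_register(struct device *dev)
{
	int rc = device_register(dev);

	if (rc)
		put_device(dev);
	return rc;
}
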
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d3c3b5b15824..3540569948db 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -1265,14 +1265,14 @@ static int ggtt_probe_common(struct drm_device *dev, | |||
1265 | size_t gtt_size) | 1265 | size_t gtt_size) |
1266 | { | 1266 | { |
1267 | struct drm_i915_private *dev_priv = dev->dev_private; | 1267 | struct drm_i915_private *dev_priv = dev->dev_private; |
1268 | phys_addr_t gtt_bus_addr; | 1268 | phys_addr_t gtt_phys_addr; |
1269 | int ret; | 1269 | int ret; |
1270 | 1270 | ||
1271 | /* For Modern GENs the PTEs and register space are split in the BAR */ | 1271 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
1272 | gtt_bus_addr = pci_resource_start(dev->pdev, 0) + | 1272 | gtt_phys_addr = pci_resource_start(dev->pdev, 0) + |
1273 | (pci_resource_len(dev->pdev, 0) / 2); | 1273 | (pci_resource_len(dev->pdev, 0) / 2); |
1274 | 1274 | ||
1275 | dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); | 1275 | dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); |
1276 | if (!dev_priv->gtt.gsm) { | 1276 | if (!dev_priv->gtt.gsm) { |
1277 | DRM_ERROR("Failed to map the gtt page table\n"); | 1277 | DRM_ERROR("Failed to map the gtt page table\n"); |
1278 | return -ENOMEM; | 1278 | return -ENOMEM; |
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 767ff4d839f4..570b18a113ff 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -346,7 +346,7 @@ static int mpt_remove_dead_ioc_func(void *arg) | |||
346 | if ((pdev == NULL)) | 346 | if ((pdev == NULL)) |
347 | return -1; | 347 | return -1; |
348 | 348 | ||
349 | pci_stop_and_remove_bus_device(pdev); | 349 | pci_stop_and_remove_bus_device_locked(pdev); |
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
352 | 352 | ||
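
mpt_remove_dead_ioc_func() runs from its own thread and does not hold the PCI rescan/remove lock, so it moves to pci_stop_and_remove_bus_device_locked(), the variant that takes the lock itself. A sketch of when each variant applies; the helper is illustrative:

#include <linux/pci.h>

/* Illustrative: callers already under pci_lock_rescan_remove() use the
 * plain variant; standalone contexts use the _locked one. */
static void example_remove(struct pci_dev *pdev, bool caller_holds_lock)
{
	if (caller_holds_lock)
		pci_stop_and_remove_bus_device(pdev);
	else
		pci_stop_and_remove_bus_device_locked(pdev);
}
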
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b6a99f7a9b20..893503fa1782 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -105,9 +105,10 @@ config PCI_PASID | |||
105 | If unsure, say N. | 105 | If unsure, say N. |
106 | 106 | ||
107 | config PCI_IOAPIC | 107 | config PCI_IOAPIC |
108 | tristate "PCI IO-APIC hotplug support" if X86 | 108 | bool "PCI IO-APIC hotplug support" if X86 |
109 | depends on PCI | 109 | depends on PCI |
110 | depends on ACPI | 110 | depends on ACPI |
111 | depends on X86_IO_APIC | ||
111 | default !X86 | 112 | default !X86 |
112 | 113 | ||
113 | config PCI_LABEL | 114 | config PCI_LABEL |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 6ebf5bf8e7a7..17d2b07ee67c 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ | 5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ |
6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
7 | irq.o vpd.o setup-bus.o | 7 | irq.o vpd.o setup-bus.o vc.o |
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 0857ca981fae..7f8b78c08879 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -381,30 +381,6 @@ int pci_vpd_pci22_init(struct pci_dev *dev) | |||
381 | } | 381 | } |
382 | 382 | ||
383 | /** | 383 | /** |
384 | * pci_vpd_truncate - Set available Vital Product Data size | ||
385 | * @dev: pci device struct | ||
386 | * @size: available memory in bytes | ||
387 | * | ||
388 | * Adjust size of available VPD area. | ||
389 | */ | ||
390 | int pci_vpd_truncate(struct pci_dev *dev, size_t size) | ||
391 | { | ||
392 | if (!dev->vpd) | ||
393 | return -EINVAL; | ||
394 | |||
395 | /* limited by the access method */ | ||
396 | if (size > dev->vpd->len) | ||
397 | return -EINVAL; | ||
398 | |||
399 | dev->vpd->len = size; | ||
400 | if (dev->vpd->attr) | ||
401 | dev->vpd->attr->size = size; | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | EXPORT_SYMBOL(pci_vpd_truncate); | ||
406 | |||
407 | /** | ||
408 | * pci_cfg_access_lock - Lock PCI config reads/writes | 384 | * pci_cfg_access_lock - Lock PCI config reads/writes |
409 | * @dev: pci device struct | 385 | * @dev: pci device struct |
410 | * | 386 | * |
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index e52d7ffa38b9..a8099d4d0c9d 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c | |||
@@ -235,27 +235,6 @@ void pci_disable_pri(struct pci_dev *pdev) | |||
235 | EXPORT_SYMBOL_GPL(pci_disable_pri); | 235 | EXPORT_SYMBOL_GPL(pci_disable_pri); |
236 | 236 | ||
237 | /** | 237 | /** |
238 | * pci_pri_enabled - Checks if PRI capability is enabled | ||
239 | * @pdev: PCI device structure | ||
240 | * | ||
241 | * Returns true if PRI is enabled on the device, false otherwise | ||
242 | */ | ||
243 | bool pci_pri_enabled(struct pci_dev *pdev) | ||
244 | { | ||
245 | u16 control; | ||
246 | int pos; | ||
247 | |||
248 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | ||
249 | if (!pos) | ||
250 | return false; | ||
251 | |||
252 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | ||
253 | |||
254 | return (control & PCI_PRI_CTRL_ENABLE) ? true : false; | ||
255 | } | ||
256 | EXPORT_SYMBOL_GPL(pci_pri_enabled); | ||
257 | |||
258 | /** | ||
259 | * pci_reset_pri - Resets device's PRI state | 238 | * pci_reset_pri - Resets device's PRI state |
260 | * @pdev: PCI device structure | 239 | * @pdev: PCI device structure |
261 | * | 240 | * |
@@ -282,67 +261,6 @@ int pci_reset_pri(struct pci_dev *pdev) | |||
282 | return 0; | 261 | return 0; |
283 | } | 262 | } |
284 | EXPORT_SYMBOL_GPL(pci_reset_pri); | 263 | EXPORT_SYMBOL_GPL(pci_reset_pri); |
285 | |||
286 | /** | ||
287 | * pci_pri_stopped - Checks whether the PRI capability is stopped | ||
288 | * @pdev: PCI device structure | ||
289 | * | ||
290 | * Returns true if the PRI capability on the device is disabled and the | ||
291 | * device has no outstanding PRI requests, false otherwise. The device | ||
292 | * indicates this via the STOPPED bit in the status register of the | ||
293 | * capability. | ||
294 | * The device internal state can be cleared by resetting the PRI state | ||
295 | * with pci_reset_pri(). This can force the capability into the STOPPED | ||
296 | * state. | ||
297 | */ | ||
298 | bool pci_pri_stopped(struct pci_dev *pdev) | ||
299 | { | ||
300 | u16 control, status; | ||
301 | int pos; | ||
302 | |||
303 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | ||
304 | if (!pos) | ||
305 | return true; | ||
306 | |||
307 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | ||
308 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); | ||
309 | |||
310 | if (control & PCI_PRI_CTRL_ENABLE) | ||
311 | return false; | ||
312 | |||
313 | return (status & PCI_PRI_STATUS_STOPPED) ? true : false; | ||
314 | } | ||
315 | EXPORT_SYMBOL_GPL(pci_pri_stopped); | ||
316 | |||
317 | /** | ||
318 | * pci_pri_status - Request PRI status of a device | ||
319 | * @pdev: PCI device structure | ||
320 | * | ||
321 | * Returns negative value on failure, status on success. The status can | ||
322 | * be checked against status-bits. Supported bits are currently: | ||
323 | * PCI_PRI_STATUS_RF: Response failure | ||
324 | * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index | ||
325 | * PCI_PRI_STATUS_STOPPED: PRI has stopped | ||
326 | */ | ||
327 | int pci_pri_status(struct pci_dev *pdev) | ||
328 | { | ||
329 | u16 status, control; | ||
330 | int pos; | ||
331 | |||
332 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | ||
333 | if (!pos) | ||
334 | return -EINVAL; | ||
335 | |||
336 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | ||
337 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); | ||
338 | |||
339 | /* Stopped bit is undefined when enable == 1, so clear it */ | ||
340 | if (control & PCI_PRI_CTRL_ENABLE) | ||
341 | status &= ~PCI_PRI_STATUS_STOPPED; | ||
342 | |||
343 | return status; | ||
344 | } | ||
345 | EXPORT_SYMBOL_GPL(pci_pri_status); | ||
346 | #endif /* CONFIG_PCI_PRI */ | 264 | #endif /* CONFIG_PCI_PRI */ |
347 | 265 | ||
348 | #ifdef CONFIG_PCI_PASID | 266 | #ifdef CONFIG_PCI_PASID |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index fc1b74013743..00660cc502c5 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -98,41 +98,54 @@ void pci_bus_remove_resources(struct pci_bus *bus) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | /** | 101 | static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; |
102 | * pci_bus_alloc_resource - allocate a resource from a parent bus | 102 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
103 | * @bus: PCI bus | 103 | static struct pci_bus_region pci_64_bit = {0, |
104 | * @res: resource to allocate | 104 | (dma_addr_t) 0xffffffffffffffffULL}; |
105 | * @size: size of resource to allocate | 105 | static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL, |
106 | * @align: alignment of resource to allocate | 106 | (dma_addr_t) 0xffffffffffffffffULL}; |
107 | * @min: minimum /proc/iomem address to allocate | 107 | #endif |
108 | * @type_mask: IORESOURCE_* type flags | 108 | |
109 | * @alignf: resource alignment function | 109 | /* |
110 | * @alignf_data: data argument for resource alignment function | 110 | * @res contains CPU addresses. Clip it so the corresponding bus addresses |
111 | * | 111 | * on @bus are entirely within @region. This is used to control the bus |
112 | * Given the PCI bus a device resides on, the size, minimum address, | 112 | * addresses of resources we allocate, e.g., we may need a resource that |
113 | * alignment and type, try to find an acceptable resource allocation | 113 | * can be mapped by a 32-bit BAR. |
114 | * for a specific device resource. | ||
115 | */ | 114 | */ |
116 | int | 115 | static void pci_clip_resource_to_region(struct pci_bus *bus, |
117 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | 116 | struct resource *res, |
117 | struct pci_bus_region *region) | ||
118 | { | ||
119 | struct pci_bus_region r; | ||
120 | |||
121 | pcibios_resource_to_bus(bus, &r, res); | ||
122 | if (r.start < region->start) | ||
123 | r.start = region->start; | ||
124 | if (r.end > region->end) | ||
125 | r.end = region->end; | ||
126 | |||
127 | if (r.end < r.start) | ||
128 | res->end = res->start - 1; | ||
129 | else | ||
130 | pcibios_bus_to_resource(bus, res, &r); | ||
131 | } | ||
132 | |||
133 | static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, | ||
118 | resource_size_t size, resource_size_t align, | 134 | resource_size_t size, resource_size_t align, |
119 | resource_size_t min, unsigned int type_mask, | 135 | resource_size_t min, unsigned int type_mask, |
120 | resource_size_t (*alignf)(void *, | 136 | resource_size_t (*alignf)(void *, |
121 | const struct resource *, | 137 | const struct resource *, |
122 | resource_size_t, | 138 | resource_size_t, |
123 | resource_size_t), | 139 | resource_size_t), |
124 | void *alignf_data) | 140 | void *alignf_data, |
141 | struct pci_bus_region *region) | ||
125 | { | 142 | { |
126 | int i, ret = -ENOMEM; | 143 | int i, ret; |
127 | struct resource *r; | 144 | struct resource *r, avail; |
128 | resource_size_t max = -1; | 145 | resource_size_t max; |
129 | 146 | ||
130 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; | 147 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; |
131 | 148 | ||
132 | /* don't allocate too high if the pref mem doesn't support 64bit*/ | ||
133 | if (!(res->flags & IORESOURCE_MEM_64)) | ||
134 | max = PCIBIOS_MAX_MEM_32; | ||
135 | |||
136 | pci_bus_for_each_resource(bus, r, i) { | 149 | pci_bus_for_each_resource(bus, r, i) { |
137 | if (!r) | 150 | if (!r) |
138 | continue; | 151 | continue; |
@@ -147,15 +160,74 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
147 | !(res->flags & IORESOURCE_PREFETCH)) | 160 | !(res->flags & IORESOURCE_PREFETCH)) |
148 | continue; | 161 | continue; |
149 | 162 | ||
163 | avail = *r; | ||
164 | pci_clip_resource_to_region(bus, &avail, region); | ||
165 | if (!resource_size(&avail)) | ||
166 | continue; | ||
167 | |||
168 | /* | ||
169 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to | ||
170 | * protect badly documented motherboard resources, but if | ||
171 | * this is an already-configured bridge window, its start | ||
172 | * overrides "min". | ||
173 | */ | ||
174 | if (avail.start) | ||
175 | min = avail.start; | ||
176 | |||
177 | max = avail.end; | ||
178 | |||
150 | /* Ok, try it out.. */ | 179 | /* Ok, try it out.. */ |
151 | ret = allocate_resource(r, res, size, | 180 | ret = allocate_resource(r, res, size, min, max, |
152 | r->start ? : min, | 181 | align, alignf, alignf_data); |
153 | max, align, | ||
154 | alignf, alignf_data); | ||
155 | if (ret == 0) | 182 | if (ret == 0) |
156 | break; | 183 | return 0; |
157 | } | 184 | } |
158 | return ret; | 185 | return -ENOMEM; |
186 | } | ||
187 | |||
188 | /** | ||
189 | * pci_bus_alloc_resource - allocate a resource from a parent bus | ||
190 | * @bus: PCI bus | ||
191 | * @res: resource to allocate | ||
192 | * @size: size of resource to allocate | ||
193 | * @align: alignment of resource to allocate | ||
194 | * @min: minimum /proc/iomem address to allocate | ||
195 | * @type_mask: IORESOURCE_* type flags | ||
196 | * @alignf: resource alignment function | ||
197 | * @alignf_data: data argument for resource alignment function | ||
198 | * | ||
199 | * Given the PCI bus a device resides on, the size, minimum address, | ||
200 | * alignment and type, try to find an acceptable resource allocation | ||
201 | * for a specific device resource. | ||
202 | */ | ||
203 | int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | ||
204 | resource_size_t size, resource_size_t align, | ||
205 | resource_size_t min, unsigned int type_mask, | ||
206 | resource_size_t (*alignf)(void *, | ||
207 | const struct resource *, | ||
208 | resource_size_t, | ||
209 | resource_size_t), | ||
210 | void *alignf_data) | ||
211 | { | ||
212 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
213 | int rc; | ||
214 | |||
215 | if (res->flags & IORESOURCE_MEM_64) { | ||
216 | rc = pci_bus_alloc_from_region(bus, res, size, align, min, | ||
217 | type_mask, alignf, alignf_data, | ||
218 | &pci_high); | ||
219 | if (rc == 0) | ||
220 | return 0; | ||
221 | |||
222 | return pci_bus_alloc_from_region(bus, res, size, align, min, | ||
223 | type_mask, alignf, alignf_data, | ||
224 | &pci_64_bit); | ||
225 | } | ||
226 | #endif | ||
227 | |||
228 | return pci_bus_alloc_from_region(bus, res, size, align, min, | ||
229 | type_mask, alignf, alignf_data, | ||
230 | &pci_32_bit); | ||
159 | } | 231 | } |
160 | 232 | ||
161 | void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } | 233 | void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } |
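Note: the rework above replaces the single "clamp below PCIBIOS_MAX_MEM_32 unless IORESOURCE_MEM_64" rule with explicit bus-address regions: on CONFIG_ARCH_DMA_ADDR_T_64BIT kernels a 64-bit resource is first tried above 4 GiB (pci_high) and only then in the full 64-bit range, while everything else is confined to pci_32_bit. A usage sketch from a hypothetical caller (not from this patch), requesting a 1 MiB, 1 MiB-aligned 64-bit prefetchable window:

/* Hypothetical caller: with this change the window is preferentially
 * placed above 4 GiB; a plain 32-bit resource would stay below 4 GiB.
 * A NULL alignf falls back to allocate_resource()'s default handling. */
static int example_alloc_bar(struct pci_dev *dev)
{
	struct resource *res = &dev->resource[0];

	res->flags = IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH;
	return pci_bus_alloc_resource(dev->bus, res, SZ_1M, SZ_1M,
				      PCIBIOS_MIN_MEM, 0, NULL, NULL);
}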
@@ -176,6 +248,7 @@ int pci_bus_add_device(struct pci_dev *dev) | |||
176 | */ | 248 | */ |
177 | pci_fixup_device(pci_fixup_final, dev); | 249 | pci_fixup_device(pci_fixup_final, dev); |
178 | pci_create_sysfs_dev_files(dev); | 250 | pci_create_sysfs_dev_files(dev); |
251 | pci_proc_attach_device(dev); | ||
179 | 252 | ||
180 | dev->match_driver = true; | 253 | dev->match_driver = true; |
181 | retval = device_attach(&dev->dev); | 254 | retval = device_attach(&dev->dev); |
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c index a68dc613a5be..06ace6248c61 100644 --- a/drivers/pci/host-bridge.c +++ b/drivers/pci/host-bridge.c | |||
@@ -9,22 +9,19 @@ | |||
9 | 9 | ||
10 | #include "pci.h" | 10 | #include "pci.h" |
11 | 11 | ||
12 | static struct pci_bus *find_pci_root_bus(struct pci_dev *dev) | 12 | static struct pci_bus *find_pci_root_bus(struct pci_bus *bus) |
13 | { | 13 | { |
14 | struct pci_bus *bus; | ||
15 | |||
16 | bus = dev->bus; | ||
17 | while (bus->parent) | 14 | while (bus->parent) |
18 | bus = bus->parent; | 15 | bus = bus->parent; |
19 | 16 | ||
20 | return bus; | 17 | return bus; |
21 | } | 18 | } |
22 | 19 | ||
23 | static struct pci_host_bridge *find_pci_host_bridge(struct pci_dev *dev) | 20 | static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus) |
24 | { | 21 | { |
25 | struct pci_bus *bus = find_pci_root_bus(dev); | 22 | struct pci_bus *root_bus = find_pci_root_bus(bus); |
26 | 23 | ||
27 | return to_pci_host_bridge(bus->bridge); | 24 | return to_pci_host_bridge(root_bus->bridge); |
28 | } | 25 | } |
29 | 26 | ||
30 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | 27 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, |
@@ -40,10 +37,10 @@ static bool resource_contains(struct resource *res1, struct resource *res2) | |||
40 | return res1->start <= res2->start && res1->end >= res2->end; | 37 | return res1->start <= res2->start && res1->end >= res2->end; |
41 | } | 38 | } |
42 | 39 | ||
43 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | 40 | void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, |
44 | struct resource *res) | 41 | struct resource *res) |
45 | { | 42 | { |
46 | struct pci_host_bridge *bridge = find_pci_host_bridge(dev); | 43 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); |
47 | struct pci_host_bridge_window *window; | 44 | struct pci_host_bridge_window *window; |
48 | resource_size_t offset = 0; | 45 | resource_size_t offset = 0; |
49 | 46 | ||
@@ -68,10 +65,10 @@ static bool region_contains(struct pci_bus_region *region1, | |||
68 | return region1->start <= region2->start && region1->end >= region2->end; | 65 | return region1->start <= region2->start && region1->end >= region2->end; |
69 | } | 66 | } |
70 | 67 | ||
71 | void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | 68 | void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, |
72 | struct pci_bus_region *region) | 69 | struct pci_bus_region *region) |
73 | { | 70 | { |
74 | struct pci_host_bridge *bridge = find_pci_host_bridge(dev); | 71 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); |
75 | struct pci_host_bridge_window *window; | 72 | struct pci_host_bridge_window *window; |
76 | resource_size_t offset = 0; | 73 | resource_size_t offset = 0; |
77 | 74 | ||
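Note: pcibios_resource_to_bus() and pcibios_bus_to_resource() now take the struct pci_bus rather than a struct pci_dev, so callers that only have a bus (such as the new pci_clip_resource_to_region() in bus.c above) can use them directly; existing device-based callers simply pass dev->bus. A minimal sketch of the updated calling convention (hypothetical caller):

/* Hypothetical caller: translate a device BAR into bus address space
 * with the new bus-based signature. */
static void example_print_bus_addr(struct pci_dev *dev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(dev->bus, &region, &dev->resource[bar]);
	dev_info(&dev->dev, "BAR %d bus address %#llx-%#llx\n", bar,
		 (unsigned long long)region.start,
		 (unsigned long long)region.end);
}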
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index 24beed38ddc7..3de6bfbbe8e9 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c | |||
@@ -468,7 +468,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | |||
468 | int ret; | 468 | int ret; |
469 | 469 | ||
470 | exynos_pcie_sideband_dbi_r_mode(pp, true); | 470 | exynos_pcie_sideband_dbi_r_mode(pp, true); |
471 | ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); | 471 | ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); |
472 | exynos_pcie_sideband_dbi_r_mode(pp, false); | 472 | exynos_pcie_sideband_dbi_r_mode(pp, false); |
473 | return ret; | 473 | return ret; |
474 | } | 474 | } |
@@ -479,7 +479,8 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | |||
479 | int ret; | 479 | int ret; |
480 | 480 | ||
481 | exynos_pcie_sideband_dbi_w_mode(pp, true); | 481 | exynos_pcie_sideband_dbi_w_mode(pp, true); |
482 | ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); | 482 | ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), |
483 | where, size, val); | ||
483 | exynos_pcie_sideband_dbi_w_mode(pp, false); | 484 | exynos_pcie_sideband_dbi_w_mode(pp, false); |
484 | return ret; | 485 | return ret; |
485 | } | 486 | } |
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index bd70af8f31ac..e8663a8c3406 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c | |||
@@ -44,10 +44,18 @@ struct imx6_pcie { | |||
44 | void __iomem *mem_base; | 44 | void __iomem *mem_base; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* PCIe Root Complex registers (memory-mapped) */ | ||
48 | #define PCIE_RC_LCR 0x7c | ||
49 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 | ||
50 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 | ||
51 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf | ||
52 | |||
47 | /* PCIe Port Logic registers (memory-mapped) */ | 53 | /* PCIe Port Logic registers (memory-mapped) */ |
48 | #define PL_OFFSET 0x700 | 54 | #define PL_OFFSET 0x700 |
49 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | 55 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) |
50 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | 56 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) |
57 | #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) | ||
58 | #define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) | ||
51 | 59 | ||
52 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | 60 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) |
53 | #define PCIE_PHY_CTRL_DATA_LOC 0 | 61 | #define PCIE_PHY_CTRL_DATA_LOC 0 |
@@ -59,6 +67,9 @@ struct imx6_pcie { | |||
59 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | 67 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) |
60 | #define PCIE_PHY_STAT_ACK_LOC 16 | 68 | #define PCIE_PHY_STAT_ACK_LOC 16 |
61 | 69 | ||
70 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | ||
71 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | ||
72 | |||
62 | /* PHY registers (not memory-mapped) */ | 73 | /* PHY registers (not memory-mapped) */ |
63 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | 74 | #define PCIE_PHY_RX_ASIC_OUT 0x100D |
64 | 75 | ||
@@ -209,15 +220,9 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp) | |||
209 | 220 | ||
210 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 221 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, |
211 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); | 222 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); |
212 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
213 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
214 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 223 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, |
215 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | 224 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); |
216 | 225 | ||
217 | gpio_set_value(imx6_pcie->reset_gpio, 0); | ||
218 | msleep(100); | ||
219 | gpio_set_value(imx6_pcie->reset_gpio, 1); | ||
220 | |||
221 | return 0; | 226 | return 0; |
222 | } | 227 | } |
223 | 228 | ||
@@ -261,6 +266,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) | |||
261 | /* allow the clocks to stabilize */ | 266 | /* allow the clocks to stabilize */ |
262 | usleep_range(200, 500); | 267 | usleep_range(200, 500); |
263 | 268 | ||
269 | /* Some boards don't have PCIe reset GPIO. */ | ||
270 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { | ||
271 | gpio_set_value(imx6_pcie->reset_gpio, 0); | ||
272 | msleep(100); | ||
273 | gpio_set_value(imx6_pcie->reset_gpio, 1); | ||
274 | } | ||
264 | return 0; | 275 | return 0; |
265 | 276 | ||
266 | err_pcie_axi: | 277 | err_pcie_axi: |
@@ -299,11 +310,90 @@ static void imx6_pcie_init_phy(struct pcie_port *pp) | |||
299 | IMX6Q_GPR8_TX_SWING_LOW, 127 << 25); | 310 | IMX6Q_GPR8_TX_SWING_LOW, 127 << 25); |
300 | } | 311 | } |
301 | 312 | ||
302 | static void imx6_pcie_host_init(struct pcie_port *pp) | 313 | static int imx6_pcie_wait_for_link(struct pcie_port *pp) |
314 | { | ||
315 | int count = 200; | ||
316 | |||
317 | while (!dw_pcie_link_up(pp)) { | ||
318 | usleep_range(100, 1000); | ||
319 | if (--count) | ||
320 | continue; | ||
321 | |||
322 | dev_err(pp->dev, "phy link never came up\n"); | ||
323 | dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | ||
324 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), | ||
325 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static int imx6_pcie_start_link(struct pcie_port *pp) | ||
303 | { | 333 | { |
304 | int count = 0; | ||
305 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | 334 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); |
335 | uint32_t tmp; | ||
336 | int ret, count; | ||
306 | 337 | ||
338 | /* | ||
339 | * Force Gen1 operation when starting the link. In case the link is | ||
340 | * started in Gen2 mode, there is a possibility the devices on the | ||
341 | * bus will not be detected at all. This happens with PCIe switches. | ||
342 | */ | ||
343 | tmp = readl(pp->dbi_base + PCIE_RC_LCR); | ||
344 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | ||
345 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; | ||
346 | writel(tmp, pp->dbi_base + PCIE_RC_LCR); | ||
347 | |||
348 | /* Start LTSSM. */ | ||
349 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
350 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
351 | |||
352 | ret = imx6_pcie_wait_for_link(pp); | ||
353 | if (ret) | ||
354 | return ret; | ||
355 | |||
356 | /* Allow Gen2 mode after the link is up. */ | ||
357 | tmp = readl(pp->dbi_base + PCIE_RC_LCR); | ||
358 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | ||
359 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; | ||
360 | writel(tmp, pp->dbi_base + PCIE_RC_LCR); | ||
361 | |||
362 | /* | ||
363 | * Start Directed Speed Change so the best possible speed both link | ||
364 | * partners support can be negotiated. | ||
365 | */ | ||
366 | tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
367 | tmp |= PORT_LOGIC_SPEED_CHANGE; | ||
368 | writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
369 | |||
370 | count = 200; | ||
371 | while (count--) { | ||
372 | tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
373 | /* Test if the speed change finished. */ | ||
374 | if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) | ||
375 | break; | ||
376 | usleep_range(100, 1000); | ||
377 | } | ||
378 | |||
379 | /* Make sure link training is finished as well! */ | ||
380 | if (count) | ||
381 | ret = imx6_pcie_wait_for_link(pp); | ||
382 | else | ||
383 | ret = -EINVAL; | ||
384 | |||
385 | if (ret) { | ||
386 | dev_err(pp->dev, "Failed to bring link up!\n"); | ||
387 | } else { | ||
388 | tmp = readl(pp->dbi_base + 0x80); | ||
389 | dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); | ||
390 | } | ||
391 | |||
392 | return ret; | ||
393 | } | ||
394 | |||
395 | static void imx6_pcie_host_init(struct pcie_port *pp) | ||
396 | { | ||
307 | imx6_pcie_assert_core_reset(pp); | 397 | imx6_pcie_assert_core_reset(pp); |
308 | 398 | ||
309 | imx6_pcie_init_phy(pp); | 399 | imx6_pcie_init_phy(pp); |
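Note: the new start-link sequence deliberately brings the link up at Gen1 (by capping the PCIE_RC_LCR max-link-speed field), then raises the limit to Gen2 and triggers a Directed Speed Change, polling PORT_LOGIC_SPEED_CHANGE until the hardware clears it. The final dev_dbg() reads the negotiated rate from the dword at DBI offset 0x80; assuming the root complex places its PCIe capability at 0x70 (so 0x80 is Link Control/Status, with Link Status in the upper half), that decode looks like the sketch below. The helper name and the 0x70 assumption are not part of this patch.

/* Sketch: report the current link speed once the link is up. Assumes
 * Link Control/Status lives at DBI offset 0x80, consistent with the
 * dev_dbg() in imx6_pcie_start_link() above. */
static int imx6_pcie_current_speed(struct pcie_port *pp)
{
	u32 lcsr = readl(pp->dbi_base + 0x80);

	return (lcsr >> 16) & 0xf;	/* 1 = 2.5 GT/s (Gen1), 2 = 5 GT/s (Gen2) */
}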
@@ -312,33 +402,41 @@ static void imx6_pcie_host_init(struct pcie_port *pp) | |||
312 | 402 | ||
313 | dw_pcie_setup_rc(pp); | 403 | dw_pcie_setup_rc(pp); |
314 | 404 | ||
315 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 405 | imx6_pcie_start_link(pp); |
316 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | 406 | } |
317 | 407 | ||
318 | while (!dw_pcie_link_up(pp)) { | 408 | static void imx6_pcie_reset_phy(struct pcie_port *pp) |
319 | usleep_range(100, 1000); | 409 | { |
320 | count++; | 410 | uint32_t temp; |
321 | if (count >= 200) { | 411 | |
322 | dev_err(pp->dev, "phy link never came up\n"); | 412 | pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); |
323 | dev_dbg(pp->dev, | 413 | temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | |
324 | "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | 414 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); |
325 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), | 415 | pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); |
326 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); | 416 | |
327 | break; | 417 | usleep_range(2000, 3000); |
328 | } | ||
329 | } | ||
330 | 418 | ||
331 | return; | 419 | pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); |
420 | temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | | ||
421 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
422 | pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); | ||
332 | } | 423 | } |
333 | 424 | ||
334 | static int imx6_pcie_link_up(struct pcie_port *pp) | 425 | static int imx6_pcie_link_up(struct pcie_port *pp) |
335 | { | 426 | { |
336 | u32 rc, ltssm, rx_valid, temp; | 427 | u32 rc, ltssm, rx_valid; |
337 | 428 | ||
338 | /* link is debug bit 36, debug register 1 starts at bit 32 */ | 429 | /* |
339 | rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); | 430 | * Test if the PHY reports that the link is up and also that |
340 | if (rc) | 431 | * the link training finished. It might happen that the PHY |
341 | return -EAGAIN; | 432 | * reports the link is already up, but the link training bit |
433 | * is still set, so make sure to check the training is done | ||
434 | * as well here. | ||
435 | */ | ||
436 | rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1); | ||
437 | if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) && | ||
438 | !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING)) | ||
439 | return 1; | ||
342 | 440 | ||
343 | /* | 441 | /* |
344 | * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. | 442 | * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. |
@@ -358,21 +456,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp) | |||
358 | 456 | ||
359 | dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n"); | 457 | dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n"); |
360 | 458 | ||
361 | pcie_phy_read(pp->dbi_base, | 459 | imx6_pcie_reset_phy(pp); |
362 | PHY_RX_OVRD_IN_LO, &temp); | ||
363 | temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | ||
364 | | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
365 | pcie_phy_write(pp->dbi_base, | ||
366 | PHY_RX_OVRD_IN_LO, temp); | ||
367 | |||
368 | usleep_range(2000, 3000); | ||
369 | |||
370 | pcie_phy_read(pp->dbi_base, | ||
371 | PHY_RX_OVRD_IN_LO, &temp); | ||
372 | temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | ||
373 | | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
374 | pcie_phy_write(pp->dbi_base, | ||
375 | PHY_RX_OVRD_IN_LO, temp); | ||
376 | 460 | ||
377 | return 0; | 461 | return 0; |
378 | } | 462 | } |
@@ -426,30 +510,19 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
426 | "imprecise external abort"); | 510 | "imprecise external abort"); |
427 | 511 | ||
428 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 512 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
429 | if (!dbi_base) { | ||
430 | dev_err(&pdev->dev, "dbi_base memory resource not found\n"); | ||
431 | return -ENODEV; | ||
432 | } | ||
433 | |||
434 | pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); | 513 | pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); |
435 | if (IS_ERR(pp->dbi_base)) { | 514 | if (IS_ERR(pp->dbi_base)) |
436 | ret = PTR_ERR(pp->dbi_base); | 515 | return PTR_ERR(pp->dbi_base); |
437 | goto err; | ||
438 | } | ||
439 | 516 | ||
440 | /* Fetch GPIOs */ | 517 | /* Fetch GPIOs */ |
441 | imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); | 518 | imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); |
442 | if (!gpio_is_valid(imx6_pcie->reset_gpio)) { | 519 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { |
443 | dev_err(&pdev->dev, "no reset-gpio defined\n"); | 520 | ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, |
444 | ret = -ENODEV; | 521 | GPIOF_OUT_INIT_LOW, "PCIe reset"); |
445 | } | 522 | if (ret) { |
446 | ret = devm_gpio_request_one(&pdev->dev, | 523 | dev_err(&pdev->dev, "unable to get reset gpio\n"); |
447 | imx6_pcie->reset_gpio, | 524 | return ret; |
448 | GPIOF_OUT_INIT_LOW, | 525 | } |
449 | "PCIe reset"); | ||
450 | if (ret) { | ||
451 | dev_err(&pdev->dev, "unable to get reset gpio\n"); | ||
452 | goto err; | ||
453 | } | 526 | } |
454 | 527 | ||
455 | imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); | 528 | imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); |
@@ -460,7 +533,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
460 | "PCIe power enable"); | 533 | "PCIe power enable"); |
461 | if (ret) { | 534 | if (ret) { |
462 | dev_err(&pdev->dev, "unable to get power-on gpio\n"); | 535 | dev_err(&pdev->dev, "unable to get power-on gpio\n"); |
463 | goto err; | 536 | return ret; |
464 | } | 537 | } |
465 | } | 538 | } |
466 | 539 | ||
@@ -472,7 +545,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
472 | "PCIe wake up"); | 545 | "PCIe wake up"); |
473 | if (ret) { | 546 | if (ret) { |
474 | dev_err(&pdev->dev, "unable to get wake-up gpio\n"); | 547 | dev_err(&pdev->dev, "unable to get wake-up gpio\n"); |
475 | goto err; | 548 | return ret; |
476 | } | 549 | } |
477 | } | 550 | } |
478 | 551 | ||
@@ -484,7 +557,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
484 | "PCIe disable endpoint"); | 557 | "PCIe disable endpoint"); |
485 | if (ret) { | 558 | if (ret) { |
486 | dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); | 559 | dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); |
487 | goto err; | 560 | return ret; |
488 | } | 561 | } |
489 | } | 562 | } |
490 | 563 | ||
@@ -493,32 +566,28 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
493 | if (IS_ERR(imx6_pcie->lvds_gate)) { | 566 | if (IS_ERR(imx6_pcie->lvds_gate)) { |
494 | dev_err(&pdev->dev, | 567 | dev_err(&pdev->dev, |
495 | "lvds_gate clock select missing or invalid\n"); | 568 | "lvds_gate clock select missing or invalid\n"); |
496 | ret = PTR_ERR(imx6_pcie->lvds_gate); | 569 | return PTR_ERR(imx6_pcie->lvds_gate); |
497 | goto err; | ||
498 | } | 570 | } |
499 | 571 | ||
500 | imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m"); | 572 | imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m"); |
501 | if (IS_ERR(imx6_pcie->sata_ref_100m)) { | 573 | if (IS_ERR(imx6_pcie->sata_ref_100m)) { |
502 | dev_err(&pdev->dev, | 574 | dev_err(&pdev->dev, |
503 | "sata_ref_100m clock source missing or invalid\n"); | 575 | "sata_ref_100m clock source missing or invalid\n"); |
504 | ret = PTR_ERR(imx6_pcie->sata_ref_100m); | 576 | return PTR_ERR(imx6_pcie->sata_ref_100m); |
505 | goto err; | ||
506 | } | 577 | } |
507 | 578 | ||
508 | imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); | 579 | imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); |
509 | if (IS_ERR(imx6_pcie->pcie_ref_125m)) { | 580 | if (IS_ERR(imx6_pcie->pcie_ref_125m)) { |
510 | dev_err(&pdev->dev, | 581 | dev_err(&pdev->dev, |
511 | "pcie_ref_125m clock source missing or invalid\n"); | 582 | "pcie_ref_125m clock source missing or invalid\n"); |
512 | ret = PTR_ERR(imx6_pcie->pcie_ref_125m); | 583 | return PTR_ERR(imx6_pcie->pcie_ref_125m); |
513 | goto err; | ||
514 | } | 584 | } |
515 | 585 | ||
516 | imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); | 586 | imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); |
517 | if (IS_ERR(imx6_pcie->pcie_axi)) { | 587 | if (IS_ERR(imx6_pcie->pcie_axi)) { |
518 | dev_err(&pdev->dev, | 588 | dev_err(&pdev->dev, |
519 | "pcie_axi clock source missing or invalid\n"); | 589 | "pcie_axi clock source missing or invalid\n"); |
520 | ret = PTR_ERR(imx6_pcie->pcie_axi); | 590 | return PTR_ERR(imx6_pcie->pcie_axi); |
521 | goto err; | ||
522 | } | 591 | } |
523 | 592 | ||
524 | /* Grab GPR config register range */ | 593 | /* Grab GPR config register range */ |
@@ -526,19 +595,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
526 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | 595 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); |
527 | if (IS_ERR(imx6_pcie->iomuxc_gpr)) { | 596 | if (IS_ERR(imx6_pcie->iomuxc_gpr)) { |
528 | dev_err(&pdev->dev, "unable to find iomuxc registers\n"); | 597 | dev_err(&pdev->dev, "unable to find iomuxc registers\n"); |
529 | ret = PTR_ERR(imx6_pcie->iomuxc_gpr); | 598 | return PTR_ERR(imx6_pcie->iomuxc_gpr); |
530 | goto err; | ||
531 | } | 599 | } |
532 | 600 | ||
533 | ret = imx6_add_pcie_port(pp, pdev); | 601 | ret = imx6_add_pcie_port(pp, pdev); |
534 | if (ret < 0) | 602 | if (ret < 0) |
535 | goto err; | 603 | return ret; |
536 | 604 | ||
537 | platform_set_drvdata(pdev, imx6_pcie); | 605 | platform_set_drvdata(pdev, imx6_pcie); |
538 | return 0; | 606 | return 0; |
539 | |||
540 | err: | ||
541 | return ret; | ||
542 | } | 607 | } |
543 | 608 | ||
544 | static const struct of_device_id imx6_pcie_of_match[] = { | 609 | static const struct of_device_id imx6_pcie_of_match[] = { |
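Note: the probe() changes above are mostly error-path cleanup: every resource is acquired with a devm_* helper, so a failed step can simply return and earlier acquisitions are released automatically, which makes the old err: label (which only did "return ret;") unnecessary. The reset GPIO also becomes optional. A condensed sketch of the resulting pattern (variables as in the probe hunks above; not a complete function):

/* Condensed from the hunks above: devm-managed resources need no
 * manual unwinding, and the reset line is only requested when the
 * device tree actually provides one. */
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pp->dbi_base))
	return PTR_ERR(pp->dbi_base);

imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
	ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
				    GPIOF_OUT_INIT_LOW, "PCIe reset");
	if (ret)
		return ret;
}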
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 2aa7b77c7c88..13478ecd4113 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -150,6 +150,11 @@ static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg) | |||
150 | return readl(port->base + reg); | 150 | return readl(port->base + reg); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port) | ||
154 | { | ||
155 | return port->io_target != -1 && port->io_attr != -1; | ||
156 | } | ||
157 | |||
153 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) | 158 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) |
154 | { | 159 | { |
155 | return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); | 160 | return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); |
@@ -300,7 +305,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | |||
300 | 305 | ||
301 | /* Are the new iobase/iolimit values invalid? */ | 306 | /* Are the new iobase/iolimit values invalid? */ |
302 | if (port->bridge.iolimit < port->bridge.iobase || | 307 | if (port->bridge.iolimit < port->bridge.iobase || |
303 | port->bridge.iolimitupper < port->bridge.iobaseupper) { | 308 | port->bridge.iolimitupper < port->bridge.iobaseupper || |
309 | !(port->bridge.command & PCI_COMMAND_IO)) { | ||
304 | 310 | ||
305 | /* If a window was configured, remove it */ | 311 | /* If a window was configured, remove it */ |
306 | if (port->iowin_base) { | 312 | if (port->iowin_base) { |
@@ -313,6 +319,12 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | |||
313 | return; | 319 | return; |
314 | } | 320 | } |
315 | 321 | ||
322 | if (!mvebu_has_ioport(port)) { | ||
323 | dev_WARN(&port->pcie->pdev->dev, | ||
324 | "Attempt to set IO when IO is disabled\n"); | ||
325 | return; | ||
326 | } | ||
327 | |||
316 | /* | 328 | /* |
317 | * We read the PCI-to-PCI bridge emulated registers, and | 329 | * We read the PCI-to-PCI bridge emulated registers, and |
318 | * calculate the base address and size of the address decoding | 330 | * calculate the base address and size of the address decoding |
@@ -330,14 +342,13 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | |||
330 | mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, | 342 | mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, |
331 | port->iowin_base, port->iowin_size, | 343 | port->iowin_base, port->iowin_size, |
332 | iobase); | 344 | iobase); |
333 | |||
334 | pci_ioremap_io(iobase, port->iowin_base); | ||
335 | } | 345 | } |
336 | 346 | ||
337 | static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) | 347 | static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) |
338 | { | 348 | { |
339 | /* Are the new membase/memlimit values invalid? */ | 349 | /* Are the new membase/memlimit values invalid? */ |
340 | if (port->bridge.memlimit < port->bridge.membase) { | 350 | if (port->bridge.memlimit < port->bridge.membase || |
351 | !(port->bridge.command & PCI_COMMAND_MEMORY)) { | ||
341 | 352 | ||
342 | /* If a window was configured, remove it */ | 353 | /* If a window was configured, remove it */ |
343 | if (port->memwin_base) { | 354 | if (port->memwin_base) { |
@@ -426,9 +437,12 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, | |||
426 | break; | 437 | break; |
427 | 438 | ||
428 | case PCI_IO_BASE: | 439 | case PCI_IO_BASE: |
429 | *value = (bridge->secondary_status << 16 | | 440 | if (!mvebu_has_ioport(port)) |
430 | bridge->iolimit << 8 | | 441 | *value = bridge->secondary_status << 16; |
431 | bridge->iobase); | 442 | else |
443 | *value = (bridge->secondary_status << 16 | | ||
444 | bridge->iolimit << 8 | | ||
445 | bridge->iobase); | ||
432 | break; | 446 | break; |
433 | 447 | ||
434 | case PCI_MEMORY_BASE: | 448 | case PCI_MEMORY_BASE: |
@@ -490,8 +504,19 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, | |||
490 | 504 | ||
491 | switch (where & ~3) { | 505 | switch (where & ~3) { |
492 | case PCI_COMMAND: | 506 | case PCI_COMMAND: |
507 | { | ||
508 | u32 old = bridge->command; | ||
509 | |||
510 | if (!mvebu_has_ioport(port)) | ||
511 | value &= ~PCI_COMMAND_IO; | ||
512 | |||
493 | bridge->command = value & 0xffff; | 513 | bridge->command = value & 0xffff; |
514 | if ((old ^ bridge->command) & PCI_COMMAND_IO) | ||
515 | mvebu_pcie_handle_iobase_change(port); | ||
516 | if ((old ^ bridge->command) & PCI_COMMAND_MEMORY) | ||
517 | mvebu_pcie_handle_membase_change(port); | ||
494 | break; | 518 | break; |
519 | } | ||
495 | 520 | ||
496 | case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: | 521 | case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: |
497 | bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; | 522 | bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; |
@@ -505,7 +530,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, | |||
505 | */ | 530 | */ |
506 | bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; | 531 | bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; |
507 | bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; | 532 | bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; |
508 | bridge->secondary_status = value >> 16; | ||
509 | mvebu_pcie_handle_iobase_change(port); | 533 | mvebu_pcie_handle_iobase_change(port); |
510 | break; | 534 | break; |
511 | 535 | ||
@@ -656,7 +680,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | |||
656 | struct mvebu_pcie *pcie = sys_to_pcie(sys); | 680 | struct mvebu_pcie *pcie = sys_to_pcie(sys); |
657 | int i; | 681 | int i; |
658 | 682 | ||
659 | pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset); | 683 | if (resource_size(&pcie->realio) != 0) |
684 | pci_add_resource_offset(&sys->resources, &pcie->realio, | ||
685 | sys->io_offset); | ||
660 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); | 686 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); |
661 | pci_add_resource(&sys->resources, &pcie->busn); | 687 | pci_add_resource(&sys->resources, &pcie->busn); |
662 | 688 | ||
@@ -707,9 +733,9 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | |||
707 | * aligned on their size | 733 | * aligned on their size |
708 | */ | 734 | */ |
709 | if (res->flags & IORESOURCE_IO) | 735 | if (res->flags & IORESOURCE_IO) |
710 | return round_up(start, max((resource_size_t)SZ_64K, size)); | 736 | return round_up(start, max_t(resource_size_t, SZ_64K, size)); |
711 | else if (res->flags & IORESOURCE_MEM) | 737 | else if (res->flags & IORESOURCE_MEM) |
712 | return round_up(start, max((resource_size_t)SZ_1M, size)); | 738 | return round_up(start, max_t(resource_size_t, SZ_1M, size)); |
713 | else | 739 | else |
714 | return start; | 740 | return start; |
715 | } | 741 | } |
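Note: the max() -> max_t() change above is cosmetic. The kernel's max() macro insists that both operands have the same type, which the old code satisfied with an open-coded cast; max_t(type, a, b) performs that cast itself and is the preferred idiom. The I/O branch, for example, becomes:

/* max_t() casts both operands to resource_size_t before comparing,
 * replacing the explicit (resource_size_t)SZ_64K cast. */
return round_up(start, max_t(resource_size_t, SZ_64K, size));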
@@ -757,12 +783,17 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, | |||
757 | #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) | 783 | #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) |
758 | 784 | ||
759 | static int mvebu_get_tgt_attr(struct device_node *np, int devfn, | 785 | static int mvebu_get_tgt_attr(struct device_node *np, int devfn, |
760 | unsigned long type, int *tgt, int *attr) | 786 | unsigned long type, |
787 | unsigned int *tgt, | ||
788 | unsigned int *attr) | ||
761 | { | 789 | { |
762 | const int na = 3, ns = 2; | 790 | const int na = 3, ns = 2; |
763 | const __be32 *range; | 791 | const __be32 *range; |
764 | int rlen, nranges, rangesz, pna, i; | 792 | int rlen, nranges, rangesz, pna, i; |
765 | 793 | ||
794 | *tgt = -1; | ||
795 | *attr = -1; | ||
796 | |||
766 | range = of_get_property(np, "ranges", &rlen); | 797 | range = of_get_property(np, "ranges", &rlen); |
767 | if (!range) | 798 | if (!range) |
768 | return -EINVAL; | 799 | return -EINVAL; |
@@ -832,16 +863,15 @@ static int mvebu_pcie_probe(struct platform_device *pdev) | |||
832 | } | 863 | } |
833 | 864 | ||
834 | mvebu_mbus_get_pcie_io_aperture(&pcie->io); | 865 | mvebu_mbus_get_pcie_io_aperture(&pcie->io); |
835 | if (resource_size(&pcie->io) == 0) { | ||
836 | dev_err(&pdev->dev, "invalid I/O aperture size\n"); | ||
837 | return -EINVAL; | ||
838 | } | ||
839 | 866 | ||
840 | pcie->realio.flags = pcie->io.flags; | 867 | if (resource_size(&pcie->io) != 0) { |
841 | pcie->realio.start = PCIBIOS_MIN_IO; | 868 | pcie->realio.flags = pcie->io.flags; |
842 | pcie->realio.end = min_t(resource_size_t, | 869 | pcie->realio.start = PCIBIOS_MIN_IO; |
843 | IO_SPACE_LIMIT, | 870 | pcie->realio.end = min_t(resource_size_t, |
844 | resource_size(&pcie->io)); | 871 | IO_SPACE_LIMIT, |
872 | resource_size(&pcie->io)); | ||
873 | } else | ||
874 | pcie->realio = pcie->io; | ||
845 | 875 | ||
846 | /* Get the bus range */ | 876 | /* Get the bus range */ |
847 | ret = of_pci_parse_bus_range(np, &pcie->busn); | 877 | ret = of_pci_parse_bus_range(np, &pcie->busn); |
@@ -900,12 +930,12 @@ static int mvebu_pcie_probe(struct platform_device *pdev) | |||
900 | continue; | 930 | continue; |
901 | } | 931 | } |
902 | 932 | ||
903 | ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO, | 933 | if (resource_size(&pcie->io) != 0) |
904 | &port->io_target, &port->io_attr); | 934 | mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO, |
905 | if (ret < 0) { | 935 | &port->io_target, &port->io_attr); |
906 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for io window\n", | 936 | else { |
907 | port->port, port->lane); | 937 | port->io_target = -1; |
908 | continue; | 938 | port->io_attr = -1; |
909 | } | 939 | } |
910 | 940 | ||
911 | port->reset_gpio = of_get_named_gpio_flags(child, | 941 | port->reset_gpio = of_get_named_gpio_flags(child, |
@@ -954,14 +984,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev) | |||
954 | 984 | ||
955 | mvebu_pcie_set_local_dev_nr(port, 1); | 985 | mvebu_pcie_set_local_dev_nr(port, 1); |
956 | 986 | ||
957 | port->clk = of_clk_get_by_name(child, NULL); | ||
958 | if (IS_ERR(port->clk)) { | ||
959 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", | ||
960 | port->port, port->lane); | ||
961 | iounmap(port->base); | ||
962 | continue; | ||
963 | } | ||
964 | |||
965 | port->dn = child; | 987 | port->dn = child; |
966 | spin_lock_init(&port->conf_lock); | 988 | spin_lock_init(&port->conf_lock); |
967 | mvebu_sw_pci_bridge_init(port); | 989 | mvebu_sw_pci_bridge_init(port); |
@@ -969,6 +991,10 @@ static int mvebu_pcie_probe(struct platform_device *pdev) | |||
969 | } | 991 | } |
970 | 992 | ||
971 | pcie->nports = i; | 993 | pcie->nports = i; |
994 | |||
995 | for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) | ||
996 | pci_ioremap_io(i, pcie->io.start + i); | ||
997 | |||
972 | mvebu_pcie_msi_enable(pcie); | 998 | mvebu_pcie_msi_enable(pcie); |
973 | mvebu_pcie_enable(pcie); | 999 | mvebu_pcie_enable(pcie); |
974 | 1000 | ||
@@ -988,8 +1014,7 @@ static struct platform_driver mvebu_pcie_driver = { | |||
988 | .driver = { | 1014 | .driver = { |
989 | .owner = THIS_MODULE, | 1015 | .owner = THIS_MODULE, |
990 | .name = "mvebu-pcie", | 1016 | .name = "mvebu-pcie", |
991 | .of_match_table = | 1017 | .of_match_table = mvebu_pcie_of_match_table, |
992 | of_match_ptr(mvebu_pcie_of_match_table), | ||
993 | /* driver unloading/unbinding currently not supported */ | 1018 | /* driver unloading/unbinding currently not supported */ |
994 | .suppress_bind_attrs = true, | 1019 | .suppress_bind_attrs = true, |
995 | }, | 1020 | }, |
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index cbaa5c4397e3..ceec147baec3 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/pm_runtime.h> | ||
20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
21 | 22 | ||
22 | /* AHB-PCI Bridge PCI communication registers */ | 23 | /* AHB-PCI Bridge PCI communication registers */ |
@@ -77,6 +78,7 @@ | |||
77 | #define RCAR_PCI_NR_CONTROLLERS 3 | 78 | #define RCAR_PCI_NR_CONTROLLERS 3 |
78 | 79 | ||
79 | struct rcar_pci_priv { | 80 | struct rcar_pci_priv { |
81 | struct device *dev; | ||
80 | void __iomem *reg; | 82 | void __iomem *reg; |
81 | struct resource io_res; | 83 | struct resource io_res; |
82 | struct resource mem_res; | 84 | struct resource mem_res; |
@@ -169,8 +171,11 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys) | |||
169 | void __iomem *reg = priv->reg; | 171 | void __iomem *reg = priv->reg; |
170 | u32 val; | 172 | u32 val; |
171 | 173 | ||
174 | pm_runtime_enable(priv->dev); | ||
175 | pm_runtime_get_sync(priv->dev); | ||
176 | |||
172 | val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); | 177 | val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); |
173 | pr_info("PCI: bus%u revision %x\n", sys->busnr, val); | 178 | dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val); |
174 | 179 | ||
175 | /* Disable Direct Power Down State and assert reset */ | 180 | /* Disable Direct Power Down State and assert reset */ |
176 | val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; | 181 | val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; |
@@ -276,8 +281,8 @@ static int __init rcar_pci_probe(struct platform_device *pdev) | |||
276 | 281 | ||
277 | cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 282 | cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
278 | reg = devm_ioremap_resource(&pdev->dev, cfg_res); | 283 | reg = devm_ioremap_resource(&pdev->dev, cfg_res); |
279 | if (!reg) | 284 | if (IS_ERR(reg)) |
280 | return -ENODEV; | 285 | return PTR_ERR(reg); |
281 | 286 | ||
282 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 287 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
283 | if (!mem_res || !mem_res->start) | 288 | if (!mem_res || !mem_res->start) |
@@ -301,6 +306,7 @@ static int __init rcar_pci_probe(struct platform_device *pdev) | |||
301 | 306 | ||
302 | priv->irq = platform_get_irq(pdev, 0); | 307 | priv->irq = platform_get_irq(pdev, 0); |
303 | priv->reg = reg; | 308 | priv->reg = reg; |
309 | priv->dev = &pdev->dev; | ||
304 | 310 | ||
305 | return rcar_pci_add_controller(priv); | 311 | return rcar_pci_add_controller(priv); |
306 | } | 312 | } |
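Note: two small fixes here. devm_ioremap_resource() signals failure with an ERR_PTR(), never NULL, so the old "!reg" check could not catch errors; and the controller is now runtime-enabled and resumed (pm_runtime_enable()/pm_runtime_get_sync()) in rcar_pci_setup() before its registers are read. A minimal sketch of the correct mapping check (hypothetical helper):

/* Hypothetical helper: devm_ioremap_resource() must be checked with
 * IS_ERR()/PTR_ERR(), not compared against NULL. */
static int example_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}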
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 0afbbbc55c81..b8ba2f794559 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -805,7 +805,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) | |||
805 | afi_writel(pcie, value, AFI_PCIE_CONFIG); | 805 | afi_writel(pcie, value, AFI_PCIE_CONFIG); |
806 | 806 | ||
807 | value = afi_readl(pcie, AFI_FUSE); | 807 | value = afi_readl(pcie, AFI_FUSE); |
808 | value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; | 808 | value |= AFI_FUSE_PCIE_T0_GEN2_DIS; |
809 | afi_writel(pcie, value, AFI_FUSE); | 809 | afi_writel(pcie, value, AFI_FUSE); |
810 | 810 | ||
811 | /* initialize internal PHY, enable up to 16 PCIE lanes */ | 811 | /* initialize internal PHY, enable up to 16 PCIE lanes */ |
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index e33b68be0391..17ce88f79d2b 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
@@ -74,7 +74,7 @@ static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) | |||
74 | return sys->private_data; | 74 | return sys->private_data; |
75 | } | 75 | } |
76 | 76 | ||
77 | int cfg_read(void __iomem *addr, int where, int size, u32 *val) | 77 | int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val) |
78 | { | 78 | { |
79 | *val = readl(addr); | 79 | *val = readl(addr); |
80 | 80 | ||
@@ -88,7 +88,7 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val) | |||
88 | return PCIBIOS_SUCCESSFUL; | 88 | return PCIBIOS_SUCCESSFUL; |
89 | } | 89 | } |
90 | 90 | ||
91 | int cfg_write(void __iomem *addr, int where, int size, u32 val) | 91 | int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val) |
92 | { | 92 | { |
93 | if (size == 4) | 93 | if (size == 4) |
94 | writel(val, addr); | 94 | writel(val, addr); |
@@ -126,7 +126,8 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | |||
126 | if (pp->ops->rd_own_conf) | 126 | if (pp->ops->rd_own_conf) |
127 | ret = pp->ops->rd_own_conf(pp, where, size, val); | 127 | ret = pp->ops->rd_own_conf(pp, where, size, val); |
128 | else | 128 | else |
129 | ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); | 129 | ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, |
130 | size, val); | ||
130 | 131 | ||
131 | return ret; | 132 | return ret; |
132 | } | 133 | } |
@@ -139,8 +140,8 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | |||
139 | if (pp->ops->wr_own_conf) | 140 | if (pp->ops->wr_own_conf) |
140 | ret = pp->ops->wr_own_conf(pp, where, size, val); | 141 | ret = pp->ops->wr_own_conf(pp, where, size, val); |
141 | else | 142 | else |
142 | ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, | 143 | ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where, |
143 | val); | 144 | size, val); |
144 | 145 | ||
145 | return ret; | 146 | return ret; |
146 | } | 147 | } |
@@ -167,11 +168,13 @@ void dw_handle_msi_irq(struct pcie_port *pp) | |||
167 | while ((pos = find_next_bit(&val, 32, pos)) != 32) { | 168 | while ((pos = find_next_bit(&val, 32, pos)) != 32) { |
168 | irq = irq_find_mapping(pp->irq_domain, | 169 | irq = irq_find_mapping(pp->irq_domain, |
169 | i * 32 + pos); | 170 | i * 32 + pos); |
171 | dw_pcie_wr_own_conf(pp, | ||
172 | PCIE_MSI_INTR0_STATUS + i * 12, | ||
173 | 4, 1 << pos); | ||
170 | generic_handle_irq(irq); | 174 | generic_handle_irq(irq); |
171 | pos++; | 175 | pos++; |
172 | } | 176 | } |
173 | } | 177 | } |
174 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val); | ||
175 | } | 178 | } |
176 | } | 179 | } |
177 | 180 | ||
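Note: the MSI demultiplexer above now acknowledges each vector individually, and does so before calling generic_handle_irq(), instead of writing the sampled status word back after the loop. With the old order, a vector that fired again between the status read and the final write-back was cleared without ever being handled; acknowledging first means a re-raised vector simply sets its status bit again and is picked up on the next pass. The ordering, as used in the hunk above:

/* Ack the vector in the MSI controller first, then run its handler;
 * a new interrupt on the same vector re-sets the status bit rather
 * than being lost to a stale write-back. */
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, 1 << pos);
generic_handle_irq(irq);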
@@ -209,6 +212,23 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0) | |||
209 | return 0; | 212 | return 0; |
210 | } | 213 | } |
211 | 214 | ||
215 | static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base, | ||
216 | unsigned int nvec, unsigned int pos) | ||
217 | { | ||
218 | unsigned int i, res, bit, val; | ||
219 | |||
220 | for (i = 0; i < nvec; i++) { | ||
221 | irq_set_msi_desc_off(irq_base, i, NULL); | ||
222 | clear_bit(pos + i, pp->msi_irq_in_use); | ||
223 | /* Disable corresponding interrupt on MSI controller */ | ||
224 | res = ((pos + i) / 32) * 12; | ||
225 | bit = (pos + i) % 32; | ||
226 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | ||
227 | val &= ~(1 << bit); | ||
228 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | ||
229 | } | ||
230 | } | ||
231 | |||
212 | static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) | 232 | static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) |
213 | { | 233 | { |
214 | int res, bit, irq, pos0, pos1, i; | 234 | int res, bit, irq, pos0, pos1, i; |
@@ -242,18 +262,25 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) | |||
242 | if (!irq) | 262 | if (!irq) |
243 | goto no_valid_irq; | 263 | goto no_valid_irq; |
244 | 264 | ||
245 | i = 0; | 265 | /* |
246 | while (i < no_irqs) { | 266 | * irq_create_mapping (called from dw_pcie_host_init) pre-allocates |
267 | * descs so there is no need to allocate descs here. We can therefore | ||
268 | * assume that if irq_find_mapping above returns non-zero, then the | ||
269 | * descs are also successfully allocated. | ||
270 | */ | ||
271 | |||
272 | for (i = 0; i < no_irqs; i++) { | ||
273 | if (irq_set_msi_desc_off(irq, i, desc) != 0) { | ||
274 | clear_irq_range(pp, irq, i, pos0); | ||
275 | goto no_valid_irq; | ||
276 | } | ||
247 | set_bit(pos0 + i, pp->msi_irq_in_use); | 277 | set_bit(pos0 + i, pp->msi_irq_in_use); |
248 | irq_alloc_descs((irq + i), (irq + i), 1, 0); | ||
249 | irq_set_msi_desc(irq + i, desc); | ||
250 | /*Enable corresponding interrupt in MSI interrupt controller */ | 278 | /*Enable corresponding interrupt in MSI interrupt controller */ |
251 | res = ((pos0 + i) / 32) * 12; | 279 | res = ((pos0 + i) / 32) * 12; |
252 | bit = (pos0 + i) % 32; | 280 | bit = (pos0 + i) % 32; |
253 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | 281 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); |
254 | val |= 1 << bit; | 282 | val |= 1 << bit; |
255 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | 283 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); |
256 | i++; | ||
257 | } | 284 | } |
258 | 285 | ||
259 | *pos = pos0; | 286 | *pos = pos0; |
@@ -266,7 +293,7 @@ no_valid_irq: | |||
266 | 293 | ||
267 | static void clear_irq(unsigned int irq) | 294 | static void clear_irq(unsigned int irq) |
268 | { | 295 | { |
269 | int res, bit, val, pos; | 296 | unsigned int pos, nvec; |
270 | struct irq_desc *desc; | 297 | struct irq_desc *desc; |
271 | struct msi_desc *msi; | 298 | struct msi_desc *msi; |
272 | struct pcie_port *pp; | 299 | struct pcie_port *pp; |
@@ -281,18 +308,15 @@ static void clear_irq(unsigned int irq) | |||
281 | return; | 308 | return; |
282 | } | 309 | } |
283 | 310 | ||
311 | /* undo what was done in assign_irq */ | ||
284 | pos = data->hwirq; | 312 | pos = data->hwirq; |
313 | nvec = 1 << msi->msi_attrib.multiple; | ||
285 | 314 | ||
286 | irq_free_desc(irq); | 315 | clear_irq_range(pp, irq, nvec, pos); |
287 | |||
288 | clear_bit(pos, pp->msi_irq_in_use); | ||
289 | 316 | ||
290 | /* Disable corresponding interrupt on MSI interrupt controller */ | 317 | /* all irqs cleared; reset attributes */ |
291 | res = (pos / 32) * 12; | 318 | msi->irq = 0; |
292 | bit = pos % 32; | 319 | msi->msi_attrib.multiple = 0; |
293 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | ||
294 | val &= ~(1 << bit); | ||
295 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | ||
296 | } | 320 | } |
297 | 321 | ||
298 | static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, | 322 | static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, |
@@ -320,10 +344,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, | |||
320 | if (irq < 0) | 344 | if (irq < 0) |
321 | return irq; | 345 | return irq; |
322 | 346 | ||
323 | msg_ctr &= ~PCI_MSI_FLAGS_QSIZE; | 347 | /* |
324 | msg_ctr |= msgvec << 4; | 348 | * write_msi_msg() will update PCI_MSI_FLAGS so there is |
325 | pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, | 349 | * no need to explicitly call pci_write_config_word(). |
326 | msg_ctr); | 350 | */ |
327 | desc->msi_attrib.multiple = msgvec; | 351 | desc->msi_attrib.multiple = msgvec; |
328 | 352 | ||
329 | msg.address_lo = virt_to_phys((void *)pp->msi_data); | 353 | msg.address_lo = virt_to_phys((void *)pp->msi_data); |
@@ -394,6 +418,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp) | |||
394 | + global_io_offset); | 418 | + global_io_offset); |
395 | pp->config.io_size = resource_size(&pp->io); | 419 | pp->config.io_size = resource_size(&pp->io); |
396 | pp->config.io_bus_addr = range.pci_addr; | 420 | pp->config.io_bus_addr = range.pci_addr; |
421 | pp->io_base = range.cpu_addr; | ||
397 | } | 422 | } |
398 | if (restype == IORESOURCE_MEM) { | 423 | if (restype == IORESOURCE_MEM) { |
399 | of_pci_range_to_resource(&range, np, &pp->mem); | 424 | of_pci_range_to_resource(&range, np, &pp->mem); |
@@ -419,7 +444,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp) | |||
419 | 444 | ||
420 | pp->cfg0_base = pp->cfg.start; | 445 | pp->cfg0_base = pp->cfg.start; |
421 | pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; | 446 | pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; |
422 | pp->io_base = pp->io.start; | ||
423 | pp->mem_base = pp->mem.start; | 447 | pp->mem_base = pp->mem.start; |
424 | 448 | ||
425 | pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, | 449 | pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, |
@@ -551,11 +575,13 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | |||
551 | 575 | ||
552 | if (bus->parent->number == pp->root_bus_nr) { | 576 | if (bus->parent->number == pp->root_bus_nr) { |
553 | dw_pcie_prog_viewport_cfg0(pp, busdev); | 577 | dw_pcie_prog_viewport_cfg0(pp, busdev); |
554 | ret = cfg_read(pp->va_cfg0_base + address, where, size, val); | 578 | ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, |
579 | val); | ||
555 | dw_pcie_prog_viewport_mem_outbound(pp); | 580 | dw_pcie_prog_viewport_mem_outbound(pp); |
556 | } else { | 581 | } else { |
557 | dw_pcie_prog_viewport_cfg1(pp, busdev); | 582 | dw_pcie_prog_viewport_cfg1(pp, busdev); |
558 | ret = cfg_read(pp->va_cfg1_base + address, where, size, val); | 583 | ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, |
584 | val); | ||
559 | dw_pcie_prog_viewport_io_outbound(pp); | 585 | dw_pcie_prog_viewport_io_outbound(pp); |
560 | } | 586 | } |
561 | 587 | ||
@@ -574,18 +600,19 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | |||
574 | 600 | ||
575 | if (bus->parent->number == pp->root_bus_nr) { | 601 | if (bus->parent->number == pp->root_bus_nr) { |
576 | dw_pcie_prog_viewport_cfg0(pp, busdev); | 602 | dw_pcie_prog_viewport_cfg0(pp, busdev); |
577 | ret = cfg_write(pp->va_cfg0_base + address, where, size, val); | 603 | ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, |
604 | val); | ||
578 | dw_pcie_prog_viewport_mem_outbound(pp); | 605 | dw_pcie_prog_viewport_mem_outbound(pp); |
579 | } else { | 606 | } else { |
580 | dw_pcie_prog_viewport_cfg1(pp, busdev); | 607 | dw_pcie_prog_viewport_cfg1(pp, busdev); |
581 | ret = cfg_write(pp->va_cfg1_base + address, where, size, val); | 608 | ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, |
609 | val); | ||
582 | dw_pcie_prog_viewport_io_outbound(pp); | 610 | dw_pcie_prog_viewport_io_outbound(pp); |
583 | } | 611 | } |
584 | 612 | ||
585 | return ret; | 613 | return ret; |
586 | } | 614 | } |
587 | 615 | ||
588 | |||
589 | static int dw_pcie_valid_config(struct pcie_port *pp, | 616 | static int dw_pcie_valid_config(struct pcie_port *pp, |
590 | struct pci_bus *bus, int dev) | 617 | struct pci_bus *bus, int dev) |
591 | { | 618 | { |
@@ -679,7 +706,7 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys) | |||
679 | 706 | ||
680 | if (global_io_offset < SZ_1M && pp->config.io_size > 0) { | 707 | if (global_io_offset < SZ_1M && pp->config.io_size > 0) { |
681 | sys->io_offset = global_io_offset - pp->config.io_bus_addr; | 708 | sys->io_offset = global_io_offset - pp->config.io_bus_addr; |
682 | pci_ioremap_io(sys->io_offset, pp->io.start); | 709 | pci_ioremap_io(global_io_offset, pp->io_base); |
683 | global_io_offset += SZ_64K; | 710 | global_io_offset += SZ_64K; |
684 | pci_add_resource_offset(&sys->resources, &pp->io, | 711 | pci_add_resource_offset(&sys->resources, &pp->io, |
685 | sys->io_offset); | 712 | sys->io_offset); |
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index c15379be2372..3063b3594d88 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h | |||
@@ -66,8 +66,8 @@ struct pcie_host_ops { | |||
66 | void (*host_init)(struct pcie_port *pp); | 66 | void (*host_init)(struct pcie_port *pp); |
67 | }; | 67 | }; |
68 | 68 | ||
69 | int cfg_read(void __iomem *addr, int where, int size, u32 *val); | 69 | int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); |
70 | int cfg_write(void __iomem *addr, int where, int size, u32 val); | 70 | int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); |
71 | void dw_handle_msi_irq(struct pcie_port *pp); | 71 | void dw_handle_msi_irq(struct pcie_port *pp); |
72 | void dw_pcie_msi_init(struct pcie_port *pp); | 72 | void dw_pcie_msi_init(struct pcie_port *pp); |
73 | int dw_pcie_link_up(struct pcie_port *pp); | 73 | int dw_pcie_link_up(struct pcie_port *pp); |
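Note: the config-space accessors gain a dw_pcie_ prefix because this header is shared by every DesignWare-based glue driver (pci-exynos.c above is one), and the bare cfg_read/cfg_write names were too generic for globally visible symbols. A glue driver's own-config accessor after the rename looks like the sketch below (mirrors the exynos hunk earlier; the wrapper name is hypothetical):

/* Hypothetical glue-driver wrapper using the renamed helper. */
static int my_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val)
{
	return dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
}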
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 1592dbe4f904..b6162be4df40 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
@@ -77,6 +77,8 @@ struct acpiphp_bridge { | |||
77 | 77 | ||
78 | /* PCI-to-PCI bridge device */ | 78 | /* PCI-to-PCI bridge device */ |
79 | struct pci_dev *pci_dev; | 79 | struct pci_dev *pci_dev; |
80 | |||
81 | bool is_going_away; | ||
80 | }; | 82 | }; |
81 | 83 | ||
82 | 84 | ||
@@ -150,6 +152,7 @@ struct acpiphp_attention_info | |||
150 | /* slot flags */ | 152 | /* slot flags */ |
151 | 153 | ||
152 | #define SLOT_ENABLED (0x00000001) | 154 | #define SLOT_ENABLED (0x00000001) |
155 | #define SLOT_IS_GOING_AWAY (0x00000002) | ||
153 | 156 | ||
154 | /* function flags */ | 157 | /* function flags */ |
155 | 158 | ||
@@ -169,7 +172,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot); | |||
169 | typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data); | 172 | typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data); |
170 | 173 | ||
171 | int acpiphp_enable_slot(struct acpiphp_slot *slot); | 174 | int acpiphp_enable_slot(struct acpiphp_slot *slot); |
172 | int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot); | 175 | int acpiphp_disable_slot(struct acpiphp_slot *slot); |
173 | u8 acpiphp_get_power_status(struct acpiphp_slot *slot); | 176 | u8 acpiphp_get_power_status(struct acpiphp_slot *slot); |
174 | u8 acpiphp_get_attention_status(struct acpiphp_slot *slot); | 177 | u8 acpiphp_get_attention_status(struct acpiphp_slot *slot); |
175 | u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); | 178 | u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); |
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index dca66bc44578..728c31f4c2c5 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -156,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
156 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 156 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
157 | 157 | ||
158 | /* disable the specified slot */ | 158 | /* disable the specified slot */ |
159 | return acpiphp_disable_and_eject_slot(slot->acpi_slot); | 159 | return acpiphp_disable_slot(slot->acpi_slot); |
160 | } | 160 | } |
161 | 161 | ||
162 | 162 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index e86439283a5d..ee26bac2d378 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -432,6 +432,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) | |||
432 | pr_err("failed to remove notify handler\n"); | 432 | pr_err("failed to remove notify handler\n"); |
433 | } | 433 | } |
434 | } | 434 | } |
435 | slot->flags |= SLOT_IS_GOING_AWAY; | ||
435 | if (slot->slot) | 436 | if (slot->slot) |
436 | acpiphp_unregister_hotplug_slot(slot); | 437 | acpiphp_unregister_hotplug_slot(slot); |
437 | } | 438 | } |
@@ -439,6 +440,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) | |||
439 | mutex_lock(&bridge_mutex); | 440 | mutex_lock(&bridge_mutex); |
440 | list_del(&bridge->list); | 441 | list_del(&bridge->list); |
441 | mutex_unlock(&bridge_mutex); | 442 | mutex_unlock(&bridge_mutex); |
443 | |||
444 | bridge->is_going_away = true; | ||
442 | } | 445 | } |
443 | 446 | ||
444 | /** | 447 | /** |
@@ -757,6 +760,10 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
757 | { | 760 | { |
758 | struct acpiphp_slot *slot; | 761 | struct acpiphp_slot *slot; |
759 | 762 | ||
763 | /* Bail out if the bridge is going away. */ | ||
764 | if (bridge->is_going_away) | ||
765 | return; | ||
766 | |||
760 | list_for_each_entry(slot, &bridge->slots, node) { | 767 | list_for_each_entry(slot, &bridge->slots, node) { |
761 | struct pci_bus *bus = slot->bus; | 768 | struct pci_bus *bus = slot->bus; |
762 | struct pci_dev *dev, *tmp; | 769 | struct pci_dev *dev, *tmp; |
@@ -827,6 +834,8 @@ void acpiphp_check_host_bridge(acpi_handle handle) | |||
827 | } | 834 | } |
828 | } | 835 | } |
829 | 836 | ||
837 | static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot); | ||
838 | |||
830 | static void hotplug_event(acpi_handle handle, u32 type, void *data) | 839 | static void hotplug_event(acpi_handle handle, u32 type, void *data) |
831 | { | 840 | { |
832 | struct acpiphp_context *context = data; | 841 | struct acpiphp_context *context = data; |
@@ -856,6 +865,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
856 | } else { | 865 | } else { |
857 | struct acpiphp_slot *slot = func->slot; | 866 | struct acpiphp_slot *slot = func->slot; |
858 | 867 | ||
868 | if (slot->flags & SLOT_IS_GOING_AWAY) | ||
869 | break; | ||
870 | |||
859 | mutex_lock(&slot->crit_sect); | 871 | mutex_lock(&slot->crit_sect); |
860 | enable_slot(slot); | 872 | enable_slot(slot); |
861 | mutex_unlock(&slot->crit_sect); | 873 | mutex_unlock(&slot->crit_sect); |
@@ -871,6 +883,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
871 | struct acpiphp_slot *slot = func->slot; | 883 | struct acpiphp_slot *slot = func->slot; |
872 | int ret; | 884 | int ret; |
873 | 885 | ||
886 | if (slot->flags & SLOT_IS_GOING_AWAY) | ||
887 | break; | ||
888 | |||
874 | /* | 889 | /* |
875 | * Check if anything has changed in the slot and rescan | 890 | * Check if anything has changed in the slot and rescan |
876 | * from the parent if that's the case. | 891 | * from the parent if that's the case. |
@@ -900,9 +915,11 @@ static void hotplug_event_work(void *data, u32 type) | |||
900 | acpi_handle handle = context->handle; | 915 | acpi_handle handle = context->handle; |
901 | 916 | ||
902 | acpi_scan_lock_acquire(); | 917 | acpi_scan_lock_acquire(); |
918 | pci_lock_rescan_remove(); | ||
903 | 919 | ||
904 | hotplug_event(handle, type, context); | 920 | hotplug_event(handle, type, context); |
905 | 921 | ||
922 | pci_unlock_rescan_remove(); | ||
906 | acpi_scan_lock_release(); | 923 | acpi_scan_lock_release(); |
907 | acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL); | 924 | acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL); |
908 | put_bridge(context->func.parent); | 925 | put_bridge(context->func.parent); |
@@ -1070,12 +1087,19 @@ void acpiphp_remove_slots(struct pci_bus *bus) | |||
1070 | */ | 1087 | */ |
1071 | int acpiphp_enable_slot(struct acpiphp_slot *slot) | 1088 | int acpiphp_enable_slot(struct acpiphp_slot *slot) |
1072 | { | 1089 | { |
1090 | pci_lock_rescan_remove(); | ||
1091 | |||
1092 | if (slot->flags & SLOT_IS_GOING_AWAY) | ||
1093 | return -ENODEV; | ||
1094 | |||
1073 | mutex_lock(&slot->crit_sect); | 1095 | mutex_lock(&slot->crit_sect); |
1074 | /* configure all functions */ | 1096 | /* configure all functions */ |
1075 | if (!(slot->flags & SLOT_ENABLED)) | 1097 | if (!(slot->flags & SLOT_ENABLED)) |
1076 | enable_slot(slot); | 1098 | enable_slot(slot); |
1077 | 1099 | ||
1078 | mutex_unlock(&slot->crit_sect); | 1100 | mutex_unlock(&slot->crit_sect); |
1101 | |||
1102 | pci_unlock_rescan_remove(); | ||
1079 | return 0; | 1103 | return 0; |
1080 | } | 1104 | } |
1081 | 1105 | ||
@@ -1083,10 +1107,12 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) | |||
1083 | * acpiphp_disable_and_eject_slot - power off and eject slot | 1107 | * acpiphp_disable_and_eject_slot - power off and eject slot |
1084 | * @slot: ACPI PHP slot | 1108 | * @slot: ACPI PHP slot |
1085 | */ | 1109 | */ |
1086 | int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot) | 1110 | static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot) |
1087 | { | 1111 | { |
1088 | struct acpiphp_func *func; | 1112 | struct acpiphp_func *func; |
1089 | int retval = 0; | 1113 | |
1114 | if (slot->flags & SLOT_IS_GOING_AWAY) | ||
1115 | return -ENODEV; | ||
1090 | 1116 | ||
1091 | mutex_lock(&slot->crit_sect); | 1117 | mutex_lock(&slot->crit_sect); |
1092 | 1118 | ||
@@ -1104,9 +1130,18 @@ int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot) | |||
1104 | } | 1130 | } |
1105 | 1131 | ||
1106 | mutex_unlock(&slot->crit_sect); | 1132 | mutex_unlock(&slot->crit_sect); |
1107 | return retval; | 1133 | return 0; |
1108 | } | 1134 | } |
1109 | 1135 | ||
1136 | int acpiphp_disable_slot(struct acpiphp_slot *slot) | ||
1137 | { | ||
1138 | int ret; | ||
1139 | |||
1140 | pci_lock_rescan_remove(); | ||
1141 | ret = acpiphp_disable_and_eject_slot(slot); | ||
1142 | pci_unlock_rescan_remove(); | ||
1143 | return ret; | ||
1144 | } | ||
1110 | 1145 | ||
1111 | /* | 1146 | /* |
1112 | * slot enabled: 1 | 1147 | * slot enabled: 1 |
@@ -1117,7 +1152,6 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot) | |||
1117 | return (slot->flags & SLOT_ENABLED); | 1152 | return (slot->flags & SLOT_ENABLED); |
1118 | } | 1153 | } |
1119 | 1154 | ||
1120 | |||
1121 | /* | 1155 | /* |
1122 | * latch open: 1 | 1156 | * latch open: 1 |
1123 | * latch closed: 0 | 1157 | * latch closed: 0 |
@@ -1127,7 +1161,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot) | |||
1127 | return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI); | 1161 | return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI); |
1128 | } | 1162 | } |
1129 | 1163 | ||
1130 | |||
1131 | /* | 1164 | /* |
1132 | * adapter presence : 1 | 1165 | * adapter presence : 1 |
1133 | * absence : 0 | 1166 | * absence : 0 |
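The acpiphp hunks above all follow one pattern: every path that can add or remove PCI devices is bracketed by the new global rescan-remove lock, and slots whose parent bridge is being torn down are refused early via the SLOT_IS_GOING_AWAY / is_going_away markers. A minimal sketch of that shape, with a hypothetical my_slot type standing in for the driver's own bookkeeping (the sketch releases the lock on its early-exit path):

#include <linux/pci.h>
#include <linux/mutex.h>

struct my_slot {
	struct mutex	crit_sect;	/* serializes per-slot work */
	unsigned int	flags;
#define MY_SLOT_IS_GOING_AWAY	0x2
};

static int my_slot_enable(struct my_slot *slot)
{
	int ret = 0;

	pci_lock_rescan_remove();	/* global add/remove exclusion */

	if (slot->flags & MY_SLOT_IS_GOING_AWAY) {
		ret = -ENODEV;		/* parent bridge is going away */
		goto out;
	}

	mutex_lock(&slot->crit_sect);
	/* pci_scan_slot() / pci_bus_add_devices() would run here */
	mutex_unlock(&slot->crit_sect);
out:
	pci_unlock_rescan_remove();
	return ret;
}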
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index d3add9819f63..8c1464851768 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -254,9 +254,12 @@ int __ref cpci_configure_slot(struct slot *slot) | |||
254 | { | 254 | { |
255 | struct pci_dev *dev; | 255 | struct pci_dev *dev; |
256 | struct pci_bus *parent; | 256 | struct pci_bus *parent; |
257 | int ret = 0; | ||
257 | 258 | ||
258 | dbg("%s - enter", __func__); | 259 | dbg("%s - enter", __func__); |
259 | 260 | ||
261 | pci_lock_rescan_remove(); | ||
262 | |||
260 | if (slot->dev == NULL) { | 263 | if (slot->dev == NULL) { |
261 | dbg("pci_dev null, finding %02x:%02x:%x", | 264 | dbg("pci_dev null, finding %02x:%02x:%x", |
262 | slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn)); | 265 | slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn)); |
@@ -277,7 +280,8 @@ int __ref cpci_configure_slot(struct slot *slot) | |||
277 | slot->dev = pci_get_slot(slot->bus, slot->devfn); | 280 | slot->dev = pci_get_slot(slot->bus, slot->devfn); |
278 | if (slot->dev == NULL) { | 281 | if (slot->dev == NULL) { |
279 | err("Could not find PCI device for slot %02x", slot->number); | 282 | err("Could not find PCI device for slot %02x", slot->number); |
280 | return -ENODEV; | 283 | ret = -ENODEV; |
284 | goto out; | ||
281 | } | 285 | } |
282 | } | 286 | } |
283 | parent = slot->dev->bus; | 287 | parent = slot->dev->bus; |
@@ -294,8 +298,10 @@ int __ref cpci_configure_slot(struct slot *slot) | |||
294 | 298 | ||
295 | pci_bus_add_devices(parent); | 299 | pci_bus_add_devices(parent); |
296 | 300 | ||
301 | out: | ||
302 | pci_unlock_rescan_remove(); | ||
297 | dbg("%s - exit", __func__); | 303 | dbg("%s - exit", __func__); |
298 | return 0; | 304 | return ret; |
299 | } | 305 | } |
300 | 306 | ||
301 | int cpci_unconfigure_slot(struct slot* slot) | 307 | int cpci_unconfigure_slot(struct slot* slot) |
@@ -308,6 +314,8 @@ int cpci_unconfigure_slot(struct slot* slot) | |||
308 | return -ENODEV; | 314 | return -ENODEV; |
309 | } | 315 | } |
310 | 316 | ||
317 | pci_lock_rescan_remove(); | ||
318 | |||
311 | list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) { | 319 | list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) { |
312 | if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) | 320 | if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) |
313 | continue; | 321 | continue; |
@@ -318,6 +326,8 @@ int cpci_unconfigure_slot(struct slot* slot) | |||
318 | pci_dev_put(slot->dev); | 326 | pci_dev_put(slot->dev); |
319 | slot->dev = NULL; | 327 | slot->dev = NULL; |
320 | 328 | ||
329 | pci_unlock_rescan_remove(); | ||
330 | |||
321 | dbg("%s - exit", __func__); | 331 | dbg("%s - exit", __func__); |
322 | return 0; | 332 | return 0; |
323 | } | 333 | } |
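cpci_unconfigure_slot() above shows the tear-down side of the same locking pattern: all functions of the slot are stopped and removed while the rescan-remove lock is held. A self-contained sketch of that loop (example_unconfigure_slot() is a made-up name; the body mirrors the cpci code):

#include <linux/pci.h>

static void example_unconfigure_slot(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev, *tmp;

	pci_lock_rescan_remove();

	list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
		if (PCI_SLOT(dev->devfn) != PCI_SLOT(devfn))
			continue;	/* different slot, leave it alone */
		pci_dev_get(dev);
		pci_stop_and_remove_bus_device(dev);
		pci_dev_put(dev);
	}

	pci_unlock_rescan_remove();
}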
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 6e4a12c91adb..a3e3c2002b58 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c | |||
@@ -86,6 +86,8 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) | |||
86 | struct pci_bus *child; | 86 | struct pci_bus *child; |
87 | int num; | 87 | int num; |
88 | 88 | ||
89 | pci_lock_rescan_remove(); | ||
90 | |||
89 | if (func->pci_dev == NULL) | 91 | if (func->pci_dev == NULL) |
90 | func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function)); | 92 | func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function)); |
91 | 93 | ||
@@ -100,7 +102,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) | |||
100 | func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function)); | 102 | func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function)); |
101 | if (func->pci_dev == NULL) { | 103 | if (func->pci_dev == NULL) { |
102 | dbg("ERROR: pci_dev still null\n"); | 104 | dbg("ERROR: pci_dev still null\n"); |
103 | return 0; | 105 | goto out; |
104 | } | 106 | } |
105 | } | 107 | } |
106 | 108 | ||
@@ -113,6 +115,8 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) | |||
113 | 115 | ||
114 | pci_dev_put(func->pci_dev); | 116 | pci_dev_put(func->pci_dev); |
115 | 117 | ||
118 | out: | ||
119 | pci_unlock_rescan_remove(); | ||
116 | return 0; | 120 | return 0; |
117 | } | 121 | } |
118 | 122 | ||
@@ -123,6 +127,7 @@ int cpqhp_unconfigure_device(struct pci_func* func) | |||
123 | 127 | ||
124 | dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); | 128 | dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); |
125 | 129 | ||
130 | pci_lock_rescan_remove(); | ||
126 | for (j=0; j<8 ; j++) { | 131 | for (j=0; j<8 ; j++) { |
127 | struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j)); | 132 | struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j)); |
128 | if (temp) { | 133 | if (temp) { |
@@ -130,6 +135,7 @@ int cpqhp_unconfigure_device(struct pci_func* func) | |||
130 | pci_stop_and_remove_bus_device(temp); | 135 | pci_stop_and_remove_bus_device(temp); |
131 | } | 136 | } |
132 | } | 137 | } |
138 | pci_unlock_rescan_remove(); | ||
133 | return 0; | 139 | return 0; |
134 | } | 140 | } |
135 | 141 | ||
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index efdc13adbe41..cf3ac1e4b099 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -718,6 +718,8 @@ static void ibm_unconfigure_device(struct pci_func *func) | |||
718 | func->device, func->function); | 718 | func->device, func->function); |
719 | debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0); | 719 | debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0); |
720 | 720 | ||
721 | pci_lock_rescan_remove(); | ||
722 | |||
721 | for (j = 0; j < 0x08; j++) { | 723 | for (j = 0; j < 0x08; j++) { |
722 | temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j); | 724 | temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j); |
723 | if (temp) { | 725 | if (temp) { |
@@ -725,7 +727,10 @@ static void ibm_unconfigure_device(struct pci_func *func) | |||
725 | pci_dev_put(temp); | 727 | pci_dev_put(temp); |
726 | } | 728 | } |
727 | } | 729 | } |
730 | |||
728 | pci_dev_put(func->dev); | 731 | pci_dev_put(func->dev); |
732 | |||
733 | pci_unlock_rescan_remove(); | ||
729 | } | 734 | } |
730 | 735 | ||
731 | /* | 736 | /* |
@@ -780,6 +785,8 @@ static int ibm_configure_device(struct pci_func *func) | |||
780 | int flag = 0; /* this is to make sure we don't double scan the bus, | 785 | int flag = 0; /* this is to make sure we don't double scan the bus, |
781 | for bridged devices primarily */ | 786 | for bridged devices primarily */ |
782 | 787 | ||
788 | pci_lock_rescan_remove(); | ||
789 | |||
783 | if (!(bus_structure_fixup(func->busno))) | 790 | if (!(bus_structure_fixup(func->busno))) |
784 | flag = 1; | 791 | flag = 1; |
785 | if (func->dev == NULL) | 792 | if (func->dev == NULL) |
@@ -789,7 +796,7 @@ static int ibm_configure_device(struct pci_func *func) | |||
789 | if (func->dev == NULL) { | 796 | if (func->dev == NULL) { |
790 | struct pci_bus *bus = pci_find_bus(0, func->busno); | 797 | struct pci_bus *bus = pci_find_bus(0, func->busno); |
791 | if (!bus) | 798 | if (!bus) |
792 | return 0; | 799 | goto out; |
793 | 800 | ||
794 | num = pci_scan_slot(bus, | 801 | num = pci_scan_slot(bus, |
795 | PCI_DEVFN(func->device, func->function)); | 802 | PCI_DEVFN(func->device, func->function)); |
@@ -800,7 +807,7 @@ static int ibm_configure_device(struct pci_func *func) | |||
800 | PCI_DEVFN(func->device, func->function)); | 807 | PCI_DEVFN(func->device, func->function)); |
801 | if (func->dev == NULL) { | 808 | if (func->dev == NULL) { |
802 | err("ERROR... : pci_dev still NULL\n"); | 809 | err("ERROR... : pci_dev still NULL\n"); |
803 | return 0; | 810 | goto out; |
804 | } | 811 | } |
805 | } | 812 | } |
806 | if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { | 813 | if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { |
@@ -810,6 +817,8 @@ static int ibm_configure_device(struct pci_func *func) | |||
810 | pci_bus_add_devices(child); | 817 | pci_bus_add_devices(child); |
811 | } | 818 | } |
812 | 819 | ||
820 | out: | ||
821 | pci_unlock_rescan_remove(); | ||
813 | return 0; | 822 | return 0; |
814 | } | 823 | } |
815 | 824 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 21e865ded1dc..ccb0925bcd7b 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -43,7 +43,6 @@ | |||
43 | extern bool pciehp_poll_mode; | 43 | extern bool pciehp_poll_mode; |
44 | extern int pciehp_poll_time; | 44 | extern int pciehp_poll_time; |
45 | extern bool pciehp_debug; | 45 | extern bool pciehp_debug; |
46 | extern bool pciehp_force; | ||
47 | 46 | ||
48 | #define dbg(format, arg...) \ | 47 | #define dbg(format, arg...) \ |
49 | do { \ | 48 | do { \ |
@@ -140,15 +139,15 @@ struct controller *pcie_init(struct pcie_device *dev); | |||
140 | int pcie_init_notification(struct controller *ctrl); | 139 | int pcie_init_notification(struct controller *ctrl); |
141 | int pciehp_enable_slot(struct slot *p_slot); | 140 | int pciehp_enable_slot(struct slot *p_slot); |
142 | int pciehp_disable_slot(struct slot *p_slot); | 141 | int pciehp_disable_slot(struct slot *p_slot); |
143 | int pcie_enable_notification(struct controller *ctrl); | 142 | void pcie_enable_notification(struct controller *ctrl); |
144 | int pciehp_power_on_slot(struct slot *slot); | 143 | int pciehp_power_on_slot(struct slot *slot); |
145 | int pciehp_power_off_slot(struct slot *slot); | 144 | void pciehp_power_off_slot(struct slot *slot); |
146 | int pciehp_get_power_status(struct slot *slot, u8 *status); | 145 | void pciehp_get_power_status(struct slot *slot, u8 *status); |
147 | int pciehp_get_attention_status(struct slot *slot, u8 *status); | 146 | void pciehp_get_attention_status(struct slot *slot, u8 *status); |
148 | 147 | ||
149 | int pciehp_set_attention_status(struct slot *slot, u8 status); | 148 | void pciehp_set_attention_status(struct slot *slot, u8 status); |
150 | int pciehp_get_latch_status(struct slot *slot, u8 *status); | 149 | void pciehp_get_latch_status(struct slot *slot, u8 *status); |
151 | int pciehp_get_adapter_status(struct slot *slot, u8 *status); | 150 | void pciehp_get_adapter_status(struct slot *slot, u8 *status); |
152 | int pciehp_query_power_fault(struct slot *slot); | 151 | int pciehp_query_power_fault(struct slot *slot); |
153 | void pciehp_green_led_on(struct slot *slot); | 152 | void pciehp_green_led_on(struct slot *slot); |
154 | void pciehp_green_led_off(struct slot *slot); | 153 | void pciehp_green_led_off(struct slot *slot); |
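The prototype changes above (int to void for the status and indicator accessors) pair with the pciehp_hpc.c hunks near the end of this diff, where the driver-private pciehp_readw()/pciehp_writew() wrappers are replaced by the generic pcie_capability_*() helpers, which the driver now treats as unable to fail. A minimal, self-contained sketch of that access style (example_card_present() is a made-up name):

#include <linux/pci.h>

static bool example_card_present(struct pci_dev *port)
{
	u16 slot_status;

	/* read Slot Status from the port's PCIe capability */
	pcie_capability_read_word(port, PCI_EXP_SLTSTA, &slot_status);
	return slot_status & PCI_EXP_SLTSTA_PDS;	/* Presence Detect State */
}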
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index bbd48bbe4e9b..53b58debc288 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -41,7 +41,7 @@ | |||
41 | bool pciehp_debug; | 41 | bool pciehp_debug; |
42 | bool pciehp_poll_mode; | 42 | bool pciehp_poll_mode; |
43 | int pciehp_poll_time; | 43 | int pciehp_poll_time; |
44 | bool pciehp_force; | 44 | static bool pciehp_force; |
45 | 45 | ||
46 | #define DRIVER_VERSION "0.4" | 46 | #define DRIVER_VERSION "0.4" |
47 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 47 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
@@ -160,7 +160,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) | |||
160 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 160 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
161 | __func__, slot_name(slot)); | 161 | __func__, slot_name(slot)); |
162 | 162 | ||
163 | return pciehp_set_attention_status(slot, status); | 163 | pciehp_set_attention_status(slot, status); |
164 | return 0; | ||
164 | } | 165 | } |
165 | 166 | ||
166 | 167 | ||
@@ -192,7 +193,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
192 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 193 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
193 | __func__, slot_name(slot)); | 194 | __func__, slot_name(slot)); |
194 | 195 | ||
195 | return pciehp_get_power_status(slot, value); | 196 | pciehp_get_power_status(slot, value); |
197 | return 0; | ||
196 | } | 198 | } |
197 | 199 | ||
198 | static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | 200 | static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) |
@@ -202,7 +204,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
202 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 204 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
203 | __func__, slot_name(slot)); | 205 | __func__, slot_name(slot)); |
204 | 206 | ||
205 | return pciehp_get_attention_status(slot, value); | 207 | pciehp_get_attention_status(slot, value); |
208 | return 0; | ||
206 | } | 209 | } |
207 | 210 | ||
208 | static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | 211 | static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) |
@@ -212,7 +215,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
212 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 215 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
213 | __func__, slot_name(slot)); | 216 | __func__, slot_name(slot)); |
214 | 217 | ||
215 | return pciehp_get_latch_status(slot, value); | 218 | pciehp_get_latch_status(slot, value); |
219 | return 0; | ||
216 | } | 220 | } |
217 | 221 | ||
218 | static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | 222 | static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) |
@@ -222,7 +226,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
222 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 226 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
223 | __func__, slot_name(slot)); | 227 | __func__, slot_name(slot)); |
224 | 228 | ||
225 | return pciehp_get_adapter_status(slot, value); | 229 | pciehp_get_adapter_status(slot, value); |
230 | return 0; | ||
226 | } | 231 | } |
227 | 232 | ||
228 | static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) | 233 | static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) |
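With the accessors returning void, the hotplug_slot callbacks above collapse to "fill in the value, report success". A sketch of that shape, assuming the driver's internal struct slot and pciehp_get_adapter_status() from pciehp.h and the private pointer set when the slot was registered:

#include <linux/pci_hotplug.h>
#include "pciehp.h"

static int example_get_adapter_status(struct hotplug_slot *hotplug_slot,
				      u8 *value)
{
	struct slot *slot = hotplug_slot->private;

	pciehp_get_adapter_status(slot, value);	/* cannot fail any more */
	return 0;
}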
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 38f018679175..50628487597d 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -158,11 +158,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
158 | { | 158 | { |
159 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ | 159 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ |
160 | if (POWER_CTRL(ctrl)) { | 160 | if (POWER_CTRL(ctrl)) { |
161 | if (pciehp_power_off_slot(pslot)) { | 161 | pciehp_power_off_slot(pslot); |
162 | ctrl_err(ctrl, | 162 | |
163 | "Issue of Slot Power Off command failed\n"); | ||
164 | return; | ||
165 | } | ||
166 | /* | 163 | /* |
167 | * After turning power off, we must wait for at least 1 second | 164 | * After turning power off, we must wait for at least 1 second |
168 | * before taking any action that relies on power having been | 165 | * before taking any action that relies on power having been |
@@ -171,16 +168,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
171 | msleep(1000); | 168 | msleep(1000); |
172 | } | 169 | } |
173 | 170 | ||
174 | if (PWR_LED(ctrl)) | 171 | pciehp_green_led_off(pslot); |
175 | pciehp_green_led_off(pslot); | 172 | pciehp_set_attention_status(pslot, 1); |
176 | |||
177 | if (ATTN_LED(ctrl)) { | ||
178 | if (pciehp_set_attention_status(pslot, 1)) { | ||
179 | ctrl_err(ctrl, | ||
180 | "Issue of Set Attention Led command failed\n"); | ||
181 | return; | ||
182 | } | ||
183 | } | ||
184 | } | 173 | } |
185 | 174 | ||
186 | /** | 175 | /** |
@@ -203,8 +192,7 @@ static int board_added(struct slot *p_slot) | |||
203 | return retval; | 192 | return retval; |
204 | } | 193 | } |
205 | 194 | ||
206 | if (PWR_LED(ctrl)) | 195 | pciehp_green_led_blink(p_slot); |
207 | pciehp_green_led_blink(p_slot); | ||
208 | 196 | ||
209 | /* Check link training status */ | 197 | /* Check link training status */ |
210 | retval = pciehp_check_link_status(ctrl); | 198 | retval = pciehp_check_link_status(ctrl); |
@@ -227,9 +215,7 @@ static int board_added(struct slot *p_slot) | |||
227 | goto err_exit; | 215 | goto err_exit; |
228 | } | 216 | } |
229 | 217 | ||
230 | if (PWR_LED(ctrl)) | 218 | pciehp_green_led_on(p_slot); |
231 | pciehp_green_led_on(p_slot); | ||
232 | |||
233 | return 0; | 219 | return 0; |
234 | 220 | ||
235 | err_exit: | 221 | err_exit: |
@@ -243,7 +229,7 @@ err_exit: | |||
243 | */ | 229 | */ |
244 | static int remove_board(struct slot *p_slot) | 230 | static int remove_board(struct slot *p_slot) |
245 | { | 231 | { |
246 | int retval = 0; | 232 | int retval; |
247 | struct controller *ctrl = p_slot->ctrl; | 233 | struct controller *ctrl = p_slot->ctrl; |
248 | 234 | ||
249 | retval = pciehp_unconfigure_device(p_slot); | 235 | retval = pciehp_unconfigure_device(p_slot); |
@@ -251,13 +237,8 @@ static int remove_board(struct slot *p_slot) | |||
251 | return retval; | 237 | return retval; |
252 | 238 | ||
253 | if (POWER_CTRL(ctrl)) { | 239 | if (POWER_CTRL(ctrl)) { |
254 | /* power off slot */ | 240 | pciehp_power_off_slot(p_slot); |
255 | retval = pciehp_power_off_slot(p_slot); | 241 | |
256 | if (retval) { | ||
257 | ctrl_err(ctrl, | ||
258 | "Issue of Slot Disable command failed\n"); | ||
259 | return retval; | ||
260 | } | ||
261 | /* | 242 | /* |
262 | * After turning power off, we must wait for at least 1 second | 243 | * After turning power off, we must wait for at least 1 second |
263 | * before taking any action that relies on power having been | 244 | * before taking any action that relies on power having been |
@@ -267,9 +248,7 @@ static int remove_board(struct slot *p_slot) | |||
267 | } | 248 | } |
268 | 249 | ||
269 | /* turn off Green LED */ | 250 | /* turn off Green LED */ |
270 | if (PWR_LED(ctrl)) | 251 | pciehp_green_led_off(p_slot); |
271 | pciehp_green_led_off(p_slot); | ||
272 | |||
273 | return 0; | 252 | return 0; |
274 | } | 253 | } |
275 | 254 | ||
@@ -305,7 +284,7 @@ static void pciehp_power_thread(struct work_struct *work) | |||
305 | break; | 284 | break; |
306 | case POWERON_STATE: | 285 | case POWERON_STATE: |
307 | mutex_unlock(&p_slot->lock); | 286 | mutex_unlock(&p_slot->lock); |
308 | if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) | 287 | if (pciehp_enable_slot(p_slot)) |
309 | pciehp_green_led_off(p_slot); | 288 | pciehp_green_led_off(p_slot); |
310 | mutex_lock(&p_slot->lock); | 289 | mutex_lock(&p_slot->lock); |
311 | p_slot->state = STATIC_STATE; | 290 | p_slot->state = STATIC_STATE; |
@@ -372,11 +351,8 @@ static void handle_button_press_event(struct slot *p_slot) | |||
372 | "press.\n", slot_name(p_slot)); | 351 | "press.\n", slot_name(p_slot)); |
373 | } | 352 | } |
374 | /* blink green LED and turn off amber */ | 353 | /* blink green LED and turn off amber */ |
375 | if (PWR_LED(ctrl)) | 354 | pciehp_green_led_blink(p_slot); |
376 | pciehp_green_led_blink(p_slot); | 355 | pciehp_set_attention_status(p_slot, 0); |
377 | if (ATTN_LED(ctrl)) | ||
378 | pciehp_set_attention_status(p_slot, 0); | ||
379 | |||
380 | queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); | 356 | queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); |
381 | break; | 357 | break; |
382 | case BLINKINGOFF_STATE: | 358 | case BLINKINGOFF_STATE: |
@@ -389,14 +365,11 @@ static void handle_button_press_event(struct slot *p_slot) | |||
389 | ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); | 365 | ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); |
390 | cancel_delayed_work(&p_slot->work); | 366 | cancel_delayed_work(&p_slot->work); |
391 | if (p_slot->state == BLINKINGOFF_STATE) { | 367 | if (p_slot->state == BLINKINGOFF_STATE) { |
392 | if (PWR_LED(ctrl)) | 368 | pciehp_green_led_on(p_slot); |
393 | pciehp_green_led_on(p_slot); | ||
394 | } else { | 369 | } else { |
395 | if (PWR_LED(ctrl)) | 370 | pciehp_green_led_off(p_slot); |
396 | pciehp_green_led_off(p_slot); | ||
397 | } | 371 | } |
398 | if (ATTN_LED(ctrl)) | 372 | pciehp_set_attention_status(p_slot, 0); |
399 | pciehp_set_attention_status(p_slot, 0); | ||
400 | ctrl_info(ctrl, "PCI slot #%s - action canceled " | 373 | ctrl_info(ctrl, "PCI slot #%s - action canceled " |
401 | "due to button press\n", slot_name(p_slot)); | 374 | "due to button press\n", slot_name(p_slot)); |
402 | p_slot->state = STATIC_STATE; | 375 | p_slot->state = STATIC_STATE; |
@@ -456,10 +429,8 @@ static void interrupt_event_handler(struct work_struct *work) | |||
456 | case INT_POWER_FAULT: | 429 | case INT_POWER_FAULT: |
457 | if (!POWER_CTRL(ctrl)) | 430 | if (!POWER_CTRL(ctrl)) |
458 | break; | 431 | break; |
459 | if (ATTN_LED(ctrl)) | 432 | pciehp_set_attention_status(p_slot, 1); |
460 | pciehp_set_attention_status(p_slot, 1); | 433 | pciehp_green_led_off(p_slot); |
461 | if (PWR_LED(ctrl)) | ||
462 | pciehp_green_led_off(p_slot); | ||
463 | break; | 434 | break; |
464 | case INT_PRESENCE_ON: | 435 | case INT_PRESENCE_ON: |
465 | case INT_PRESENCE_OFF: | 436 | case INT_PRESENCE_OFF: |
@@ -482,14 +453,14 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
482 | int rc; | 453 | int rc; |
483 | struct controller *ctrl = p_slot->ctrl; | 454 | struct controller *ctrl = p_slot->ctrl; |
484 | 455 | ||
485 | rc = pciehp_get_adapter_status(p_slot, &getstatus); | 456 | pciehp_get_adapter_status(p_slot, &getstatus); |
486 | if (rc || !getstatus) { | 457 | if (!getstatus) { |
487 | ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); | 458 | ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); |
488 | return -ENODEV; | 459 | return -ENODEV; |
489 | } | 460 | } |
490 | if (MRL_SENS(p_slot->ctrl)) { | 461 | if (MRL_SENS(p_slot->ctrl)) { |
491 | rc = pciehp_get_latch_status(p_slot, &getstatus); | 462 | pciehp_get_latch_status(p_slot, &getstatus); |
492 | if (rc || getstatus) { | 463 | if (getstatus) { |
493 | ctrl_info(ctrl, "Latch open on slot(%s)\n", | 464 | ctrl_info(ctrl, "Latch open on slot(%s)\n", |
494 | slot_name(p_slot)); | 465 | slot_name(p_slot)); |
495 | return -ENODEV; | 466 | return -ENODEV; |
@@ -497,8 +468,8 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
497 | } | 468 | } |
498 | 469 | ||
499 | if (POWER_CTRL(p_slot->ctrl)) { | 470 | if (POWER_CTRL(p_slot->ctrl)) { |
500 | rc = pciehp_get_power_status(p_slot, &getstatus); | 471 | pciehp_get_power_status(p_slot, &getstatus); |
501 | if (rc || getstatus) { | 472 | if (getstatus) { |
502 | ctrl_info(ctrl, "Already enabled on slot(%s)\n", | 473 | ctrl_info(ctrl, "Already enabled on slot(%s)\n", |
503 | slot_name(p_slot)); | 474 | slot_name(p_slot)); |
504 | return -EINVAL; | 475 | return -EINVAL; |
@@ -518,15 +489,14 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
518 | int pciehp_disable_slot(struct slot *p_slot) | 489 | int pciehp_disable_slot(struct slot *p_slot) |
519 | { | 490 | { |
520 | u8 getstatus = 0; | 491 | u8 getstatus = 0; |
521 | int ret = 0; | ||
522 | struct controller *ctrl = p_slot->ctrl; | 492 | struct controller *ctrl = p_slot->ctrl; |
523 | 493 | ||
524 | if (!p_slot->ctrl) | 494 | if (!p_slot->ctrl) |
525 | return 1; | 495 | return 1; |
526 | 496 | ||
527 | if (!HP_SUPR_RM(p_slot->ctrl)) { | 497 | if (!HP_SUPR_RM(p_slot->ctrl)) { |
528 | ret = pciehp_get_adapter_status(p_slot, &getstatus); | 498 | pciehp_get_adapter_status(p_slot, &getstatus); |
529 | if (ret || !getstatus) { | 499 | if (!getstatus) { |
530 | ctrl_info(ctrl, "No adapter on slot(%s)\n", | 500 | ctrl_info(ctrl, "No adapter on slot(%s)\n", |
531 | slot_name(p_slot)); | 501 | slot_name(p_slot)); |
532 | return -ENODEV; | 502 | return -ENODEV; |
@@ -534,8 +504,8 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
534 | } | 504 | } |
535 | 505 | ||
536 | if (MRL_SENS(p_slot->ctrl)) { | 506 | if (MRL_SENS(p_slot->ctrl)) { |
537 | ret = pciehp_get_latch_status(p_slot, &getstatus); | 507 | pciehp_get_latch_status(p_slot, &getstatus); |
538 | if (ret || getstatus) { | 508 | if (getstatus) { |
539 | ctrl_info(ctrl, "Latch open on slot(%s)\n", | 509 | ctrl_info(ctrl, "Latch open on slot(%s)\n", |
540 | slot_name(p_slot)); | 510 | slot_name(p_slot)); |
541 | return -ENODEV; | 511 | return -ENODEV; |
@@ -543,8 +513,8 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
543 | } | 513 | } |
544 | 514 | ||
545 | if (POWER_CTRL(p_slot->ctrl)) { | 515 | if (POWER_CTRL(p_slot->ctrl)) { |
546 | ret = pciehp_get_power_status(p_slot, &getstatus); | 516 | pciehp_get_power_status(p_slot, &getstatus); |
547 | if (ret || !getstatus) { | 517 | if (!getstatus) { |
548 | ctrl_info(ctrl, "Already disabled on slot(%s)\n", | 518 | ctrl_info(ctrl, "Already disabled on slot(%s)\n", |
549 | slot_name(p_slot)); | 519 | slot_name(p_slot)); |
550 | return -EINVAL; | 520 | return -EINVAL; |
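The pciehp_ctrl.c hunks above drop the PWR_LED()/ATTN_LED() guards at the call sites because, as the pciehp_hpc.c hunks below show, the indicator helpers now test the slot capability themselves and quietly do nothing when it is absent. An illustrative sketch of that idiom only, reusing the driver's internal pcie_write_cmd() helper and the ATTN_LED() macro from pciehp.h:

#include "pciehp.h"

static void example_attention_on(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!ATTN_LED(ctrl))		/* slot has no Attention Indicator */
		return;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_ATTN_IND_ON, PCI_EXP_SLTCTL_AIC);
}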
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 3eea3fdd4b0b..14acfccb7670 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -41,34 +41,11 @@ | |||
41 | #include "../pci.h" | 41 | #include "../pci.h" |
42 | #include "pciehp.h" | 42 | #include "pciehp.h" |
43 | 43 | ||
44 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) | 44 | static inline struct pci_dev *ctrl_dev(struct controller *ctrl) |
45 | { | 45 | { |
46 | struct pci_dev *dev = ctrl->pcie->port; | 46 | return ctrl->pcie->port; |
47 | return pcie_capability_read_word(dev, reg, value); | ||
48 | } | 47 | } |
49 | 48 | ||
50 | static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) | ||
51 | { | ||
52 | struct pci_dev *dev = ctrl->pcie->port; | ||
53 | return pcie_capability_read_dword(dev, reg, value); | ||
54 | } | ||
55 | |||
56 | static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) | ||
57 | { | ||
58 | struct pci_dev *dev = ctrl->pcie->port; | ||
59 | return pcie_capability_write_word(dev, reg, value); | ||
60 | } | ||
61 | |||
62 | static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) | ||
63 | { | ||
64 | struct pci_dev *dev = ctrl->pcie->port; | ||
65 | return pcie_capability_write_dword(dev, reg, value); | ||
66 | } | ||
67 | |||
68 | /* Power Control Command */ | ||
69 | #define POWER_ON 0 | ||
70 | #define POWER_OFF PCI_EXP_SLTCTL_PCC | ||
71 | |||
72 | static irqreturn_t pcie_isr(int irq, void *dev_id); | 49 | static irqreturn_t pcie_isr(int irq, void *dev_id); |
73 | static void start_int_poll_timer(struct controller *ctrl, int sec); | 50 | static void start_int_poll_timer(struct controller *ctrl, int sec); |
74 | 51 | ||
@@ -129,20 +106,23 @@ static inline void pciehp_free_irq(struct controller *ctrl) | |||
129 | 106 | ||
130 | static int pcie_poll_cmd(struct controller *ctrl) | 107 | static int pcie_poll_cmd(struct controller *ctrl) |
131 | { | 108 | { |
109 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
132 | u16 slot_status; | 110 | u16 slot_status; |
133 | int err, timeout = 1000; | 111 | int timeout = 1000; |
134 | 112 | ||
135 | err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 113 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
136 | if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { | 114 | if (slot_status & PCI_EXP_SLTSTA_CC) { |
137 | pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); | 115 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
116 | PCI_EXP_SLTSTA_CC); | ||
138 | return 1; | 117 | return 1; |
139 | } | 118 | } |
140 | while (timeout > 0) { | 119 | while (timeout > 0) { |
141 | msleep(10); | 120 | msleep(10); |
142 | timeout -= 10; | 121 | timeout -= 10; |
143 | err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 122 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
144 | if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { | 123 | if (slot_status & PCI_EXP_SLTSTA_CC) { |
145 | pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); | 124 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
125 | PCI_EXP_SLTSTA_CC); | ||
146 | return 1; | 126 | return 1; |
147 | } | 127 | } |
148 | } | 128 | } |
@@ -169,21 +149,15 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll) | |||
169 | * @cmd: command value written to slot control register | 149 | * @cmd: command value written to slot control register |
170 | * @mask: bitmask of slot control register to be modified | 150 | * @mask: bitmask of slot control register to be modified |
171 | */ | 151 | */ |
172 | static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | 152 | static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) |
173 | { | 153 | { |
174 | int retval = 0; | 154 | struct pci_dev *pdev = ctrl_dev(ctrl); |
175 | u16 slot_status; | 155 | u16 slot_status; |
176 | u16 slot_ctrl; | 156 | u16 slot_ctrl; |
177 | 157 | ||
178 | mutex_lock(&ctrl->ctrl_lock); | 158 | mutex_lock(&ctrl->ctrl_lock); |
179 | 159 | ||
180 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 160 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
181 | if (retval) { | ||
182 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | ||
183 | __func__); | ||
184 | goto out; | ||
185 | } | ||
186 | |||
187 | if (slot_status & PCI_EXP_SLTSTA_CC) { | 161 | if (slot_status & PCI_EXP_SLTSTA_CC) { |
188 | if (!ctrl->no_cmd_complete) { | 162 | if (!ctrl->no_cmd_complete) { |
189 | /* | 163 | /* |
@@ -207,24 +181,17 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
207 | } | 181 | } |
208 | } | 182 | } |
209 | 183 | ||
210 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); | 184 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); |
211 | if (retval) { | ||
212 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | ||
213 | goto out; | ||
214 | } | ||
215 | |||
216 | slot_ctrl &= ~mask; | 185 | slot_ctrl &= ~mask; |
217 | slot_ctrl |= (cmd & mask); | 186 | slot_ctrl |= (cmd & mask); |
218 | ctrl->cmd_busy = 1; | 187 | ctrl->cmd_busy = 1; |
219 | smp_mb(); | 188 | smp_mb(); |
220 | retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); | 189 | pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); |
221 | if (retval) | ||
222 | ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); | ||
223 | 190 | ||
224 | /* | 191 | /* |
225 | * Wait for command completion. | 192 | * Wait for command completion. |
226 | */ | 193 | */ |
227 | if (!retval && !ctrl->no_cmd_complete) { | 194 | if (!ctrl->no_cmd_complete) { |
228 | int poll = 0; | 195 | int poll = 0; |
229 | /* | 196 | /* |
230 | * if hotplug interrupt is not enabled or command | 197 | * if hotplug interrupt is not enabled or command |
@@ -236,19 +203,16 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
236 | poll = 1; | 203 | poll = 1; |
237 | pcie_wait_cmd(ctrl, poll); | 204 | pcie_wait_cmd(ctrl, poll); |
238 | } | 205 | } |
239 | out: | ||
240 | mutex_unlock(&ctrl->ctrl_lock); | 206 | mutex_unlock(&ctrl->ctrl_lock); |
241 | return retval; | ||
242 | } | 207 | } |
243 | 208 | ||
244 | static bool check_link_active(struct controller *ctrl) | 209 | static bool check_link_active(struct controller *ctrl) |
245 | { | 210 | { |
246 | bool ret = false; | 211 | struct pci_dev *pdev = ctrl_dev(ctrl); |
247 | u16 lnk_status; | 212 | u16 lnk_status; |
213 | bool ret; | ||
248 | 214 | ||
249 | if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status)) | 215 | pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); |
250 | return ret; | ||
251 | |||
252 | ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); | 216 | ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); |
253 | 217 | ||
254 | if (ret) | 218 | if (ret) |
@@ -311,9 +275,9 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) | |||
311 | 275 | ||
312 | int pciehp_check_link_status(struct controller *ctrl) | 276 | int pciehp_check_link_status(struct controller *ctrl) |
313 | { | 277 | { |
278 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
279 | bool found; | ||
314 | u16 lnk_status; | 280 | u16 lnk_status; |
315 | int retval = 0; | ||
316 | bool found = false; | ||
317 | 281 | ||
318 | /* | 282 | /* |
319 | * Data Link Layer Link Active Reporting must be capable for | 283 | * Data Link Layer Link Active Reporting must be capable for |
@@ -330,52 +294,37 @@ int pciehp_check_link_status(struct controller *ctrl) | |||
330 | found = pci_bus_check_dev(ctrl->pcie->port->subordinate, | 294 | found = pci_bus_check_dev(ctrl->pcie->port->subordinate, |
331 | PCI_DEVFN(0, 0)); | 295 | PCI_DEVFN(0, 0)); |
332 | 296 | ||
333 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | 297 | pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); |
334 | if (retval) { | ||
335 | ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); | ||
336 | return retval; | ||
337 | } | ||
338 | |||
339 | ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); | 298 | ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); |
340 | if ((lnk_status & PCI_EXP_LNKSTA_LT) || | 299 | if ((lnk_status & PCI_EXP_LNKSTA_LT) || |
341 | !(lnk_status & PCI_EXP_LNKSTA_NLW)) { | 300 | !(lnk_status & PCI_EXP_LNKSTA_NLW)) { |
342 | ctrl_err(ctrl, "Link Training Error occurs \n"); | 301 | ctrl_err(ctrl, "Link Training Error occurs \n"); |
343 | retval = -1; | 302 | return -1; |
344 | return retval; | ||
345 | } | 303 | } |
346 | 304 | ||
347 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | 305 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); |
348 | 306 | ||
349 | if (!found && !retval) | 307 | if (!found) |
350 | retval = -1; | 308 | return -1; |
351 | 309 | ||
352 | return retval; | 310 | return 0; |
353 | } | 311 | } |
354 | 312 | ||
355 | static int __pciehp_link_set(struct controller *ctrl, bool enable) | 313 | static int __pciehp_link_set(struct controller *ctrl, bool enable) |
356 | { | 314 | { |
315 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
357 | u16 lnk_ctrl; | 316 | u16 lnk_ctrl; |
358 | int retval = 0; | ||
359 | 317 | ||
360 | retval = pciehp_readw(ctrl, PCI_EXP_LNKCTL, &lnk_ctrl); | 318 | pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl); |
361 | if (retval) { | ||
362 | ctrl_err(ctrl, "Cannot read LNKCTRL register\n"); | ||
363 | return retval; | ||
364 | } | ||
365 | 319 | ||
366 | if (enable) | 320 | if (enable) |
367 | lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; | 321 | lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; |
368 | else | 322 | else |
369 | lnk_ctrl |= PCI_EXP_LNKCTL_LD; | 323 | lnk_ctrl |= PCI_EXP_LNKCTL_LD; |
370 | 324 | ||
371 | retval = pciehp_writew(ctrl, PCI_EXP_LNKCTL, lnk_ctrl); | 325 | pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl); |
372 | if (retval) { | ||
373 | ctrl_err(ctrl, "Cannot write LNKCTRL register\n"); | ||
374 | return retval; | ||
375 | } | ||
376 | ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); | 326 | ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); |
377 | 327 | return 0; | |
378 | return retval; | ||
379 | } | 328 | } |
380 | 329 | ||
381 | static int pciehp_link_enable(struct controller *ctrl) | 330 | static int pciehp_link_enable(struct controller *ctrl) |
@@ -388,223 +337,165 @@ static int pciehp_link_disable(struct controller *ctrl) | |||
388 | return __pciehp_link_set(ctrl, false); | 337 | return __pciehp_link_set(ctrl, false); |
389 | } | 338 | } |
390 | 339 | ||
391 | int pciehp_get_attention_status(struct slot *slot, u8 *status) | 340 | void pciehp_get_attention_status(struct slot *slot, u8 *status) |
392 | { | 341 | { |
393 | struct controller *ctrl = slot->ctrl; | 342 | struct controller *ctrl = slot->ctrl; |
343 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
394 | u16 slot_ctrl; | 344 | u16 slot_ctrl; |
395 | u8 atten_led_state; | ||
396 | int retval = 0; | ||
397 | |||
398 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); | ||
399 | if (retval) { | ||
400 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | ||
401 | return retval; | ||
402 | } | ||
403 | 345 | ||
346 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); | ||
404 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, | 347 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, |
405 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); | 348 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); |
406 | 349 | ||
407 | atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; | 350 | switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) { |
408 | 351 | case PCI_EXP_SLTCTL_ATTN_IND_ON: | |
409 | switch (atten_led_state) { | ||
410 | case 0: | ||
411 | *status = 0xFF; /* Reserved */ | ||
412 | break; | ||
413 | case 1: | ||
414 | *status = 1; /* On */ | 352 | *status = 1; /* On */ |
415 | break; | 353 | break; |
416 | case 2: | 354 | case PCI_EXP_SLTCTL_ATTN_IND_BLINK: |
417 | *status = 2; /* Blink */ | 355 | *status = 2; /* Blink */ |
418 | break; | 356 | break; |
419 | case 3: | 357 | case PCI_EXP_SLTCTL_ATTN_IND_OFF: |
420 | *status = 0; /* Off */ | 358 | *status = 0; /* Off */ |
421 | break; | 359 | break; |
422 | default: | 360 | default: |
423 | *status = 0xFF; | 361 | *status = 0xFF; |
424 | break; | 362 | break; |
425 | } | 363 | } |
426 | |||
427 | return 0; | ||
428 | } | 364 | } |
429 | 365 | ||
430 | int pciehp_get_power_status(struct slot *slot, u8 *status) | 366 | void pciehp_get_power_status(struct slot *slot, u8 *status) |
431 | { | 367 | { |
432 | struct controller *ctrl = slot->ctrl; | 368 | struct controller *ctrl = slot->ctrl; |
369 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
433 | u16 slot_ctrl; | 370 | u16 slot_ctrl; |
434 | u8 pwr_state; | ||
435 | int retval = 0; | ||
436 | 371 | ||
437 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); | 372 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); |
438 | if (retval) { | ||
439 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | ||
440 | return retval; | ||
441 | } | ||
442 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, | 373 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, |
443 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); | 374 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); |
444 | 375 | ||
445 | pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; | 376 | switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) { |
446 | 377 | case PCI_EXP_SLTCTL_PWR_ON: | |
447 | switch (pwr_state) { | 378 | *status = 1; /* On */ |
448 | case 0: | ||
449 | *status = 1; | ||
450 | break; | 379 | break; |
451 | case 1: | 380 | case PCI_EXP_SLTCTL_PWR_OFF: |
452 | *status = 0; | 381 | *status = 0; /* Off */ |
453 | break; | 382 | break; |
454 | default: | 383 | default: |
455 | *status = 0xFF; | 384 | *status = 0xFF; |
456 | break; | 385 | break; |
457 | } | 386 | } |
458 | |||
459 | return retval; | ||
460 | } | 387 | } |
461 | 388 | ||
462 | int pciehp_get_latch_status(struct slot *slot, u8 *status) | 389 | void pciehp_get_latch_status(struct slot *slot, u8 *status) |
463 | { | 390 | { |
464 | struct controller *ctrl = slot->ctrl; | 391 | struct pci_dev *pdev = ctrl_dev(slot->ctrl); |
465 | u16 slot_status; | 392 | u16 slot_status; |
466 | int retval; | ||
467 | 393 | ||
468 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 394 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
469 | if (retval) { | ||
470 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | ||
471 | __func__); | ||
472 | return retval; | ||
473 | } | ||
474 | *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); | 395 | *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); |
475 | return 0; | ||
476 | } | 396 | } |
477 | 397 | ||
478 | int pciehp_get_adapter_status(struct slot *slot, u8 *status) | 398 | void pciehp_get_adapter_status(struct slot *slot, u8 *status) |
479 | { | 399 | { |
480 | struct controller *ctrl = slot->ctrl; | 400 | struct pci_dev *pdev = ctrl_dev(slot->ctrl); |
481 | u16 slot_status; | 401 | u16 slot_status; |
482 | int retval; | ||
483 | 402 | ||
484 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 403 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
485 | if (retval) { | ||
486 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | ||
487 | __func__); | ||
488 | return retval; | ||
489 | } | ||
490 | *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); | 404 | *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); |
491 | return 0; | ||
492 | } | 405 | } |
493 | 406 | ||
494 | int pciehp_query_power_fault(struct slot *slot) | 407 | int pciehp_query_power_fault(struct slot *slot) |
495 | { | 408 | { |
496 | struct controller *ctrl = slot->ctrl; | 409 | struct pci_dev *pdev = ctrl_dev(slot->ctrl); |
497 | u16 slot_status; | 410 | u16 slot_status; |
498 | int retval; | ||
499 | 411 | ||
500 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 412 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
501 | if (retval) { | ||
502 | ctrl_err(ctrl, "Cannot check for power fault\n"); | ||
503 | return retval; | ||
504 | } | ||
505 | return !!(slot_status & PCI_EXP_SLTSTA_PFD); | 413 | return !!(slot_status & PCI_EXP_SLTSTA_PFD); |
506 | } | 414 | } |
507 | 415 | ||
508 | int pciehp_set_attention_status(struct slot *slot, u8 value) | 416 | void pciehp_set_attention_status(struct slot *slot, u8 value) |
509 | { | 417 | { |
510 | struct controller *ctrl = slot->ctrl; | 418 | struct controller *ctrl = slot->ctrl; |
511 | u16 slot_cmd; | 419 | u16 slot_cmd; |
512 | u16 cmd_mask; | ||
513 | 420 | ||
514 | cmd_mask = PCI_EXP_SLTCTL_AIC; | 421 | if (!ATTN_LED(ctrl)) |
422 | return; | ||
423 | |||
515 | switch (value) { | 424 | switch (value) { |
516 | case 0 : /* turn off */ | 425 | case 0 : /* turn off */ |
517 | slot_cmd = 0x00C0; | 426 | slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF; |
518 | break; | 427 | break; |
519 | case 1: /* turn on */ | 428 | case 1: /* turn on */ |
520 | slot_cmd = 0x0040; | 429 | slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON; |
521 | break; | 430 | break; |
522 | case 2: /* turn blink */ | 431 | case 2: /* turn blink */ |
523 | slot_cmd = 0x0080; | 432 | slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK; |
524 | break; | 433 | break; |
525 | default: | 434 | default: |
526 | return -EINVAL; | 435 | return; |
527 | } | 436 | } |
528 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 437 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
529 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 438 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
530 | return pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 439 | pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); |
531 | } | 440 | } |
532 | 441 | ||
533 | void pciehp_green_led_on(struct slot *slot) | 442 | void pciehp_green_led_on(struct slot *slot) |
534 | { | 443 | { |
535 | struct controller *ctrl = slot->ctrl; | 444 | struct controller *ctrl = slot->ctrl; |
536 | u16 slot_cmd; | ||
537 | u16 cmd_mask; | ||
538 | 445 | ||
539 | slot_cmd = 0x0100; | 446 | if (!PWR_LED(ctrl)) |
540 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 447 | return; |
541 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 448 | |
449 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC); | ||
542 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 450 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
543 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 451 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, |
452 | PCI_EXP_SLTCTL_PWR_IND_ON); | ||
544 | } | 453 | } |
545 | 454 | ||
546 | void pciehp_green_led_off(struct slot *slot) | 455 | void pciehp_green_led_off(struct slot *slot) |
547 | { | 456 | { |
548 | struct controller *ctrl = slot->ctrl; | 457 | struct controller *ctrl = slot->ctrl; |
549 | u16 slot_cmd; | ||
550 | u16 cmd_mask; | ||
551 | 458 | ||
552 | slot_cmd = 0x0300; | 459 | if (!PWR_LED(ctrl)) |
553 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 460 | return; |
554 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 461 | |
462 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC); | ||
555 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 463 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
556 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 464 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, |
465 | PCI_EXP_SLTCTL_PWR_IND_OFF); | ||
557 | } | 466 | } |
558 | 467 | ||
559 | void pciehp_green_led_blink(struct slot *slot) | 468 | void pciehp_green_led_blink(struct slot *slot) |
560 | { | 469 | { |
561 | struct controller *ctrl = slot->ctrl; | 470 | struct controller *ctrl = slot->ctrl; |
562 | u16 slot_cmd; | ||
563 | u16 cmd_mask; | ||
564 | 471 | ||
565 | slot_cmd = 0x0200; | 472 | if (!PWR_LED(ctrl)) |
566 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 473 | return; |
567 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 474 | |
475 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC); | ||
568 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 476 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
569 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 477 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, |
478 | PCI_EXP_SLTCTL_PWR_IND_BLINK); | ||
570 | } | 479 | } |
571 | 480 | ||
572 | int pciehp_power_on_slot(struct slot * slot) | 481 | int pciehp_power_on_slot(struct slot * slot) |
573 | { | 482 | { |
574 | struct controller *ctrl = slot->ctrl; | 483 | struct controller *ctrl = slot->ctrl; |
575 | u16 slot_cmd; | 484 | struct pci_dev *pdev = ctrl_dev(ctrl); |
576 | u16 cmd_mask; | ||
577 | u16 slot_status; | 485 | u16 slot_status; |
578 | int retval = 0; | 486 | int retval; |
579 | 487 | ||
580 | /* Clear sticky power-fault bit from previous power failures */ | 488 | /* Clear sticky power-fault bit from previous power failures */ |
581 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 489 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
582 | if (retval) { | 490 | if (slot_status & PCI_EXP_SLTSTA_PFD) |
583 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | 491 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
584 | __func__); | 492 | PCI_EXP_SLTSTA_PFD); |
585 | return retval; | ||
586 | } | ||
587 | slot_status &= PCI_EXP_SLTSTA_PFD; | ||
588 | if (slot_status) { | ||
589 | retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status); | ||
590 | if (retval) { | ||
591 | ctrl_err(ctrl, | ||
592 | "%s: Cannot write to SLOTSTATUS register\n", | ||
593 | __func__); | ||
594 | return retval; | ||
595 | } | ||
596 | } | ||
597 | ctrl->power_fault_detected = 0; | 493 | ctrl->power_fault_detected = 0; |
598 | 494 | ||
599 | slot_cmd = POWER_ON; | 495 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC); |
600 | cmd_mask = PCI_EXP_SLTCTL_PCC; | ||
601 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | ||
602 | if (retval) { | ||
603 | ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); | ||
604 | return retval; | ||
605 | } | ||
606 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 496 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
607 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 497 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, |
498 | PCI_EXP_SLTCTL_PWR_ON); | ||
608 | 499 | ||
609 | retval = pciehp_link_enable(ctrl); | 500 | retval = pciehp_link_enable(ctrl); |
610 | if (retval) | 501 | if (retval) |
@@ -613,12 +504,9 @@ int pciehp_power_on_slot(struct slot * slot) | |||
613 | return retval; | 504 | return retval; |
614 | } | 505 | } |
615 | 506 | ||
616 | int pciehp_power_off_slot(struct slot * slot) | 507 | void pciehp_power_off_slot(struct slot * slot) |
617 | { | 508 | { |
618 | struct controller *ctrl = slot->ctrl; | 509 | struct controller *ctrl = slot->ctrl; |
619 | u16 slot_cmd; | ||
620 | u16 cmd_mask; | ||
621 | int retval; | ||
622 | 510 | ||
623 | /* Disable the link at first */ | 511 | /* Disable the link at first */ |
624 | pciehp_link_disable(ctrl); | 512 | pciehp_link_disable(ctrl); |
@@ -628,21 +516,16 @@ int pciehp_power_off_slot(struct slot * slot) | |||
628 | else | 516 | else |
629 | msleep(1000); | 517 | msleep(1000); |
630 | 518 | ||
631 | slot_cmd = POWER_OFF; | 519 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC); |
632 | cmd_mask = PCI_EXP_SLTCTL_PCC; | ||
633 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | ||
634 | if (retval) { | ||
635 | ctrl_err(ctrl, "Write command failed!\n"); | ||
636 | return retval; | ||
637 | } | ||
638 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 520 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
639 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 521 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, |
640 | return 0; | 522 | PCI_EXP_SLTCTL_PWR_OFF); |
641 | } | 523 | } |
642 | 524 | ||
643 | static irqreturn_t pcie_isr(int irq, void *dev_id) | 525 | static irqreturn_t pcie_isr(int irq, void *dev_id) |
644 | { | 526 | { |
645 | struct controller *ctrl = (struct controller *)dev_id; | 527 | struct controller *ctrl = (struct controller *)dev_id; |
528 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
646 | struct slot *slot = ctrl->slot; | 529 | struct slot *slot = ctrl->slot; |
647 | u16 detected, intr_loc; | 530 | u16 detected, intr_loc; |
648 | 531 | ||
@@ -653,11 +536,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
653 | */ | 536 | */ |
654 | intr_loc = 0; | 537 | intr_loc = 0; |
655 | do { | 538 | do { |
656 | if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { | 539 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected); |
657 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", | ||
658 | __func__); | ||
659 | return IRQ_NONE; | ||
660 | } | ||
661 | 540 | ||
662 | detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | | 541 | detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
663 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | | 542 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | |
@@ -666,11 +545,9 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
666 | intr_loc |= detected; | 545 | intr_loc |= detected; |
667 | if (!intr_loc) | 546 | if (!intr_loc) |
668 | return IRQ_NONE; | 547 | return IRQ_NONE; |
669 | if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) { | 548 | if (detected) |
670 | ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", | 549 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
671 | __func__); | 550 | intr_loc); |
672 | return IRQ_NONE; | ||
673 | } | ||
674 | } while (detected); | 551 | } while (detected); |
675 | 552 | ||
676 | ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); | 553 | ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); |
@@ -705,7 +582,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
705 | return IRQ_HANDLED; | 582 | return IRQ_HANDLED; |
706 | } | 583 | } |
707 | 584 | ||
708 | int pcie_enable_notification(struct controller *ctrl) | 585 | void pcie_enable_notification(struct controller *ctrl) |
709 | { | 586 | { |
710 | u16 cmd, mask; | 587 | u16 cmd, mask; |
711 | 588 | ||
@@ -731,22 +608,18 @@ int pcie_enable_notification(struct controller *ctrl) | |||
731 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | | 608 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | |
732 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); | 609 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); |
733 | 610 | ||
734 | if (pcie_write_cmd(ctrl, cmd, mask)) { | 611 | pcie_write_cmd(ctrl, cmd, mask); |
735 | ctrl_err(ctrl, "Cannot enable software notification\n"); | ||
736 | return -1; | ||
737 | } | ||
738 | return 0; | ||
739 | } | 612 | } |
740 | 613 | ||
741 | static void pcie_disable_notification(struct controller *ctrl) | 614 | static void pcie_disable_notification(struct controller *ctrl) |
742 | { | 615 | { |
743 | u16 mask; | 616 | u16 mask; |
617 | |||
744 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | | 618 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | |
745 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | | 619 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | |
746 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | | 620 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | |
747 | PCI_EXP_SLTCTL_DLLSCE); | 621 | PCI_EXP_SLTCTL_DLLSCE); |
748 | if (pcie_write_cmd(ctrl, 0, mask)) | 622 | pcie_write_cmd(ctrl, 0, mask); |
749 | ctrl_warn(ctrl, "Cannot disable software notification\n"); | ||
750 | } | 623 | } |
751 | 624 | ||
752 | /* | 625 | /* |
@@ -758,6 +631,7 @@ static void pcie_disable_notification(struct controller *ctrl) | |||
758 | int pciehp_reset_slot(struct slot *slot, int probe) | 631 | int pciehp_reset_slot(struct slot *slot, int probe) |
759 | { | 632 | { |
760 | struct controller *ctrl = slot->ctrl; | 633 | struct controller *ctrl = slot->ctrl; |
634 | struct pci_dev *pdev = ctrl_dev(ctrl); | ||
761 | 635 | ||
762 | if (probe) | 636 | if (probe) |
763 | return 0; | 637 | return 0; |
@@ -771,7 +645,8 @@ int pciehp_reset_slot(struct slot *slot, int probe) | |||
771 | pci_reset_bridge_secondary_bus(ctrl->pcie->port); | 645 | pci_reset_bridge_secondary_bus(ctrl->pcie->port); |
772 | 646 | ||
773 | if (HP_SUPR_RM(ctrl)) { | 647 | if (HP_SUPR_RM(ctrl)) { |
774 | pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC); | 648 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
649 | PCI_EXP_SLTSTA_PDC); | ||
775 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE); | 650 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE); |
776 | if (pciehp_poll_mode) | 651 | if (pciehp_poll_mode) |
777 | int_poll_timeout(ctrl->poll_timer.data); | 652 | int_poll_timeout(ctrl->poll_timer.data); |
@@ -784,10 +659,7 @@ int pcie_init_notification(struct controller *ctrl) | |||
784 | { | 659 | { |
785 | if (pciehp_request_irq(ctrl)) | 660 | if (pciehp_request_irq(ctrl)) |
786 | return -1; | 661 | return -1; |
787 | if (pcie_enable_notification(ctrl)) { | 662 | pcie_enable_notification(ctrl); |
788 | pciehp_free_irq(ctrl); | ||
789 | return -1; | ||
790 | } | ||
791 | ctrl->notification_enabled = 1; | 663 | ctrl->notification_enabled = 1; |
792 | return 0; | 664 | return 0; |
793 | } | 665 | } |
@@ -875,12 +747,14 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
875 | EMI(ctrl) ? "yes" : "no"); | 747 | EMI(ctrl) ? "yes" : "no"); |
876 | ctrl_info(ctrl, " Command Completed : %3s\n", | 748 | ctrl_info(ctrl, " Command Completed : %3s\n", |
877 | NO_CMD_CMPL(ctrl) ? "no" : "yes"); | 749 | NO_CMD_CMPL(ctrl) ? "no" : "yes"); |
878 | pciehp_readw(ctrl, PCI_EXP_SLTSTA, ®16); | 750 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16); |
879 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); | 751 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); |
880 | pciehp_readw(ctrl, PCI_EXP_SLTCTL, ®16); | 752 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16); |
881 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); | 753 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); |
882 | } | 754 | } |
883 | 755 | ||
756 | #define FLAG(x,y) (((x) & (y)) ? '+' : '-') | ||
757 | |||
884 | struct controller *pcie_init(struct pcie_device *dev) | 758 | struct controller *pcie_init(struct pcie_device *dev) |
885 | { | 759 | { |
886 | struct controller *ctrl; | 760 | struct controller *ctrl; |
@@ -893,11 +767,7 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
893 | goto abort; | 767 | goto abort; |
894 | } | 768 | } |
895 | ctrl->pcie = dev; | 769 | ctrl->pcie = dev; |
896 | if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { | 770 | pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); |
897 | ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); | ||
898 | goto abort_ctrl; | ||
899 | } | ||
900 | |||
901 | ctrl->slot_cap = slot_cap; | 771 | ctrl->slot_cap = slot_cap; |
902 | mutex_init(&ctrl->ctrl_lock); | 772 | mutex_init(&ctrl->ctrl_lock); |
903 | init_waitqueue_head(&ctrl->queue); | 773 | init_waitqueue_head(&ctrl->queue); |
@@ -913,25 +783,31 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
913 | ctrl->no_cmd_complete = 1; | 783 | ctrl->no_cmd_complete = 1; |
914 | 784 | ||
915 | /* Check if Data Link Layer Link Active Reporting is implemented */ | 785 | /* Check if Data Link Layer Link Active Reporting is implemented */ |
916 | if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { | 786 | pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); |
917 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | ||
918 | goto abort_ctrl; | ||
919 | } | ||
920 | if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { | 787 | if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { |
921 | ctrl_dbg(ctrl, "Link Active Reporting supported\n"); | 788 | ctrl_dbg(ctrl, "Link Active Reporting supported\n"); |
922 | ctrl->link_active_reporting = 1; | 789 | ctrl->link_active_reporting = 1; |
923 | } | 790 | } |
924 | 791 | ||
925 | /* Clear all remaining event bits in Slot Status register */ | 792 | /* Clear all remaining event bits in Slot Status register */ |
926 | if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) | 793 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
927 | goto abort_ctrl; | 794 | PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
795 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | | ||
796 | PCI_EXP_SLTSTA_CC); | ||
928 | 797 | ||
929 | /* Disable software notification */ | 798 | /* Disable software notification */ |
930 | pcie_disable_notification(ctrl); | 799 | pcie_disable_notification(ctrl); |
931 | 800 | ||
932 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", | 801 | ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n", |
933 | pdev->vendor, pdev->device, pdev->subsystem_vendor, | 802 | (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, |
934 | pdev->subsystem_device); | 803 | FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), |
804 | FLAG(slot_cap, PCI_EXP_SLTCAP_AIP), | ||
805 | FLAG(slot_cap, PCI_EXP_SLTCAP_PIP), | ||
806 | FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), | ||
807 | FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP), | ||
808 | FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), | ||
809 | FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), | ||
810 | FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC)); | ||
935 | 811 | ||
936 | if (pcie_init_slot(ctrl)) | 812 | if (pcie_init_slot(ctrl)) |
937 | goto abort_ctrl; | 813 | goto abort_ctrl; |
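Note on the pciehp_hpc.c hunks above: the driver-private pciehp_readw()/pciehp_writew() helpers and their per-call error plumbing are replaced with the generic pcie_capability_*() accessors. A minimal sketch of the resulting access pattern follows; it is not code from the patch, and the example_* name is illustrative only.

#include <linux/device.h>
#include <linux/pci.h>

/* Sketch: clear a pending Presence Detect Changed event and read it back. */
static void example_clear_pdc(struct pci_dev *pdev)
{
        u16 slot_status;

        /* Slot Status bits are write-one-to-clear. */
        pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);

        /* pcie_capability_read_word() returns 0 on success. */
        if (pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status))
                return;         /* no PCIe capability or config read failed */

        if (slot_status & PCI_EXP_SLTSTA_PDC)
                dev_info(&pdev->dev, "presence detect change still pending\n");
}
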
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 0e0d0f7f63fd..b07d7cc2d697 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -39,22 +39,26 @@ int pciehp_configure_device(struct slot *p_slot) | |||
39 | struct pci_dev *dev; | 39 | struct pci_dev *dev; |
40 | struct pci_dev *bridge = p_slot->ctrl->pcie->port; | 40 | struct pci_dev *bridge = p_slot->ctrl->pcie->port; |
41 | struct pci_bus *parent = bridge->subordinate; | 41 | struct pci_bus *parent = bridge->subordinate; |
42 | int num; | 42 | int num, ret = 0; |
43 | struct controller *ctrl = p_slot->ctrl; | 43 | struct controller *ctrl = p_slot->ctrl; |
44 | 44 | ||
45 | pci_lock_rescan_remove(); | ||
46 | |||
45 | dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); | 47 | dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); |
46 | if (dev) { | 48 | if (dev) { |
47 | ctrl_err(ctrl, "Device %s already exists " | 49 | ctrl_err(ctrl, "Device %s already exists " |
48 | "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), | 50 | "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), |
49 | pci_domain_nr(parent), parent->number); | 51 | pci_domain_nr(parent), parent->number); |
50 | pci_dev_put(dev); | 52 | pci_dev_put(dev); |
51 | return -EINVAL; | 53 | ret = -EINVAL; |
54 | goto out; | ||
52 | } | 55 | } |
53 | 56 | ||
54 | num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); | 57 | num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); |
55 | if (num == 0) { | 58 | if (num == 0) { |
56 | ctrl_err(ctrl, "No new device found\n"); | 59 | ctrl_err(ctrl, "No new device found\n"); |
57 | return -ENODEV; | 60 | ret = -ENODEV; |
61 | goto out; | ||
58 | } | 62 | } |
59 | 63 | ||
60 | list_for_each_entry(dev, &parent->devices, bus_list) | 64 | list_for_each_entry(dev, &parent->devices, bus_list) |
@@ -73,12 +77,14 @@ int pciehp_configure_device(struct slot *p_slot) | |||
73 | 77 | ||
74 | pci_bus_add_devices(parent); | 78 | pci_bus_add_devices(parent); |
75 | 79 | ||
76 | return 0; | 80 | out: |
81 | pci_unlock_rescan_remove(); | ||
82 | return ret; | ||
77 | } | 83 | } |
78 | 84 | ||
79 | int pciehp_unconfigure_device(struct slot *p_slot) | 85 | int pciehp_unconfigure_device(struct slot *p_slot) |
80 | { | 86 | { |
81 | int ret, rc = 0; | 87 | int rc = 0; |
82 | u8 bctl = 0; | 88 | u8 bctl = 0; |
83 | u8 presence = 0; | 89 | u8 presence = 0; |
84 | struct pci_dev *dev, *temp; | 90 | struct pci_dev *dev, *temp; |
@@ -88,9 +94,9 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
88 | 94 | ||
89 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", | 95 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", |
90 | __func__, pci_domain_nr(parent), parent->number); | 96 | __func__, pci_domain_nr(parent), parent->number); |
91 | ret = pciehp_get_adapter_status(p_slot, &presence); | 97 | pciehp_get_adapter_status(p_slot, &presence); |
92 | if (ret) | 98 | |
93 | presence = 0; | 99 | pci_lock_rescan_remove(); |
94 | 100 | ||
95 | /* | 101 | /* |
96 | * Stopping an SR-IOV PF device removes all the associated VFs, | 102 | * Stopping an SR-IOV PF device removes all the associated VFs, |
@@ -126,5 +132,6 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
126 | pci_dev_put(dev); | 132 | pci_dev_put(dev); |
127 | } | 133 | } |
128 | 134 | ||
135 | pci_unlock_rescan_remove(); | ||
129 | return rc; | 136 | return rc; |
130 | } | 137 | } |
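The pciehp_pci.c hunks above make the configure/unconfigure paths take the new global rescan/remove lock themselves. A hedged sketch of the hot-add pattern expected with pci_lock_rescan_remove()/pci_unlock_rescan_remove(); the example_* name is illustrative and the body simply mirrors the locking shape shown in the diff.

#include <linux/pci.h>

/* Sketch: scan and add the device at slot 0 of @parent under the rescan lock. */
static int example_hot_add(struct pci_bus *parent)
{
        struct pci_dev *dev;
        int ret = 0;

        pci_lock_rescan_remove();

        dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
        if (dev) {
                /* Already present: nothing to hot-add. */
                pci_dev_put(dev);
                ret = -EINVAL;
                goto out;
        }

        if (pci_scan_slot(parent, PCI_DEVFN(0, 0)) == 0) {
                ret = -ENODEV;      /* nothing responded in the slot */
                goto out;
        }

        pci_bus_add_devices(parent);
out:
        pci_unlock_rescan_remove();
        return ret;
}
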
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e9c044d15add..4fcdeedda31b 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
@@ -354,10 +354,15 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
354 | { | 354 | { |
355 | struct pci_bus *bus; | 355 | struct pci_bus *bus; |
356 | struct slot *slot; | 356 | struct slot *slot; |
357 | int ret = 0; | ||
358 | |||
359 | pci_lock_rescan_remove(); | ||
357 | 360 | ||
358 | bus = pcibios_find_pci_bus(dn); | 361 | bus = pcibios_find_pci_bus(dn); |
359 | if (!bus) | 362 | if (!bus) { |
360 | return -EINVAL; | 363 | ret = -EINVAL; |
364 | goto out; | ||
365 | } | ||
361 | 366 | ||
362 | pr_debug("PCI: Removing PCI slot below EADS bridge %s\n", | 367 | pr_debug("PCI: Removing PCI slot below EADS bridge %s\n", |
363 | bus->self ? pci_name(bus->self) : "<!PHB!>"); | 368 | bus->self ? pci_name(bus->self) : "<!PHB!>"); |
@@ -371,7 +376,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
371 | printk(KERN_ERR | 376 | printk(KERN_ERR |
372 | "%s: unable to remove hotplug slot %s\n", | 377 | "%s: unable to remove hotplug slot %s\n", |
373 | __func__, drc_name); | 378 | __func__, drc_name); |
374 | return -EIO; | 379 | ret = -EIO; |
380 | goto out; | ||
375 | } | 381 | } |
376 | } | 382 | } |
377 | 383 | ||
@@ -382,7 +388,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
382 | if (pcibios_unmap_io_space(bus)) { | 388 | if (pcibios_unmap_io_space(bus)) { |
383 | printk(KERN_ERR "%s: failed to unmap bus range\n", | 389 | printk(KERN_ERR "%s: failed to unmap bus range\n", |
384 | __func__); | 390 | __func__); |
385 | return -ERANGE; | 391 | ret = -ERANGE; |
392 | goto out; | ||
386 | } | 393 | } |
387 | 394 | ||
388 | /* Remove the EADS bridge device itself */ | 395 | /* Remove the EADS bridge device itself */ |
@@ -390,7 +397,9 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
390 | pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); | 397 | pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); |
391 | pci_stop_and_remove_bus_device(bus->self); | 398 | pci_stop_and_remove_bus_device(bus->self); |
392 | 399 | ||
393 | return 0; | 400 | out: |
401 | pci_unlock_rescan_remove(); | ||
402 | return ret; | ||
394 | } | 403 | } |
395 | 404 | ||
396 | /** | 405 | /** |
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index b7fc5c9255a5..4796c15fba94 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c | |||
@@ -398,7 +398,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
398 | return retval; | 398 | return retval; |
399 | 399 | ||
400 | if (state == PRESENT) { | 400 | if (state == PRESENT) { |
401 | pci_lock_rescan_remove(); | ||
401 | pcibios_add_pci_devices(slot->bus); | 402 | pcibios_add_pci_devices(slot->bus); |
403 | pci_unlock_rescan_remove(); | ||
402 | slot->state = CONFIGURED; | 404 | slot->state = CONFIGURED; |
403 | } else if (state == EMPTY) { | 405 | } else if (state == EMPTY) { |
404 | slot->state = EMPTY; | 406 | slot->state = EMPTY; |
@@ -418,7 +420,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
418 | if (slot->state == NOT_CONFIGURED) | 420 | if (slot->state == NOT_CONFIGURED) |
419 | return -EINVAL; | 421 | return -EINVAL; |
420 | 422 | ||
423 | pci_lock_rescan_remove(); | ||
421 | pcibios_remove_pci_devices(slot->bus); | 424 | pcibios_remove_pci_devices(slot->bus); |
425 | pci_unlock_rescan_remove(); | ||
422 | vm_unmap_aliases(); | 426 | vm_unmap_aliases(); |
423 | 427 | ||
424 | slot->state = NOT_CONFIGURED; | 428 | slot->state = NOT_CONFIGURED; |
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 3c7eb5dd91c6..8d2ce22151eb 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c | |||
@@ -80,7 +80,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
80 | goto out_deconfigure; | 80 | goto out_deconfigure; |
81 | 81 | ||
82 | pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN); | 82 | pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN); |
83 | pci_lock_rescan_remove(); | ||
83 | pci_bus_add_devices(slot->zdev->bus); | 84 | pci_bus_add_devices(slot->zdev->bus); |
85 | pci_unlock_rescan_remove(); | ||
84 | 86 | ||
85 | return rc; | 87 | return rc; |
86 | 88 | ||
@@ -98,7 +100,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
98 | return -EIO; | 100 | return -EIO; |
99 | 101 | ||
100 | if (slot->zdev->pdev) | 102 | if (slot->zdev->pdev) |
101 | pci_stop_and_remove_bus_device(slot->zdev->pdev); | 103 | pci_stop_and_remove_bus_device_locked(slot->zdev->pdev); |
102 | 104 | ||
103 | rc = zpci_disable_device(slot->zdev); | 105 | rc = zpci_disable_device(slot->zdev); |
104 | if (rc) | 106 | if (rc) |
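For paths such as the s390 disable_slot() above that only remove a single function and do not already hold the rescan/remove lock, the series provides pci_stop_and_remove_bus_device_locked(), which takes and drops the lock internally. A trivial sketch, with an illustrative function name:

#include <linux/pci.h>

/* Sketch: remove one device without open-coding the rescan/remove lock.
 * Equivalent to pci_lock_rescan_remove();
 * pci_stop_and_remove_bus_device(pdev); pci_unlock_rescan_remove();
 */
static void example_remove_one(struct pci_dev *pdev)
{
        pci_stop_and_remove_bus_device_locked(pdev);
}
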
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 5b05a68cca6c..613043f7576f 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -459,12 +459,15 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
459 | acpi_scan_lock_release(); | 459 | acpi_scan_lock_release(); |
460 | } | 460 | } |
461 | 461 | ||
462 | pci_lock_rescan_remove(); | ||
463 | |||
462 | /* Call the driver for the new device */ | 464 | /* Call the driver for the new device */ |
463 | pci_bus_add_devices(slot->pci_bus); | 465 | pci_bus_add_devices(slot->pci_bus); |
464 | /* Call the drivers for the new devices subordinate to PPB */ | 466 | /* Call the drivers for the new devices subordinate to PPB */ |
465 | if (new_ppb) | 467 | if (new_ppb) |
466 | pci_bus_add_devices(new_bus); | 468 | pci_bus_add_devices(new_bus); |
467 | 469 | ||
470 | pci_unlock_rescan_remove(); | ||
468 | mutex_unlock(&sn_hotplug_mutex); | 471 | mutex_unlock(&sn_hotplug_mutex); |
469 | 472 | ||
470 | if (rc == 0) | 473 | if (rc == 0) |
@@ -540,6 +543,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
540 | acpi_scan_lock_release(); | 543 | acpi_scan_lock_release(); |
541 | } | 544 | } |
542 | 545 | ||
546 | pci_lock_rescan_remove(); | ||
543 | /* Free the SN resources assigned to the Linux device.*/ | 547 | /* Free the SN resources assigned to the Linux device.*/ |
544 | list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) { | 548 | list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) { |
545 | if (PCI_SLOT(dev->devfn) != slot->device_num + 1) | 549 | if (PCI_SLOT(dev->devfn) != slot->device_num + 1) |
@@ -550,6 +554,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
550 | pci_stop_and_remove_bus_device(dev); | 554 | pci_stop_and_remove_bus_device(dev); |
551 | pci_dev_put(dev); | 555 | pci_dev_put(dev); |
552 | } | 556 | } |
557 | pci_unlock_rescan_remove(); | ||
553 | 558 | ||
554 | /* Remove the SSDT for the slot from the ACPI namespace */ | 559 | /* Remove the SSDT for the slot from the ACPI namespace */ |
555 | if (SN_ACPI_BASE_SUPPORT() && ssdt_id) { | 560 | if (SN_ACPI_BASE_SUPPORT() && ssdt_id) { |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index b0e83132542e..2bf69fe1926c 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -40,7 +40,9 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
40 | struct controller *ctrl = p_slot->ctrl; | 40 | struct controller *ctrl = p_slot->ctrl; |
41 | struct pci_dev *bridge = ctrl->pci_dev; | 41 | struct pci_dev *bridge = ctrl->pci_dev; |
42 | struct pci_bus *parent = bridge->subordinate; | 42 | struct pci_bus *parent = bridge->subordinate; |
43 | int num; | 43 | int num, ret = 0; |
44 | |||
45 | pci_lock_rescan_remove(); | ||
44 | 46 | ||
45 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 47 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); |
46 | if (dev) { | 48 | if (dev) { |
@@ -48,13 +50,15 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
48 | "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), | 50 | "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), |
49 | pci_domain_nr(parent), p_slot->bus, p_slot->device); | 51 | pci_domain_nr(parent), p_slot->bus, p_slot->device); |
50 | pci_dev_put(dev); | 52 | pci_dev_put(dev); |
51 | return -EINVAL; | 53 | ret = -EINVAL; |
54 | goto out; | ||
52 | } | 55 | } |
53 | 56 | ||
54 | num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 57 | num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); |
55 | if (num == 0) { | 58 | if (num == 0) { |
56 | ctrl_err(ctrl, "No new device found\n"); | 59 | ctrl_err(ctrl, "No new device found\n"); |
57 | return -ENODEV; | 60 | ret = -ENODEV; |
61 | goto out; | ||
58 | } | 62 | } |
59 | 63 | ||
60 | list_for_each_entry(dev, &parent->devices, bus_list) { | 64 | list_for_each_entry(dev, &parent->devices, bus_list) { |
@@ -75,7 +79,9 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
75 | 79 | ||
76 | pci_bus_add_devices(parent); | 80 | pci_bus_add_devices(parent); |
77 | 81 | ||
78 | return 0; | 82 | out: |
83 | pci_unlock_rescan_remove(); | ||
84 | return ret; | ||
79 | } | 85 | } |
80 | 86 | ||
81 | int shpchp_unconfigure_device(struct slot *p_slot) | 87 | int shpchp_unconfigure_device(struct slot *p_slot) |
@@ -89,6 +95,8 @@ int shpchp_unconfigure_device(struct slot *p_slot) | |||
89 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", | 95 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", |
90 | __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); | 96 | __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); |
91 | 97 | ||
98 | pci_lock_rescan_remove(); | ||
99 | |||
92 | list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { | 100 | list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { |
93 | if (PCI_SLOT(dev->devfn) != p_slot->device) | 101 | if (PCI_SLOT(dev->devfn) != p_slot->device) |
94 | continue; | 102 | continue; |
@@ -108,6 +116,8 @@ int shpchp_unconfigure_device(struct slot *p_slot) | |||
108 | pci_stop_and_remove_bus_device(dev); | 116 | pci_stop_and_remove_bus_device(dev); |
109 | pci_dev_put(dev); | 117 | pci_dev_put(dev); |
110 | } | 118 | } |
119 | |||
120 | pci_unlock_rescan_remove(); | ||
111 | return rc; | 121 | return rc; |
112 | } | 122 | } |
113 | 123 | ||
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 50ce68098298..2c2930ea06ad 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c | |||
@@ -113,6 +113,10 @@ static struct pci_driver ioapic_driver = { | |||
113 | .remove = ioapic_remove, | 113 | .remove = ioapic_remove, |
114 | }; | 114 | }; |
115 | 115 | ||
116 | module_pci_driver(ioapic_driver); | 116 | static int __init ioapic_init(void) |
117 | { | ||
118 | return pci_register_driver(&ioapic_driver); | ||
119 | } | ||
120 | module_init(ioapic_init); | ||
117 | 121 | ||
118 | MODULE_LICENSE("GPL"); | 122 | MODULE_LICENSE("GPL"); |
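The ioapic.c hunk drops module_pci_driver() in favor of an explicit module_init() with no module_exit(), which makes the driver effectively non-unloadable once registered. A hedged sketch of what the two forms correspond to; the example_* driver is illustrative, and the second form is shown in comments since a single module can define only one init path.

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_driver = {
        .name = "example",
};

/* Form before the change: defines both module_init() and module_exit(),
 * calling pci_register_driver()/pci_unregister_driver(). */
module_pci_driver(example_driver);

/* Form after the change: register only, so the module cannot be
 * cleanly removed once loaded.
 *
 * static int __init example_init(void)
 * {
 *         return pci_register_driver(&example_driver);
 * }
 * module_init(example_init);
 */

MODULE_LICENSE("GPL");
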
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 1fe2d6fb19d5..9dce7c5e2a77 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -84,6 +84,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) | |||
84 | virtfn->dev.parent = dev->dev.parent; | 84 | virtfn->dev.parent = dev->dev.parent; |
85 | virtfn->physfn = pci_dev_get(dev); | 85 | virtfn->physfn = pci_dev_get(dev); |
86 | virtfn->is_virtfn = 1; | 86 | virtfn->is_virtfn = 1; |
87 | virtfn->multifunction = 0; | ||
87 | 88 | ||
88 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { | 89 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { |
89 | res = dev->resource + PCI_IOV_RESOURCES + i; | 90 | res = dev->resource + PCI_IOV_RESOURCES + i; |
@@ -441,6 +442,7 @@ static int sriov_init(struct pci_dev *dev, int pos) | |||
441 | 442 | ||
442 | found: | 443 | found: |
443 | pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); | 444 | pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); |
445 | pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0); | ||
444 | pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); | 446 | pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); |
445 | pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); | 447 | pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); |
446 | if (!offset || (total > 1 && !stride)) | 448 | if (!offset || (total > 1 && !stride)) |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 3fcd67a16677..7a0fec6ce571 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev) | |||
116 | return default_teardown_msi_irqs(dev); | 116 | return default_teardown_msi_irqs(dev); |
117 | } | 117 | } |
118 | 118 | ||
119 | void default_restore_msi_irqs(struct pci_dev *dev, int irq) | 119 | static void default_restore_msi_irq(struct pci_dev *dev, int irq) |
120 | { | 120 | { |
121 | struct msi_desc *entry; | 121 | struct msi_desc *entry; |
122 | 122 | ||
@@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq) | |||
134 | write_msi_msg(irq, &entry->msg); | 134 | write_msi_msg(irq, &entry->msg); |
135 | } | 135 | } |
136 | 136 | ||
137 | void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq) | 137 | void __weak arch_restore_msi_irqs(struct pci_dev *dev) |
138 | { | 138 | { |
139 | return default_restore_msi_irqs(dev, irq); | 139 | return default_restore_msi_irqs(dev); |
140 | } | 140 | } |
141 | 141 | ||
142 | static void msi_set_enable(struct pci_dev *dev, int enable) | 142 | static void msi_set_enable(struct pci_dev *dev, int enable) |
@@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data) | |||
262 | msi_set_mask_bit(data, 0); | 262 | msi_set_mask_bit(data, 0); |
263 | } | 263 | } |
264 | 264 | ||
265 | void default_restore_msi_irqs(struct pci_dev *dev) | ||
266 | { | ||
267 | struct msi_desc *entry; | ||
268 | |||
269 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
270 | default_restore_msi_irq(dev, entry->irq); | ||
271 | } | ||
272 | } | ||
273 | |||
265 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | 274 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
266 | { | 275 | { |
267 | BUG_ON(entry->dev->current_state != PCI_D0); | 276 | BUG_ON(entry->dev->current_state != PCI_D0); |
@@ -363,6 +372,9 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) | |||
363 | static void free_msi_irqs(struct pci_dev *dev) | 372 | static void free_msi_irqs(struct pci_dev *dev) |
364 | { | 373 | { |
365 | struct msi_desc *entry, *tmp; | 374 | struct msi_desc *entry, *tmp; |
375 | struct attribute **msi_attrs; | ||
376 | struct device_attribute *dev_attr; | ||
377 | int count = 0; | ||
366 | 378 | ||
367 | list_for_each_entry(entry, &dev->msi_list, list) { | 379 | list_for_each_entry(entry, &dev->msi_list, list) { |
368 | int i, nvec; | 380 | int i, nvec; |
@@ -398,6 +410,22 @@ static void free_msi_irqs(struct pci_dev *dev) | |||
398 | list_del(&entry->list); | 410 | list_del(&entry->list); |
399 | kfree(entry); | 411 | kfree(entry); |
400 | } | 412 | } |
413 | |||
414 | if (dev->msi_irq_groups) { | ||
415 | sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups); | ||
416 | msi_attrs = dev->msi_irq_groups[0]->attrs; | ||
417 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
418 | dev_attr = container_of(msi_attrs[count], | ||
419 | struct device_attribute, attr); | ||
420 | kfree(dev_attr->attr.name); | ||
421 | kfree(dev_attr); | ||
422 | ++count; | ||
423 | } | ||
424 | kfree(msi_attrs); | ||
425 | kfree(dev->msi_irq_groups[0]); | ||
426 | kfree(dev->msi_irq_groups); | ||
427 | dev->msi_irq_groups = NULL; | ||
428 | } | ||
401 | } | 429 | } |
402 | 430 | ||
403 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) | 431 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) |
@@ -430,7 +458,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
430 | 458 | ||
431 | pci_intx_for_msi(dev, 0); | 459 | pci_intx_for_msi(dev, 0); |
432 | msi_set_enable(dev, 0); | 460 | msi_set_enable(dev, 0); |
433 | arch_restore_msi_irqs(dev, dev->irq); | 461 | arch_restore_msi_irqs(dev); |
434 | 462 | ||
435 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); | 463 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); |
436 | msi_mask_irq(entry, msi_capable_mask(control), entry->masked); | 464 | msi_mask_irq(entry, msi_capable_mask(control), entry->masked); |
@@ -455,8 +483,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
455 | control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL; | 483 | control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL; |
456 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | 484 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); |
457 | 485 | ||
486 | arch_restore_msi_irqs(dev); | ||
458 | list_for_each_entry(entry, &dev->msi_list, list) { | 487 | list_for_each_entry(entry, &dev->msi_list, list) { |
459 | arch_restore_msi_irqs(dev, entry->irq); | ||
460 | msix_mask_irq(entry, entry->masked); | 488 | msix_mask_irq(entry, entry->masked); |
461 | } | 489 | } |
462 | 490 | ||
@@ -471,94 +499,95 @@ void pci_restore_msi_state(struct pci_dev *dev) | |||
471 | } | 499 | } |
472 | EXPORT_SYMBOL_GPL(pci_restore_msi_state); | 500 | EXPORT_SYMBOL_GPL(pci_restore_msi_state); |
473 | 501 | ||
474 | 502 | static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr, | |
475 | #define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr) | ||
476 | #define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj) | ||
477 | |||
478 | struct msi_attribute { | ||
479 | struct attribute attr; | ||
480 | ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr, | ||
481 | char *buf); | ||
482 | ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr, | ||
483 | const char *buf, size_t count); | ||
484 | }; | ||
485 | |||
486 | static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr, | ||
487 | char *buf) | 503 | char *buf) |
488 | { | 504 | { |
489 | return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi"); | 505 | struct pci_dev *pdev = to_pci_dev(dev); |
490 | } | 506 | struct msi_desc *entry; |
491 | 507 | unsigned long irq; | |
492 | static ssize_t msi_irq_attr_show(struct kobject *kobj, | 508 | int retval; |
493 | struct attribute *attr, char *buf) | ||
494 | { | ||
495 | struct msi_attribute *attribute = to_msi_attr(attr); | ||
496 | struct msi_desc *entry = to_msi_desc(kobj); | ||
497 | |||
498 | if (!attribute->show) | ||
499 | return -EIO; | ||
500 | |||
501 | return attribute->show(entry, attribute, buf); | ||
502 | } | ||
503 | |||
504 | static const struct sysfs_ops msi_irq_sysfs_ops = { | ||
505 | .show = msi_irq_attr_show, | ||
506 | }; | ||
507 | |||
508 | static struct msi_attribute mode_attribute = | ||
509 | __ATTR(mode, S_IRUGO, show_msi_mode, NULL); | ||
510 | |||
511 | 509 | ||
512 | static struct attribute *msi_irq_default_attrs[] = { | 510 | retval = kstrtoul(attr->attr.name, 10, &irq); |
513 | &mode_attribute.attr, | 511 | if (retval) |
514 | NULL | 512 | return retval; |
515 | }; | ||
516 | 513 | ||
517 | static void msi_kobj_release(struct kobject *kobj) | 514 | list_for_each_entry(entry, &pdev->msi_list, list) { |
518 | { | 515 | if (entry->irq == irq) { |
519 | struct msi_desc *entry = to_msi_desc(kobj); | 516 | return sprintf(buf, "%s\n", |
520 | 517 | entry->msi_attrib.is_msix ? "msix" : "msi"); | |
521 | pci_dev_put(entry->dev); | 518 | } |
519 | } | ||
520 | return -ENODEV; | ||
522 | } | 521 | } |
523 | 522 | ||
524 | static struct kobj_type msi_irq_ktype = { | ||
525 | .release = msi_kobj_release, | ||
526 | .sysfs_ops = &msi_irq_sysfs_ops, | ||
527 | .default_attrs = msi_irq_default_attrs, | ||
528 | }; | ||
529 | |||
530 | static int populate_msi_sysfs(struct pci_dev *pdev) | 523 | static int populate_msi_sysfs(struct pci_dev *pdev) |
531 | { | 524 | { |
525 | struct attribute **msi_attrs; | ||
526 | struct attribute *msi_attr; | ||
527 | struct device_attribute *msi_dev_attr; | ||
528 | struct attribute_group *msi_irq_group; | ||
529 | const struct attribute_group **msi_irq_groups; | ||
532 | struct msi_desc *entry; | 530 | struct msi_desc *entry; |
533 | struct kobject *kobj; | 531 | int ret = -ENOMEM; |
534 | int ret; | 532 | int num_msi = 0; |
535 | int count = 0; | 533 | int count = 0; |
536 | 534 | ||
537 | pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj); | 535 | /* Determine how many msi entries we have */ |
538 | if (!pdev->msi_kset) | 536 | list_for_each_entry(entry, &pdev->msi_list, list) { |
539 | return -ENOMEM; | 537 | ++num_msi; |
538 | } | ||
539 | if (!num_msi) | ||
540 | return 0; | ||
540 | 541 | ||
542 | /* Dynamically create the MSI attributes for the PCI device */ | ||
543 | msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL); | ||
544 | if (!msi_attrs) | ||
545 | return -ENOMEM; | ||
541 | list_for_each_entry(entry, &pdev->msi_list, list) { | 546 | list_for_each_entry(entry, &pdev->msi_list, list) { |
542 | kobj = &entry->kobj; | 547 | char *name = kmalloc(20, GFP_KERNEL); |
543 | kobj->kset = pdev->msi_kset; | 548 | msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); |
544 | pci_dev_get(pdev); | 549 | if (!msi_dev_attr) |
545 | ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL, | 550 | goto error_attrs; |
546 | "%u", entry->irq); | 551 | sprintf(name, "%d", entry->irq); |
547 | if (ret) | 552 | sysfs_attr_init(&msi_dev_attr->attr); |
548 | goto out_unroll; | 553 | msi_dev_attr->attr.name = name; |
549 | 554 | msi_dev_attr->attr.mode = S_IRUGO; | |
550 | count++; | 555 | msi_dev_attr->show = msi_mode_show; |
556 | msi_attrs[count] = &msi_dev_attr->attr; | ||
557 | ++count; | ||
551 | } | 558 | } |
552 | 559 | ||
560 | msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL); | ||
561 | if (!msi_irq_group) | ||
562 | goto error_attrs; | ||
563 | msi_irq_group->name = "msi_irqs"; | ||
564 | msi_irq_group->attrs = msi_attrs; | ||
565 | |||
566 | msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL); | ||
567 | if (!msi_irq_groups) | ||
568 | goto error_irq_group; | ||
569 | msi_irq_groups[0] = msi_irq_group; | ||
570 | |||
571 | ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups); | ||
572 | if (ret) | ||
573 | goto error_irq_groups; | ||
574 | pdev->msi_irq_groups = msi_irq_groups; | ||
575 | |||
553 | return 0; | 576 | return 0; |
554 | 577 | ||
555 | out_unroll: | 578 | error_irq_groups: |
556 | list_for_each_entry(entry, &pdev->msi_list, list) { | 579 | kfree(msi_irq_groups); |
557 | if (!count) | 580 | error_irq_group: |
558 | break; | 581 | kfree(msi_irq_group); |
559 | kobject_del(&entry->kobj); | 582 | error_attrs: |
560 | kobject_put(&entry->kobj); | 583 | count = 0; |
561 | count--; | 584 | msi_attr = msi_attrs[count]; |
585 | while (msi_attr) { | ||
586 | msi_dev_attr = container_of(msi_attr, struct device_attribute, attr); | ||
587 | kfree(msi_attr->name); | ||
588 | kfree(msi_dev_attr); | ||
589 | ++count; | ||
590 | msi_attr = msi_attrs[count]; | ||
562 | } | 591 | } |
563 | return ret; | 592 | return ret; |
564 | } | 593 | } |
@@ -729,7 +758,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
729 | 758 | ||
730 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | 759 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); |
731 | if (ret) | 760 | if (ret) |
732 | goto error; | 761 | goto out_avail; |
733 | 762 | ||
734 | /* | 763 | /* |
735 | * Some devices require MSI-X to be enabled before we can touch the | 764 | * Some devices require MSI-X to be enabled before we can touch the |
@@ -742,10 +771,8 @@ static int msix_capability_init(struct pci_dev *dev, | |||
742 | msix_program_entries(dev, entries); | 771 | msix_program_entries(dev, entries); |
743 | 772 | ||
744 | ret = populate_msi_sysfs(dev); | 773 | ret = populate_msi_sysfs(dev); |
745 | if (ret) { | 774 | if (ret) |
746 | ret = 0; | 775 | goto out_free; |
747 | goto error; | ||
748 | } | ||
749 | 776 | ||
750 | /* Set MSI-X enabled bits and unmask the function */ | 777 | /* Set MSI-X enabled bits and unmask the function */ |
751 | pci_intx_for_msi(dev, 0); | 778 | pci_intx_for_msi(dev, 0); |
@@ -756,7 +783,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
756 | 783 | ||
757 | return 0; | 784 | return 0; |
758 | 785 | ||
759 | error: | 786 | out_avail: |
760 | if (ret < 0) { | 787 | if (ret < 0) { |
761 | /* | 788 | /* |
762 | * If we had some success, report the number of irqs | 789 | * If we had some success, report the number of irqs |
@@ -773,6 +800,7 @@ error: | |||
773 | ret = avail; | 800 | ret = avail; |
774 | } | 801 | } |
775 | 802 | ||
803 | out_free: | ||
776 | free_msi_irqs(dev); | 804 | free_msi_irqs(dev); |
777 | 805 | ||
778 | return ret; | 806 | return ret; |
@@ -824,6 +852,31 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) | |||
824 | } | 852 | } |
825 | 853 | ||
826 | /** | 854 | /** |
855 | * pci_msi_vec_count - Return the number of MSI vectors a device can send | ||
856 | * @dev: device to report about | ||
857 | * | ||
858 | * This function returns the number of MSI vectors a device requested via | ||
859 | * Multiple Message Capable register. It returns a negative errno if the | ||
860 | * device is not capable sending MSI interrupts. Otherwise, the call succeeds | ||
861 | * and returns a power of two, up to a maximum of 2^5 (32), according to the | ||
862 | * MSI specification. | ||
863 | **/ | ||
864 | int pci_msi_vec_count(struct pci_dev *dev) | ||
865 | { | ||
866 | int ret; | ||
867 | u16 msgctl; | ||
868 | |||
869 | if (!dev->msi_cap) | ||
870 | return -EINVAL; | ||
871 | |||
872 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | ||
873 | ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); | ||
874 | |||
875 | return ret; | ||
876 | } | ||
877 | EXPORT_SYMBOL(pci_msi_vec_count); | ||
878 | |||
879 | /** | ||
827 | * pci_enable_msi_block - configure device's MSI capability structure | 880 | * pci_enable_msi_block - configure device's MSI capability structure |
828 | * @dev: device to configure | 881 | * @dev: device to configure |
829 | * @nvec: number of interrupts to configure | 882 | * @nvec: number of interrupts to configure |
@@ -836,16 +889,16 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) | |||
836 | * updates the @dev's irq member to the lowest new interrupt number; the | 889 | * updates the @dev's irq member to the lowest new interrupt number; the |
837 | * other interrupt numbers allocated to this device are consecutive. | 890 | * other interrupt numbers allocated to this device are consecutive. |
838 | */ | 891 | */ |
839 | int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) | 892 | int pci_enable_msi_block(struct pci_dev *dev, int nvec) |
840 | { | 893 | { |
841 | int status, maxvec; | 894 | int status, maxvec; |
842 | u16 msgctl; | ||
843 | 895 | ||
844 | if (!dev->msi_cap || dev->current_state != PCI_D0) | 896 | if (dev->current_state != PCI_D0) |
845 | return -EINVAL; | 897 | return -EINVAL; |
846 | 898 | ||
847 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | 899 | maxvec = pci_msi_vec_count(dev); |
848 | maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); | 900 | if (maxvec < 0) |
901 | return maxvec; | ||
849 | if (nvec > maxvec) | 902 | if (nvec > maxvec) |
850 | return maxvec; | 903 | return maxvec; |
851 | 904 | ||
@@ -867,31 +920,6 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) | |||
867 | } | 920 | } |
868 | EXPORT_SYMBOL(pci_enable_msi_block); | 921 | EXPORT_SYMBOL(pci_enable_msi_block); |
869 | 922 | ||
870 | int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec) | ||
871 | { | ||
872 | int ret, nvec; | ||
873 | u16 msgctl; | ||
874 | |||
875 | if (!dev->msi_cap || dev->current_state != PCI_D0) | ||
876 | return -EINVAL; | ||
877 | |||
878 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | ||
879 | ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); | ||
880 | |||
881 | if (maxvec) | ||
882 | *maxvec = ret; | ||
883 | |||
884 | do { | ||
885 | nvec = ret; | ||
886 | ret = pci_enable_msi_block(dev, nvec); | ||
887 | } while (ret > 0); | ||
888 | |||
889 | if (ret < 0) | ||
890 | return ret; | ||
891 | return nvec; | ||
892 | } | ||
893 | EXPORT_SYMBOL(pci_enable_msi_block_auto); | ||
894 | |||
895 | void pci_msi_shutdown(struct pci_dev *dev) | 923 | void pci_msi_shutdown(struct pci_dev *dev) |
896 | { | 924 | { |
897 | struct msi_desc *desc; | 925 | struct msi_desc *desc; |
@@ -925,25 +953,29 @@ void pci_disable_msi(struct pci_dev *dev) | |||
925 | 953 | ||
926 | pci_msi_shutdown(dev); | 954 | pci_msi_shutdown(dev); |
927 | free_msi_irqs(dev); | 955 | free_msi_irqs(dev); |
928 | kset_unregister(dev->msi_kset); | ||
929 | dev->msi_kset = NULL; | ||
930 | } | 956 | } |
931 | EXPORT_SYMBOL(pci_disable_msi); | 957 | EXPORT_SYMBOL(pci_disable_msi); |
932 | 958 | ||
933 | /** | 959 | /** |
934 | * pci_msix_table_size - return the number of device's MSI-X table entries | 960 | * pci_msix_vec_count - return the number of device's MSI-X table entries |
935 | * @dev: pointer to the pci_dev data structure of MSI-X device function | 961 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
936 | */ | 962 | |
937 | int pci_msix_table_size(struct pci_dev *dev) | 963 | * This function returns the number of device's MSI-X table entries and |
964 | * therefore the number of MSI-X vectors device is capable of sending. | ||
965 | * It returns a negative errno if the device is not capable of sending MSI-X | ||
966 | * interrupts. | ||
967 | **/ | ||
968 | int pci_msix_vec_count(struct pci_dev *dev) | ||
938 | { | 969 | { |
939 | u16 control; | 970 | u16 control; |
940 | 971 | ||
941 | if (!dev->msix_cap) | 972 | if (!dev->msix_cap) |
942 | return 0; | 973 | return -EINVAL; |
943 | 974 | ||
944 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | 975 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); |
945 | return msix_table_size(control); | 976 | return msix_table_size(control); |
946 | } | 977 | } |
978 | EXPORT_SYMBOL(pci_msix_vec_count); | ||
947 | 979 | ||
948 | /** | 980 | /** |
949 | * pci_enable_msix - configure device's MSI-X capability structure | 981 | * pci_enable_msix - configure device's MSI-X capability structure |
@@ -972,7 +1004,9 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) | |||
972 | if (status) | 1004 | if (status) |
973 | return status; | 1005 | return status; |
974 | 1006 | ||
975 | nr_entries = pci_msix_table_size(dev); | 1007 | nr_entries = pci_msix_vec_count(dev); |
1008 | if (nr_entries < 0) | ||
1009 | return nr_entries; | ||
976 | if (nvec > nr_entries) | 1010 | if (nvec > nr_entries) |
977 | return nr_entries; | 1011 | return nr_entries; |
978 | 1012 | ||
@@ -1023,8 +1057,6 @@ void pci_disable_msix(struct pci_dev *dev) | |||
1023 | 1057 | ||
1024 | pci_msix_shutdown(dev); | 1058 | pci_msix_shutdown(dev); |
1025 | free_msi_irqs(dev); | 1059 | free_msi_irqs(dev); |
1026 | kset_unregister(dev->msi_kset); | ||
1027 | dev->msi_kset = NULL; | ||
1028 | } | 1060 | } |
1029 | EXPORT_SYMBOL(pci_disable_msix); | 1061 | EXPORT_SYMBOL(pci_disable_msix); |
1030 | 1062 | ||
@@ -1079,3 +1111,77 @@ void pci_msi_init_pci_dev(struct pci_dev *dev) | |||
1079 | if (dev->msix_cap) | 1111 | if (dev->msix_cap) |
1080 | msix_set_enable(dev, 0); | 1112 | msix_set_enable(dev, 0); |
1081 | } | 1113 | } |
1114 | |||
1115 | /** | ||
1116 | * pci_enable_msi_range - configure device's MSI capability structure | ||
1117 | * @dev: device to configure | ||
1118 | * @minvec: minimal number of interrupts to configure | ||
1119 | * @maxvec: maximum number of interrupts to configure | ||
1120 | * | ||
1121 | * This function tries to allocate a maximum possible number of interrupts in a | ||
1122 | * range between @minvec and @maxvec. It returns a negative errno if an error | ||
1123 | * occurs. If it succeeds, it returns the actual number of interrupts allocated | ||
1124 | * and updates the @dev's irq member to the lowest new interrupt number; | ||
1125 | * the other interrupt numbers allocated to this device are consecutive. | ||
1126 | **/ | ||
1127 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) | ||
1128 | { | ||
1129 | int nvec = maxvec; | ||
1130 | int rc; | ||
1131 | |||
1132 | if (maxvec < minvec) | ||
1133 | return -ERANGE; | ||
1134 | |||
1135 | do { | ||
1136 | rc = pci_enable_msi_block(dev, nvec); | ||
1137 | if (rc < 0) { | ||
1138 | return rc; | ||
1139 | } else if (rc > 0) { | ||
1140 | if (rc < minvec) | ||
1141 | return -ENOSPC; | ||
1142 | nvec = rc; | ||
1143 | } | ||
1144 | } while (rc); | ||
1145 | |||
1146 | return nvec; | ||
1147 | } | ||
1148 | EXPORT_SYMBOL(pci_enable_msi_range); | ||
1149 | |||
1150 | /** | ||
1151 | * pci_enable_msix_range - configure device's MSI-X capability structure | ||
1152 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
1153 | * @entries: pointer to an array of MSI-X entries | ||
1154 | * @minvec: minimum number of MSI-X irqs requested | ||
1155 | * @maxvec: maximum number of MSI-X irqs requested | ||
1156 | * | ||
1157 | * Setup the MSI-X capability structure of device function with a maximum | ||
1158 | * possible number of interrupts in the range between @minvec and @maxvec | ||
1159 | * upon its software driver call to request for MSI-X mode enabled on its | ||
1160 | * hardware device function. It returns a negative errno if an error occurs. | ||
1161 | * If it succeeds, it returns the actual number of interrupts allocated and | ||
1162 | * indicates the successful configuration of MSI-X capability structure | ||
1163 | * with new allocated MSI-X interrupts. | ||
1164 | **/ | ||
1165 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, | ||
1166 | int minvec, int maxvec) | ||
1167 | { | ||
1168 | int nvec = maxvec; | ||
1169 | int rc; | ||
1170 | |||
1171 | if (maxvec < minvec) | ||
1172 | return -ERANGE; | ||
1173 | |||
1174 | do { | ||
1175 | rc = pci_enable_msix(dev, entries, nvec); | ||
1176 | if (rc < 0) { | ||
1177 | return rc; | ||
1178 | } else if (rc > 0) { | ||
1179 | if (rc < minvec) | ||
1180 | return -ENOSPC; | ||
1181 | nvec = rc; | ||
1182 | } | ||
1183 | } while (rc); | ||
1184 | |||
1185 | return nvec; | ||
1186 | } | ||
1187 | EXPORT_SYMBOL(pci_enable_msix_range); | ||
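The new pci_enable_msi_range() and pci_enable_msix_range() entry points added above let a driver ask for as many vectors as possible within a [min, max] range in one call, instead of looping over pci_enable_msi_block()/pci_enable_msix() itself. A hedged sketch of driver usage; the queue count, names, and fallback policy are illustrative, not taken from any in-tree driver.

#include <linux/pci.h>

#define EXAMPLE_MAX_QUEUES      8

/* Sketch: prefer MSI-X, fall back to MSI, accepting between 2 and 8 vectors. */
static int example_setup_irqs(struct pci_dev *pdev, struct msix_entry *entries)
{
        int i, nvec;

        for (i = 0; i < EXAMPLE_MAX_QUEUES; i++)
                entries[i].entry = i;

        nvec = pci_enable_msix_range(pdev, entries, 2, EXAMPLE_MAX_QUEUES);
        if (nvec > 0)
                return nvec;    /* entries[0..nvec-1].vector hold the irqs */

        /* MSI-X unavailable or fewer than 2 vectors: try plain MSI. */
        nvec = pci_enable_msi_range(pdev, 2, EXAMPLE_MAX_QUEUES);
        if (nvec > 0)
                return nvec;    /* irqs are pdev->irq .. pdev->irq + nvec - 1 */

        return nvec;            /* negative errno */
}
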
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index f7ebdba14bde..2bdbc0080204 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -361,7 +361,7 @@ static void pci_acpi_cleanup(struct device *dev) | |||
361 | 361 | ||
362 | static bool pci_acpi_bus_match(struct device *dev) | 362 | static bool pci_acpi_bus_match(struct device *dev) |
363 | { | 363 | { |
364 | return dev->bus == &pci_bus_type; | 364 | return dev_is_pci(dev); |
365 | } | 365 | } |
366 | 366 | ||
367 | static struct acpi_bus_type acpi_pci_bus = { | 367 | static struct acpi_bus_type acpi_pci_bus = { |
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index d51f45aa669e..6f5d343d251c 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c | |||
@@ -34,21 +34,7 @@ | |||
34 | 34 | ||
35 | #define DEVICE_LABEL_DSM 0x07 | 35 | #define DEVICE_LABEL_DSM 0x07 |
36 | 36 | ||
37 | #ifndef CONFIG_DMI | 37 | #ifdef CONFIG_DMI |
38 | |||
39 | static inline int | ||
40 | pci_create_smbiosname_file(struct pci_dev *pdev) | ||
41 | { | ||
42 | return -1; | ||
43 | } | ||
44 | |||
45 | static inline void | ||
46 | pci_remove_smbiosname_file(struct pci_dev *pdev) | ||
47 | { | ||
48 | } | ||
49 | |||
50 | #else | ||
51 | |||
52 | enum smbios_attr_enum { | 38 | enum smbios_attr_enum { |
53 | SMBIOS_ATTR_NONE = 0, | 39 | SMBIOS_ATTR_NONE = 0, |
54 | SMBIOS_ATTR_LABEL_SHOW, | 40 | SMBIOS_ATTR_LABEL_SHOW, |
@@ -156,31 +142,20 @@ pci_remove_smbiosname_file(struct pci_dev *pdev) | |||
156 | { | 142 | { |
157 | sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); | 143 | sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); |
158 | } | 144 | } |
159 | 145 | #else | |
160 | #endif | ||
161 | |||
162 | #ifndef CONFIG_ACPI | ||
163 | |||
164 | static inline int | ||
165 | pci_create_acpi_index_label_files(struct pci_dev *pdev) | ||
166 | { | ||
167 | return -1; | ||
168 | } | ||
169 | |||
170 | static inline int | 146 | static inline int |
171 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) | 147 | pci_create_smbiosname_file(struct pci_dev *pdev) |
172 | { | 148 | { |
173 | return -1; | 149 | return -1; |
174 | } | 150 | } |
175 | 151 | ||
176 | static inline bool | 152 | static inline void |
177 | device_has_dsm(struct device *dev) | 153 | pci_remove_smbiosname_file(struct pci_dev *pdev) |
178 | { | 154 | { |
179 | return false; | ||
180 | } | 155 | } |
156 | #endif | ||
181 | 157 | ||
182 | #else | 158 | #ifdef CONFIG_ACPI |
183 | |||
184 | static const char device_label_dsm_uuid[] = { | 159 | static const char device_label_dsm_uuid[] = { |
185 | 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D, | 160 | 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D, |
186 | 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D | 161 | 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D |
@@ -364,6 +339,24 @@ pci_remove_acpi_index_label_files(struct pci_dev *pdev) | |||
364 | sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group); | 339 | sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group); |
365 | return 0; | 340 | return 0; |
366 | } | 341 | } |
342 | #else | ||
343 | static inline int | ||
344 | pci_create_acpi_index_label_files(struct pci_dev *pdev) | ||
345 | { | ||
346 | return -1; | ||
347 | } | ||
348 | |||
349 | static inline int | ||
350 | pci_remove_acpi_index_label_files(struct pci_dev *pdev) | ||
351 | { | ||
352 | return -1; | ||
353 | } | ||
354 | |||
355 | static inline bool | ||
356 | device_has_dsm(struct device *dev) | ||
357 | { | ||
358 | return false; | ||
359 | } | ||
367 | #endif | 360 | #endif |
368 | 361 | ||
369 | void pci_create_firmware_label_files(struct pci_dev *pdev) | 362 | void pci_create_firmware_label_files(struct pci_dev *pdev) |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c91e6c18debc..276ef9c18802 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -297,7 +297,6 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, | |||
297 | } | 297 | } |
298 | static DEVICE_ATTR_RW(msi_bus); | 298 | static DEVICE_ATTR_RW(msi_bus); |
299 | 299 | ||
300 | static DEFINE_MUTEX(pci_remove_rescan_mutex); | ||
301 | static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, | 300 | static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, |
302 | size_t count) | 301 | size_t count) |
303 | { | 302 | { |
@@ -308,10 +307,10 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, | |||
308 | return -EINVAL; | 307 | return -EINVAL; |
309 | 308 | ||
310 | if (val) { | 309 | if (val) { |
311 | mutex_lock(&pci_remove_rescan_mutex); | 310 | pci_lock_rescan_remove(); |
312 | while ((b = pci_find_next_bus(b)) != NULL) | 311 | while ((b = pci_find_next_bus(b)) != NULL) |
313 | pci_rescan_bus(b); | 312 | pci_rescan_bus(b); |
314 | mutex_unlock(&pci_remove_rescan_mutex); | 313 | pci_unlock_rescan_remove(); |
315 | } | 314 | } |
316 | return count; | 315 | return count; |
317 | } | 316 | } |
@@ -342,9 +341,9 @@ dev_rescan_store(struct device *dev, struct device_attribute *attr, | |||
342 | return -EINVAL; | 341 | return -EINVAL; |
343 | 342 | ||
344 | if (val) { | 343 | if (val) { |
345 | mutex_lock(&pci_remove_rescan_mutex); | 344 | pci_lock_rescan_remove(); |
346 | pci_rescan_bus(pdev->bus); | 345 | pci_rescan_bus(pdev->bus); |
347 | mutex_unlock(&pci_remove_rescan_mutex); | 346 | pci_unlock_rescan_remove(); |
348 | } | 347 | } |
349 | return count; | 348 | return count; |
350 | } | 349 | } |
@@ -354,11 +353,7 @@ static struct device_attribute dev_rescan_attr = __ATTR(rescan, | |||
354 | 353 | ||
355 | static void remove_callback(struct device *dev) | 354 | static void remove_callback(struct device *dev) |
356 | { | 355 | { |
357 | struct pci_dev *pdev = to_pci_dev(dev); | 356 | pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); |
358 | |||
359 | mutex_lock(&pci_remove_rescan_mutex); | ||
360 | pci_stop_and_remove_bus_device(pdev); | ||
361 | mutex_unlock(&pci_remove_rescan_mutex); | ||
362 | } | 357 | } |
363 | 358 | ||
364 | static ssize_t | 359 | static ssize_t |
@@ -395,12 +390,12 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr, | |||
395 | return -EINVAL; | 390 | return -EINVAL; |
396 | 391 | ||
397 | if (val) { | 392 | if (val) { |
398 | mutex_lock(&pci_remove_rescan_mutex); | 393 | pci_lock_rescan_remove(); |
399 | if (!pci_is_root_bus(bus) && list_empty(&bus->devices)) | 394 | if (!pci_is_root_bus(bus) && list_empty(&bus->devices)) |
400 | pci_rescan_bus_bridge_resize(bus->self); | 395 | pci_rescan_bus_bridge_resize(bus->self); |
401 | else | 396 | else |
402 | pci_rescan_bus(bus); | 397 | pci_rescan_bus(bus); |
403 | mutex_unlock(&pci_remove_rescan_mutex); | 398 | pci_unlock_rescan_remove(); |
404 | } | 399 | } |
405 | return count; | 400 | return count; |
406 | } | 401 | } |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 07369f32e8bb..1febe90831b4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -431,6 +431,32 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | /** | 433 | /** |
434 | * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos | ||
435 | * @dev: the PCI device to operate on | ||
436 | * @pos: config space offset of status word | ||
437 | * @mask: mask of bit(s) to care about in status word | ||
438 | * | ||
439 | * Return 1 when mask bit(s) in status word clear, 0 otherwise. | ||
440 | */ | ||
441 | int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) | ||
442 | { | ||
443 | int i; | ||
444 | |||
445 | /* Wait for Transaction Pending bit clean */ | ||
446 | for (i = 0; i < 4; i++) { | ||
447 | u16 status; | ||
448 | if (i) | ||
449 | msleep((1 << (i - 1)) * 100); | ||
450 | |||
451 | pci_read_config_word(dev, pos, &status); | ||
452 | if (!(status & mask)) | ||
453 | return 1; | ||
454 | } | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | /** | ||
434 | * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) | 460 | * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) |
435 | * @dev: PCI device to have its BARs restored | 461 | * @dev: PCI device to have its BARs restored |
436 | * | 462 | * |
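The pci_wait_for_pending() helper introduced above factors out "poll a config-space status word until the given bits clear, backing off up to roughly 1.5 s total". A hedged sketch of a caller, mirroring how pci_af_flr() uses it for the Advanced Features Transaction Pending bit; it assumes the helper is declared in linux/pci.h as added by this hunk, and the example_* name is illustrative.

#include <linux/pci.h>

/* Sketch: wait for the AF Transaction Pending bit to clear before an FLR. */
static int example_wait_af_idle(struct pci_dev *dev)
{
        int pos = pci_find_capability(dev, PCI_CAP_ID_AF);

        if (!pos)
                return -ENOTTY;

        /* Returns 1 once PCI_AF_STATUS_TP is clear, 0 if it never clears. */
        if (!pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
                dev_err(&dev->dev, "transaction pending did not clear\n");

        return 0;
}
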
@@ -657,6 +683,28 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) | |||
657 | } | 683 | } |
658 | 684 | ||
659 | /** | 685 | /** |
686 | * pci_wakeup - Wake up a PCI device | ||
687 | * @pci_dev: Device to handle. | ||
688 | * @ign: ignored parameter | ||
689 | */ | ||
690 | static int pci_wakeup(struct pci_dev *pci_dev, void *ign) | ||
691 | { | ||
692 | pci_wakeup_event(pci_dev); | ||
693 | pm_request_resume(&pci_dev->dev); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | /** | ||
698 | * pci_wakeup_bus - Walk given bus and wake up devices on it | ||
699 | * @bus: Top bus of the subtree to walk. | ||
700 | */ | ||
701 | static void pci_wakeup_bus(struct pci_bus *bus) | ||
702 | { | ||
703 | if (bus) | ||
704 | pci_walk_bus(bus, pci_wakeup, NULL); | ||
705 | } | ||
706 | |||
707 | /** | ||
660 | * __pci_start_power_transition - Start power transition of a PCI device | 708 | * __pci_start_power_transition - Start power transition of a PCI device |
661 | * @dev: PCI device to handle. | 709 | * @dev: PCI device to handle. |
662 | * @state: State to put the device into. | 710 | * @state: State to put the device into. |
@@ -835,18 +883,28 @@ EXPORT_SYMBOL(pci_choose_state); | |||
835 | #define PCI_EXP_SAVE_REGS 7 | 883 | #define PCI_EXP_SAVE_REGS 7 |
836 | 884 | ||
837 | 885 | ||
838 | static struct pci_cap_saved_state *pci_find_saved_cap( | 886 | static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, |
839 | struct pci_dev *pci_dev, char cap) | 887 | u16 cap, bool extended) |
840 | { | 888 | { |
841 | struct pci_cap_saved_state *tmp; | 889 | struct pci_cap_saved_state *tmp; |
842 | 890 | ||
843 | hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { | 891 | hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { |
844 | if (tmp->cap.cap_nr == cap) | 892 | if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) |
845 | return tmp; | 893 | return tmp; |
846 | } | 894 | } |
847 | return NULL; | 895 | return NULL; |
848 | } | 896 | } |
849 | 897 | ||
898 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) | ||
899 | { | ||
900 | return _pci_find_saved_cap(dev, cap, false); | ||
901 | } | ||
902 | |||
903 | struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) | ||
904 | { | ||
905 | return _pci_find_saved_cap(dev, cap, true); | ||
906 | } | ||
907 | |||
850 | static int pci_save_pcie_state(struct pci_dev *dev) | 908 | static int pci_save_pcie_state(struct pci_dev *dev) |
851 | { | 909 | { |
852 | int i = 0; | 910 | int i = 0; |
@@ -948,6 +1006,8 @@ pci_save_state(struct pci_dev *dev) | |||
948 | return i; | 1006 | return i; |
949 | if ((i = pci_save_pcix_state(dev)) != 0) | 1007 | if ((i = pci_save_pcix_state(dev)) != 0) |
950 | return i; | 1008 | return i; |
1009 | if ((i = pci_save_vc_state(dev)) != 0) | ||
1010 | return i; | ||
951 | return 0; | 1011 | return 0; |
952 | } | 1012 | } |
953 | 1013 | ||
@@ -1010,6 +1070,7 @@ void pci_restore_state(struct pci_dev *dev) | |||
1010 | /* PCI Express register must be restored first */ | 1070 | /* PCI Express register must be restored first */ |
1011 | pci_restore_pcie_state(dev); | 1071 | pci_restore_pcie_state(dev); |
1012 | pci_restore_ats_state(dev); | 1072 | pci_restore_ats_state(dev); |
1073 | pci_restore_vc_state(dev); | ||
1013 | 1074 | ||
1014 | pci_restore_config_space(dev); | 1075 | pci_restore_config_space(dev); |
1015 | 1076 | ||
@@ -1071,7 +1132,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state); | |||
1071 | * @dev: PCI device that we're dealing with | 1132 | * @dev: PCI device that we're dealing with |
1072 | * @state: Saved state returned from pci_store_saved_state() | 1133 | * @state: Saved state returned from pci_store_saved_state() |
1073 | */ | 1134 | */ |
1074 | int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) | 1135 | static int pci_load_saved_state(struct pci_dev *dev, |
1136 | struct pci_saved_state *state) | ||
1075 | { | 1137 | { |
1076 | struct pci_cap_saved_data *cap; | 1138 | struct pci_cap_saved_data *cap; |
1077 | 1139 | ||
@@ -1087,7 +1149,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) | |||
1087 | while (cap->size) { | 1149 | while (cap->size) { |
1088 | struct pci_cap_saved_state *tmp; | 1150 | struct pci_cap_saved_state *tmp; |
1089 | 1151 | ||
1090 | tmp = pci_find_saved_cap(dev, cap->cap_nr); | 1152 | tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); |
1091 | if (!tmp || tmp->cap.size != cap->size) | 1153 | if (!tmp || tmp->cap.size != cap->size) |
1092 | return -EINVAL; | 1154 | return -EINVAL; |
1093 | 1155 | ||
@@ -1099,7 +1161,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) | |||
1099 | dev->state_saved = true; | 1161 | dev->state_saved = true; |
1100 | return 0; | 1162 | return 0; |
1101 | } | 1163 | } |
1102 | EXPORT_SYMBOL_GPL(pci_load_saved_state); | ||
1103 | 1164 | ||
1104 | /** | 1165 | /** |
1105 | * pci_load_and_free_saved_state - Reload the save state pointed to by state, | 1166 | * pci_load_and_free_saved_state - Reload the save state pointed to by state, |
@@ -1531,27 +1592,6 @@ void pci_pme_wakeup_bus(struct pci_bus *bus) | |||
1531 | pci_walk_bus(bus, pci_pme_wakeup, (void *)true); | 1592 | pci_walk_bus(bus, pci_pme_wakeup, (void *)true); |
1532 | } | 1593 | } |
1533 | 1594 | ||
1534 | /** | ||
1535 | * pci_wakeup - Wake up a PCI device | ||
1536 | * @pci_dev: Device to handle. | ||
1537 | * @ign: ignored parameter | ||
1538 | */ | ||
1539 | static int pci_wakeup(struct pci_dev *pci_dev, void *ign) | ||
1540 | { | ||
1541 | pci_wakeup_event(pci_dev); | ||
1542 | pm_request_resume(&pci_dev->dev); | ||
1543 | return 0; | ||
1544 | } | ||
1545 | |||
1546 | /** | ||
1547 | * pci_wakeup_bus - Walk given bus and wake up devices on it | ||
1548 | * @bus: Top bus of the subtree to walk. | ||
1549 | */ | ||
1550 | void pci_wakeup_bus(struct pci_bus *bus) | ||
1551 | { | ||
1552 | if (bus) | ||
1553 | pci_walk_bus(bus, pci_wakeup, NULL); | ||
1554 | } | ||
1555 | 1595 | ||
1556 | /** | 1596 | /** |
1557 | * pci_pme_capable - check the capability of PCI device to generate PME# | 1597 | * pci_pme_capable - check the capability of PCI device to generate PME# |
@@ -1765,7 +1805,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable) | |||
1765 | * If the platform can't manage @dev, return the deepest state from which it | 1805 | * If the platform can't manage @dev, return the deepest state from which it |
1766 | * can generate wake events, based on any available PME info. | 1806 | * can generate wake events, based on any available PME info. |
1767 | */ | 1807 | */ |
1768 | pci_power_t pci_target_state(struct pci_dev *dev) | 1808 | static pci_power_t pci_target_state(struct pci_dev *dev) |
1769 | { | 1809 | { |
1770 | pci_power_t target_state = PCI_D3hot; | 1810 | pci_power_t target_state = PCI_D3hot; |
1771 | 1811 | ||
@@ -2021,18 +2061,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev, | |||
2021 | } | 2061 | } |
2022 | 2062 | ||
2023 | /** | 2063 | /** |
2024 | * pci_add_cap_save_buffer - allocate buffer for saving given capability registers | 2064 | * _pci_add_cap_save_buffer - allocate buffer for saving given |
2065 | * capability registers | ||
2025 | * @dev: the PCI device | 2066 | * @dev: the PCI device |
2026 | * @cap: the capability to allocate the buffer for | 2067 | * @cap: the capability to allocate the buffer for |
2068 | * @extended: Standard or Extended capability ID | ||
2027 | * @size: requested size of the buffer | 2069 | * @size: requested size of the buffer |
2028 | */ | 2070 | */ |
2029 | static int pci_add_cap_save_buffer( | 2071 | static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap, |
2030 | struct pci_dev *dev, char cap, unsigned int size) | 2072 | bool extended, unsigned int size) |
2031 | { | 2073 | { |
2032 | int pos; | 2074 | int pos; |
2033 | struct pci_cap_saved_state *save_state; | 2075 | struct pci_cap_saved_state *save_state; |
2034 | 2076 | ||
2035 | pos = pci_find_capability(dev, cap); | 2077 | if (extended) |
2078 | pos = pci_find_ext_capability(dev, cap); | ||
2079 | else | ||
2080 | pos = pci_find_capability(dev, cap); | ||
2081 | |||
2036 | if (pos <= 0) | 2082 | if (pos <= 0) |
2037 | return 0; | 2083 | return 0; |
2038 | 2084 | ||
@@ -2041,12 +2087,23 @@ static int pci_add_cap_save_buffer( | |||
2041 | return -ENOMEM; | 2087 | return -ENOMEM; |
2042 | 2088 | ||
2043 | save_state->cap.cap_nr = cap; | 2089 | save_state->cap.cap_nr = cap; |
2090 | save_state->cap.cap_extended = extended; | ||
2044 | save_state->cap.size = size; | 2091 | save_state->cap.size = size; |
2045 | pci_add_saved_cap(dev, save_state); | 2092 | pci_add_saved_cap(dev, save_state); |
2046 | 2093 | ||
2047 | return 0; | 2094 | return 0; |
2048 | } | 2095 | } |
2049 | 2096 | ||
2097 | int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size) | ||
2098 | { | ||
2099 | return _pci_add_cap_save_buffer(dev, cap, false, size); | ||
2100 | } | ||
2101 | |||
2102 | int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size) | ||
2103 | { | ||
2104 | return _pci_add_cap_save_buffer(dev, cap, true, size); | ||
2105 | } | ||
2106 | |||
2050 | /** | 2107 | /** |
2051 | * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities | 2108 | * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities |
2052 | * @dev: the PCI device | 2109 | * @dev: the PCI device |
@@ -2065,6 +2122,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) | |||
2065 | if (error) | 2122 | if (error) |
2066 | dev_err(&dev->dev, | 2123 | dev_err(&dev->dev, |
2067 | "unable to preallocate PCI-X save buffer\n"); | 2124 | "unable to preallocate PCI-X save buffer\n"); |
2125 | |||
2126 | pci_allocate_vc_save_buffers(dev); | ||
2068 | } | 2127 | } |
2069 | 2128 | ||
2070 | void pci_free_cap_save_buffers(struct pci_dev *dev) | 2129 | void pci_free_cap_save_buffers(struct pci_dev *dev) |
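pci_add_ext_cap_save_buffer() and pci_find_saved_ext_cap() give extended (PCIe) capabilities the same suspend/resume save area that standard capabilities already had; the Virtual Channel code hooked in via pci_save_vc_state(), pci_restore_vc_state() and pci_allocate_vc_save_buffers() elsewhere in this series is the first user. The sketch below shows the rough pattern with a placeholder buffer size and illustrative function names; PCI_EXT_CAP_ID_VC, the two helpers and the pci_cap_saved_state layout are taken from the tree.

    #include <linux/pci.h>

    static int example_alloc_vc_save_buffer(struct pci_dev *dev)
    {
            /* Placeholder size: one u32 per register the save routine copies. */
            return pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_VC,
                                               4 * sizeof(u32));
    }

    static void example_save_vc_registers(struct pci_dev *dev)
    {
            struct pci_cap_saved_state *save;

            save = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_VC);
            if (!save)
                    return;         /* no buffer was preallocated for this device */

            /* ... read the VC registers and stash them in save->cap.data[] ... */
    }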
@@ -2110,242 +2169,6 @@ void pci_configure_ari(struct pci_dev *dev) | |||
2110 | } | 2169 | } |
2111 | } | 2170 | } |
2112 | 2171 | ||
2113 | /** | ||
2114 | * pci_enable_ido - enable ID-based Ordering on a device | ||
2115 | * @dev: the PCI device | ||
2116 | * @type: which types of IDO to enable | ||
2117 | * | ||
2118 | * Enable ID-based ordering on @dev. @type can contain the bits | ||
2119 | * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate | ||
2120 | * which types of transactions are allowed to be re-ordered. | ||
2121 | */ | ||
2122 | void pci_enable_ido(struct pci_dev *dev, unsigned long type) | ||
2123 | { | ||
2124 | u16 ctrl = 0; | ||
2125 | |||
2126 | if (type & PCI_EXP_IDO_REQUEST) | ||
2127 | ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; | ||
2128 | if (type & PCI_EXP_IDO_COMPLETION) | ||
2129 | ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; | ||
2130 | if (ctrl) | ||
2131 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); | ||
2132 | } | ||
2133 | EXPORT_SYMBOL(pci_enable_ido); | ||
2134 | |||
2135 | /** | ||
2136 | * pci_disable_ido - disable ID-based ordering on a device | ||
2137 | * @dev: the PCI device | ||
2138 | * @type: which types of IDO to disable | ||
2139 | */ | ||
2140 | void pci_disable_ido(struct pci_dev *dev, unsigned long type) | ||
2141 | { | ||
2142 | u16 ctrl = 0; | ||
2143 | |||
2144 | if (type & PCI_EXP_IDO_REQUEST) | ||
2145 | ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; | ||
2146 | if (type & PCI_EXP_IDO_COMPLETION) | ||
2147 | ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; | ||
2148 | if (ctrl) | ||
2149 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); | ||
2150 | } | ||
2151 | EXPORT_SYMBOL(pci_disable_ido); | ||
2152 | |||
2153 | /** | ||
2154 | * pci_enable_obff - enable optimized buffer flush/fill | ||
2155 | * @dev: PCI device | ||
2156 | * @type: type of signaling to use | ||
2157 | * | ||
2158 | * Try to enable @type OBFF signaling on @dev. It will try using WAKE# | ||
2159 | * signaling if possible, falling back to message signaling only if | ||
2160 | * WAKE# isn't supported. @type should indicate whether the PCIe link | ||
2161 | * be brought out of L0s or L1 to send the message. It should be either | ||
2162 | * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0. | ||
2163 | * | ||
2164 | * If your device can benefit from receiving all messages, even at the | ||
2165 | * power cost of bringing the link back up from a low power state, use | ||
2166 | * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the | ||
2167 | * preferred type). | ||
2168 | * | ||
2169 | * RETURNS: | ||
2170 | * Zero on success, appropriate error number on failure. | ||
2171 | */ | ||
2172 | int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) | ||
2173 | { | ||
2174 | u32 cap; | ||
2175 | u16 ctrl; | ||
2176 | int ret; | ||
2177 | |||
2178 | pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); | ||
2179 | if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK)) | ||
2180 | return -ENOTSUPP; /* no OBFF support at all */ | ||
2181 | |||
2182 | /* Make sure the topology supports OBFF as well */ | ||
2183 | if (dev->bus->self) { | ||
2184 | ret = pci_enable_obff(dev->bus->self, type); | ||
2185 | if (ret) | ||
2186 | return ret; | ||
2187 | } | ||
2188 | |||
2189 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); | ||
2190 | if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE) | ||
2191 | ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN; | ||
2192 | else { | ||
2193 | switch (type) { | ||
2194 | case PCI_EXP_OBFF_SIGNAL_L0: | ||
2195 | if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN)) | ||
2196 | ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN; | ||
2197 | break; | ||
2198 | case PCI_EXP_OBFF_SIGNAL_ALWAYS: | ||
2199 | ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN; | ||
2200 | ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN; | ||
2201 | break; | ||
2202 | default: | ||
2203 | WARN(1, "bad OBFF signal type\n"); | ||
2204 | return -ENOTSUPP; | ||
2205 | } | ||
2206 | } | ||
2207 | pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); | ||
2208 | |||
2209 | return 0; | ||
2210 | } | ||
2211 | EXPORT_SYMBOL(pci_enable_obff); | ||
2212 | |||
2213 | /** | ||
2214 | * pci_disable_obff - disable optimized buffer flush/fill | ||
2215 | * @dev: PCI device | ||
2216 | * | ||
2217 | * Disable OBFF on @dev. | ||
2218 | */ | ||
2219 | void pci_disable_obff(struct pci_dev *dev) | ||
2220 | { | ||
2221 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, | ||
2222 | PCI_EXP_DEVCTL2_OBFF_WAKE_EN); | ||
2223 | } | ||
2224 | EXPORT_SYMBOL(pci_disable_obff); | ||
2225 | |||
2226 | /** | ||
2227 | * pci_ltr_supported - check whether a device supports LTR | ||
2228 | * @dev: PCI device | ||
2229 | * | ||
2230 | * RETURNS: | ||
2231 | * True if @dev supports latency tolerance reporting, false otherwise. | ||
2232 | */ | ||
2233 | static bool pci_ltr_supported(struct pci_dev *dev) | ||
2234 | { | ||
2235 | u32 cap; | ||
2236 | |||
2237 | pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); | ||
2238 | |||
2239 | return cap & PCI_EXP_DEVCAP2_LTR; | ||
2240 | } | ||
2241 | |||
2242 | /** | ||
2243 | * pci_enable_ltr - enable latency tolerance reporting | ||
2244 | * @dev: PCI device | ||
2245 | * | ||
2246 | * Enable LTR on @dev if possible, which means enabling it first on | ||
2247 | * upstream ports. | ||
2248 | * | ||
2249 | * RETURNS: | ||
2250 | * Zero on success, errno on failure. | ||
2251 | */ | ||
2252 | int pci_enable_ltr(struct pci_dev *dev) | ||
2253 | { | ||
2254 | int ret; | ||
2255 | |||
2256 | /* Only primary function can enable/disable LTR */ | ||
2257 | if (PCI_FUNC(dev->devfn) != 0) | ||
2258 | return -EINVAL; | ||
2259 | |||
2260 | if (!pci_ltr_supported(dev)) | ||
2261 | return -ENOTSUPP; | ||
2262 | |||
2263 | /* Enable upstream ports first */ | ||
2264 | if (dev->bus->self) { | ||
2265 | ret = pci_enable_ltr(dev->bus->self); | ||
2266 | if (ret) | ||
2267 | return ret; | ||
2268 | } | ||
2269 | |||
2270 | return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, | ||
2271 | PCI_EXP_DEVCTL2_LTR_EN); | ||
2272 | } | ||
2273 | EXPORT_SYMBOL(pci_enable_ltr); | ||
2274 | |||
2275 | /** | ||
2276 | * pci_disable_ltr - disable latency tolerance reporting | ||
2277 | * @dev: PCI device | ||
2278 | */ | ||
2279 | void pci_disable_ltr(struct pci_dev *dev) | ||
2280 | { | ||
2281 | /* Only primary function can enable/disable LTR */ | ||
2282 | if (PCI_FUNC(dev->devfn) != 0) | ||
2283 | return; | ||
2284 | |||
2285 | if (!pci_ltr_supported(dev)) | ||
2286 | return; | ||
2287 | |||
2288 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, | ||
2289 | PCI_EXP_DEVCTL2_LTR_EN); | ||
2290 | } | ||
2291 | EXPORT_SYMBOL(pci_disable_ltr); | ||
2292 | |||
2293 | static int __pci_ltr_scale(int *val) | ||
2294 | { | ||
2295 | int scale = 0; | ||
2296 | |||
2297 | while (*val > 1023) { | ||
2298 | *val = (*val + 31) / 32; | ||
2299 | scale++; | ||
2300 | } | ||
2301 | return scale; | ||
2302 | } | ||
2303 | |||
2304 | /** | ||
2305 | * pci_set_ltr - set LTR latency values | ||
2306 | * @dev: PCI device | ||
2307 | * @snoop_lat_ns: snoop latency in nanoseconds | ||
2308 | * @nosnoop_lat_ns: nosnoop latency in nanoseconds | ||
2309 | * | ||
2310 | * Figure out the scale and set the LTR values accordingly. | ||
2311 | */ | ||
2312 | int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns) | ||
2313 | { | ||
2314 | int pos, ret, snoop_scale, nosnoop_scale; | ||
2315 | u16 val; | ||
2316 | |||
2317 | if (!pci_ltr_supported(dev)) | ||
2318 | return -ENOTSUPP; | ||
2319 | |||
2320 | snoop_scale = __pci_ltr_scale(&snoop_lat_ns); | ||
2321 | nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns); | ||
2322 | |||
2323 | if (snoop_lat_ns > PCI_LTR_VALUE_MASK || | ||
2324 | nosnoop_lat_ns > PCI_LTR_VALUE_MASK) | ||
2325 | return -EINVAL; | ||
2326 | |||
2327 | if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) || | ||
2328 | (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT))) | ||
2329 | return -EINVAL; | ||
2330 | |||
2331 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); | ||
2332 | if (!pos) | ||
2333 | return -ENOTSUPP; | ||
2334 | |||
2335 | val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns; | ||
2336 | ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val); | ||
2337 | if (ret != 4) | ||
2338 | return -EIO; | ||
2339 | |||
2340 | val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns; | ||
2341 | ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val); | ||
2342 | if (ret != 4) | ||
2343 | return -EIO; | ||
2344 | |||
2345 | return 0; | ||
2346 | } | ||
2347 | EXPORT_SYMBOL(pci_set_ltr); | ||
2348 | |||
2349 | static int pci_acs_enable; | 2172 | static int pci_acs_enable; |
2350 | 2173 | ||
2351 | /** | 2174 | /** |
@@ -3138,7 +2961,7 @@ bool pci_check_and_mask_intx(struct pci_dev *dev) | |||
3138 | EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); | 2961 | EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); |
3139 | 2962 | ||
3140 | /** | 2963 | /** |
3141 | * pci_check_and_mask_intx - unmask INTx of no interrupt is pending | 2964 | * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending |
3142 | * @dev: the PCI device to operate on | 2965 | * @dev: the PCI device to operate on |
3143 | * | 2966 | * |
3144 | * Check if the device dev has its INTx line asserted, unmask it if not | 2967 | * Check if the device dev has its INTx line asserted, unmask it if not |
@@ -3204,20 +3027,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary); | |||
3204 | */ | 3027 | */ |
3205 | int pci_wait_for_pending_transaction(struct pci_dev *dev) | 3028 | int pci_wait_for_pending_transaction(struct pci_dev *dev) |
3206 | { | 3029 | { |
3207 | int i; | 3030 | if (!pci_is_pcie(dev)) |
3208 | u16 status; | 3031 | return 1; |
3209 | |||
3210 | /* Wait for Transaction Pending bit clean */ | ||
3211 | for (i = 0; i < 4; i++) { | ||
3212 | if (i) | ||
3213 | msleep((1 << (i - 1)) * 100); | ||
3214 | |||
3215 | pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); | ||
3216 | if (!(status & PCI_EXP_DEVSTA_TRPND)) | ||
3217 | return 1; | ||
3218 | } | ||
3219 | 3032 | ||
3220 | return 0; | 3033 | return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); |
3221 | } | 3034 | } |
3222 | EXPORT_SYMBOL(pci_wait_for_pending_transaction); | 3035 | EXPORT_SYMBOL(pci_wait_for_pending_transaction); |
3223 | 3036 | ||
@@ -3244,10 +3057,8 @@ static int pcie_flr(struct pci_dev *dev, int probe) | |||
3244 | 3057 | ||
3245 | static int pci_af_flr(struct pci_dev *dev, int probe) | 3058 | static int pci_af_flr(struct pci_dev *dev, int probe) |
3246 | { | 3059 | { |
3247 | int i; | ||
3248 | int pos; | 3060 | int pos; |
3249 | u8 cap; | 3061 | u8 cap; |
3250 | u8 status; | ||
3251 | 3062 | ||
3252 | pos = pci_find_capability(dev, PCI_CAP_ID_AF); | 3063 | pos = pci_find_capability(dev, PCI_CAP_ID_AF); |
3253 | if (!pos) | 3064 | if (!pos) |
@@ -3261,14 +3072,8 @@ static int pci_af_flr(struct pci_dev *dev, int probe) | |||
3261 | return 0; | 3072 | return 0; |
3262 | 3073 | ||
3263 | /* Wait for Transaction Pending bit clean */ | 3074 | /* Wait for Transaction Pending bit clean */ |
3264 | for (i = 0; i < 4; i++) { | 3075 | if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8)) |
3265 | if (i) | 3076 | goto clear; |
3266 | msleep((1 << (i - 1)) * 100); | ||
3267 | |||
3268 | pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status); | ||
3269 | if (!(status & PCI_AF_STATUS_TP)) | ||
3270 | goto clear; | ||
3271 | } | ||
3272 | 3077 | ||
3273 | dev_err(&dev->dev, "transaction is not cleared; " | 3078 | dev_err(&dev->dev, "transaction is not cleared; " |
3274 | "proceeding with reset anyway\n"); | 3079 | "proceeding with reset anyway\n"); |
@@ -3445,6 +3250,18 @@ static void pci_dev_lock(struct pci_dev *dev) | |||
3445 | device_lock(&dev->dev); | 3250 | device_lock(&dev->dev); |
3446 | } | 3251 | } |
3447 | 3252 | ||
3253 | /* Return 1 on successful lock, 0 on contention */ | ||
3254 | static int pci_dev_trylock(struct pci_dev *dev) | ||
3255 | { | ||
3256 | if (pci_cfg_access_trylock(dev)) { | ||
3257 | if (device_trylock(&dev->dev)) | ||
3258 | return 1; | ||
3259 | pci_cfg_access_unlock(dev); | ||
3260 | } | ||
3261 | |||
3262 | return 0; | ||
3263 | } | ||
3264 | |||
3448 | static void pci_dev_unlock(struct pci_dev *dev) | 3265 | static void pci_dev_unlock(struct pci_dev *dev) |
3449 | { | 3266 | { |
3450 | device_unlock(&dev->dev); | 3267 | device_unlock(&dev->dev); |
@@ -3588,6 +3405,34 @@ int pci_reset_function(struct pci_dev *dev) | |||
3588 | } | 3405 | } |
3589 | EXPORT_SYMBOL_GPL(pci_reset_function); | 3406 | EXPORT_SYMBOL_GPL(pci_reset_function); |
3590 | 3407 | ||
3408 | /** | ||
3409 | * pci_try_reset_function - quiesce and reset a PCI device function | ||
3410 | * @dev: PCI device to reset | ||
3411 | * | ||
3412 | * Same as above, except return -EAGAIN if unable to lock device. | ||
3413 | */ | ||
3414 | int pci_try_reset_function(struct pci_dev *dev) | ||
3415 | { | ||
3416 | int rc; | ||
3417 | |||
3418 | rc = pci_dev_reset(dev, 1); | ||
3419 | if (rc) | ||
3420 | return rc; | ||
3421 | |||
3422 | pci_dev_save_and_disable(dev); | ||
3423 | |||
3424 | if (pci_dev_trylock(dev)) { | ||
3425 | rc = __pci_dev_reset(dev, 0); | ||
3426 | pci_dev_unlock(dev); | ||
3427 | } else | ||
3428 | rc = -EAGAIN; | ||
3429 | |||
3430 | pci_dev_restore(dev); | ||
3431 | |||
3432 | return rc; | ||
3433 | } | ||
3434 | EXPORT_SYMBOL_GPL(pci_try_reset_function); | ||
3435 | |||
3591 | /* Lock devices from the top of the tree down */ | 3436 | /* Lock devices from the top of the tree down */ |
3592 | static void pci_bus_lock(struct pci_bus *bus) | 3437 | static void pci_bus_lock(struct pci_bus *bus) |
3593 | { | 3438 | { |
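pci_try_reset_function() is the non-blocking counterpart of pci_reset_function() just above: instead of sleeping on the device lock it bails out with -EAGAIN, which suits callers that are driven from userspace and can simply retry. A hedged sketch of such a caller follows; the function name is illustrative, and only pci_try_reset_function() itself is assumed from this patch.

    #include <linux/pci.h>

    static long example_reset_from_ioctl(struct pci_dev *pdev)
    {
            int ret = pci_try_reset_function(pdev);

            /* -EAGAIN means another thread holds the device; let userspace retry. */
            if (ret == -EAGAIN)
                    dev_dbg(&pdev->dev, "device busy, reset not attempted\n");

            return ret;
    }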
@@ -3612,6 +3457,32 @@ static void pci_bus_unlock(struct pci_bus *bus) | |||
3612 | } | 3457 | } |
3613 | } | 3458 | } |
3614 | 3459 | ||
3460 | /* Return 1 on successful lock, 0 on contention */ | ||
3461 | static int pci_bus_trylock(struct pci_bus *bus) | ||
3462 | { | ||
3463 | struct pci_dev *dev; | ||
3464 | |||
3465 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
3466 | if (!pci_dev_trylock(dev)) | ||
3467 | goto unlock; | ||
3468 | if (dev->subordinate) { | ||
3469 | if (!pci_bus_trylock(dev->subordinate)) { | ||
3470 | pci_dev_unlock(dev); | ||
3471 | goto unlock; | ||
3472 | } | ||
3473 | } | ||
3474 | } | ||
3475 | return 1; | ||
3476 | |||
3477 | unlock: | ||
3478 | list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { | ||
3479 | if (dev->subordinate) | ||
3480 | pci_bus_unlock(dev->subordinate); | ||
3481 | pci_dev_unlock(dev); | ||
3482 | } | ||
3483 | return 0; | ||
3484 | } | ||
3485 | |||
3615 | /* Lock devices from the top of the tree down */ | 3486 | /* Lock devices from the top of the tree down */ |
3616 | static void pci_slot_lock(struct pci_slot *slot) | 3487 | static void pci_slot_lock(struct pci_slot *slot) |
3617 | { | 3488 | { |
@@ -3640,6 +3511,37 @@ static void pci_slot_unlock(struct pci_slot *slot) | |||
3640 | } | 3511 | } |
3641 | } | 3512 | } |
3642 | 3513 | ||
3514 | /* Return 1 on successful lock, 0 on contention */ | ||
3515 | static int pci_slot_trylock(struct pci_slot *slot) | ||
3516 | { | ||
3517 | struct pci_dev *dev; | ||
3518 | |||
3519 | list_for_each_entry(dev, &slot->bus->devices, bus_list) { | ||
3520 | if (!dev->slot || dev->slot != slot) | ||
3521 | continue; | ||
3522 | if (!pci_dev_trylock(dev)) | ||
3523 | goto unlock; | ||
3524 | if (dev->subordinate) { | ||
3525 | if (!pci_bus_trylock(dev->subordinate)) { | ||
3526 | pci_dev_unlock(dev); | ||
3527 | goto unlock; | ||
3528 | } | ||
3529 | } | ||
3530 | } | ||
3531 | return 1; | ||
3532 | |||
3533 | unlock: | ||
3534 | list_for_each_entry_continue_reverse(dev, | ||
3535 | &slot->bus->devices, bus_list) { | ||
3536 | if (!dev->slot || dev->slot != slot) | ||
3537 | continue; | ||
3538 | if (dev->subordinate) | ||
3539 | pci_bus_unlock(dev->subordinate); | ||
3540 | pci_dev_unlock(dev); | ||
3541 | } | ||
3542 | return 0; | ||
3543 | } | ||
3544 | |||
3643 | /* Save and disable devices from the top of the tree down */ | 3545 | /* Save and disable devices from the top of the tree down */ |
3644 | static void pci_bus_save_and_disable(struct pci_bus *bus) | 3546 | static void pci_bus_save_and_disable(struct pci_bus *bus) |
3645 | { | 3547 | { |
@@ -3763,6 +3665,35 @@ int pci_reset_slot(struct pci_slot *slot) | |||
3763 | } | 3665 | } |
3764 | EXPORT_SYMBOL_GPL(pci_reset_slot); | 3666 | EXPORT_SYMBOL_GPL(pci_reset_slot); |
3765 | 3667 | ||
3668 | /** | ||
3669 | * pci_try_reset_slot - Try to reset a PCI slot | ||
3670 | * @slot: PCI slot to reset | ||
3671 | * | ||
3672 | * Same as above except return -EAGAIN if the slot cannot be locked | ||
3673 | */ | ||
3674 | int pci_try_reset_slot(struct pci_slot *slot) | ||
3675 | { | ||
3676 | int rc; | ||
3677 | |||
3678 | rc = pci_slot_reset(slot, 1); | ||
3679 | if (rc) | ||
3680 | return rc; | ||
3681 | |||
3682 | pci_slot_save_and_disable(slot); | ||
3683 | |||
3684 | if (pci_slot_trylock(slot)) { | ||
3685 | might_sleep(); | ||
3686 | rc = pci_reset_hotplug_slot(slot->hotplug, 0); | ||
3687 | pci_slot_unlock(slot); | ||
3688 | } else | ||
3689 | rc = -EAGAIN; | ||
3690 | |||
3691 | pci_slot_restore(slot); | ||
3692 | |||
3693 | return rc; | ||
3694 | } | ||
3695 | EXPORT_SYMBOL_GPL(pci_try_reset_slot); | ||
3696 | |||
3766 | static int pci_bus_reset(struct pci_bus *bus, int probe) | 3697 | static int pci_bus_reset(struct pci_bus *bus, int probe) |
3767 | { | 3698 | { |
3768 | if (!bus->self) | 3699 | if (!bus->self) |
@@ -3822,6 +3753,35 @@ int pci_reset_bus(struct pci_bus *bus) | |||
3822 | EXPORT_SYMBOL_GPL(pci_reset_bus); | 3753 | EXPORT_SYMBOL_GPL(pci_reset_bus); |
3823 | 3754 | ||
3824 | /** | 3755 | /** |
3756 | * pci_try_reset_bus - Try to reset a PCI bus | ||
3757 | * @bus: top level PCI bus to reset | ||
3758 | * | ||
3759 | * Same as above except return -EAGAIN if the bus cannot be locked | ||
3760 | */ | ||
3761 | int pci_try_reset_bus(struct pci_bus *bus) | ||
3762 | { | ||
3763 | int rc; | ||
3764 | |||
3765 | rc = pci_bus_reset(bus, 1); | ||
3766 | if (rc) | ||
3767 | return rc; | ||
3768 | |||
3769 | pci_bus_save_and_disable(bus); | ||
3770 | |||
3771 | if (pci_bus_trylock(bus)) { | ||
3772 | might_sleep(); | ||
3773 | pci_reset_bridge_secondary_bus(bus->self); | ||
3774 | pci_bus_unlock(bus); | ||
3775 | } else | ||
3776 | rc = -EAGAIN; | ||
3777 | |||
3778 | pci_bus_restore(bus); | ||
3779 | |||
3780 | return rc; | ||
3781 | } | ||
3782 | EXPORT_SYMBOL_GPL(pci_try_reset_bus); | ||
3783 | |||
3784 | /** | ||
3825 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count | 3785 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count |
3826 | * @dev: PCI device to query | 3786 | * @dev: PCI device to query |
3827 | * | 3787 | * |
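pci_try_reset_slot() and pci_try_reset_bus() round out the non-blocking reset family for callers that own a whole slot or bus rather than a single function. Both probe for a usable reset method before touching anything (the pci_slot_reset(slot, 1) and pci_bus_reset(bus, 1) calls above), so a caller can reasonably fall back from a slot reset to a bus reset. The sketch below is illustrative only: it assumes the caller already holds references to every device affected, and that the slot probe path reports -ENOTTY when no hotplug-based slot reset exists.

    #include <linux/pci.h>

    static int example_try_hot_reset(struct pci_dev *pdev)
    {
            if (pdev->slot) {
                    int ret = pci_try_reset_slot(pdev->slot);

                    if (ret != -ENOTTY)
                            return ret;     /* success, -EAGAIN, or a hard failure */
            }

            /* No usable slot-level reset: try resetting the whole bus below the bridge. */
            return pci_try_reset_bus(pdev->bus);
    }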
@@ -4450,7 +4410,6 @@ EXPORT_SYMBOL(pci_restore_state); | |||
4450 | EXPORT_SYMBOL(pci_pme_capable); | 4410 | EXPORT_SYMBOL(pci_pme_capable); |
4451 | EXPORT_SYMBOL(pci_pme_active); | 4411 | EXPORT_SYMBOL(pci_pme_active); |
4452 | EXPORT_SYMBOL(pci_wake_from_d3); | 4412 | EXPORT_SYMBOL(pci_wake_from_d3); |
4453 | EXPORT_SYMBOL(pci_target_state); | ||
4454 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 4413 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
4455 | EXPORT_SYMBOL(pci_back_from_sleep); | 4414 | EXPORT_SYMBOL(pci_back_from_sleep); |
4456 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); | 4415 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9c91ecc1301b..4df38df224f4 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #define PCI_CFG_SPACE_SIZE 256 | 6 | #define PCI_CFG_SPACE_SIZE 256 |
7 | #define PCI_CFG_SPACE_EXP_SIZE 4096 | 7 | #define PCI_CFG_SPACE_EXP_SIZE 4096 |
8 | 8 | ||
9 | extern const unsigned char pcix_bus_speed[]; | ||
10 | extern const unsigned char pcie_link_speed[]; | 9 | extern const unsigned char pcie_link_speed[]; |
11 | 10 | ||
12 | /* Functions internal to the PCI core code */ | 11 | /* Functions internal to the PCI core code */ |
@@ -68,7 +67,6 @@ void pci_power_up(struct pci_dev *dev); | |||
68 | void pci_disable_enabled_device(struct pci_dev *dev); | 67 | void pci_disable_enabled_device(struct pci_dev *dev); |
69 | int pci_finish_runtime_suspend(struct pci_dev *dev); | 68 | int pci_finish_runtime_suspend(struct pci_dev *dev); |
70 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | 69 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); |
71 | void pci_wakeup_bus(struct pci_bus *bus); | ||
72 | void pci_config_pm_runtime_get(struct pci_dev *dev); | 70 | void pci_config_pm_runtime_get(struct pci_dev *dev); |
73 | void pci_config_pm_runtime_put(struct pci_dev *dev); | 71 | void pci_config_pm_runtime_put(struct pci_dev *dev); |
74 | void pci_pm_init(struct pci_dev *dev); | 72 | void pci_pm_init(struct pci_dev *dev); |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index cf611ab2193a..01906576ab91 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -23,10 +23,10 @@ | |||
23 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, | 23 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, |
24 | struct pci_dev *pci) | 24 | struct pci_dev *pci) |
25 | { | 25 | { |
26 | return (0 == pci_domain_nr(pci->bus) && | 26 | return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) && |
27 | p->bus == pci->bus->number && | 27 | ACPI_HEST_BUS(p->bus) == pci->bus->number && |
28 | p->device == PCI_SLOT(pci->devfn) && | 28 | p->device == PCI_SLOT(pci->devfn) && |
29 | p->function == PCI_FUNC(pci->devfn)); | 29 | p->function == PCI_FUNC(pci->devfn); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline bool hest_match_type(struct acpi_hest_header *hest_hdr, | 32 | static inline bool hest_match_type(struct acpi_hest_header *hest_hdr, |
@@ -50,14 +50,37 @@ struct aer_hest_parse_info { | |||
50 | int firmware_first; | 50 | int firmware_first; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr) | ||
54 | { | ||
55 | if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT || | ||
56 | hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT || | ||
57 | hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE) | ||
58 | return 1; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
53 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) | 62 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) |
54 | { | 63 | { |
55 | struct aer_hest_parse_info *info = data; | 64 | struct aer_hest_parse_info *info = data; |
56 | struct acpi_hest_aer_common *p; | 65 | struct acpi_hest_aer_common *p; |
57 | int ff; | 66 | int ff; |
58 | 67 | ||
68 | if (!hest_source_is_pcie_aer(hest_hdr)) | ||
69 | return 0; | ||
70 | |||
59 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | 71 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); |
60 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | 72 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); |
73 | |||
74 | /* | ||
75 | * If no specific device is supplied, determine whether | ||
76 | * FIRMWARE_FIRST is set for *any* PCIe device. | ||
77 | */ | ||
78 | if (!info->pci_dev) { | ||
79 | info->firmware_first |= ff; | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | /* Otherwise, check the specific device */ | ||
61 | if (p->flags & ACPI_HEST_GLOBAL) { | 84 | if (p->flags & ACPI_HEST_GLOBAL) { |
62 | if (hest_match_type(hest_hdr, info->pci_dev)) | 85 | if (hest_match_type(hest_hdr, info->pci_dev)) |
63 | info->firmware_first = ff; | 86 | info->firmware_first = ff; |
@@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev) | |||
97 | 120 | ||
98 | static bool aer_firmware_first; | 121 | static bool aer_firmware_first; |
99 | 122 | ||
100 | static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) | ||
101 | { | ||
102 | struct acpi_hest_aer_common *p; | ||
103 | |||
104 | if (aer_firmware_first) | ||
105 | return 0; | ||
106 | |||
107 | switch (hest_hdr->type) { | ||
108 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
109 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
110 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
111 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
112 | aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
113 | default: | ||
114 | return 0; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /** | 123 | /** |
119 | * aer_acpi_firmware_first - Check if APEI should control AER. | 124 | * aer_acpi_firmware_first - Check if APEI should control AER. |
120 | */ | 125 | */ |
121 | bool aer_acpi_firmware_first(void) | 126 | bool aer_acpi_firmware_first(void) |
122 | { | 127 | { |
123 | static bool parsed = false; | 128 | static bool parsed = false; |
129 | struct aer_hest_parse_info info = { | ||
130 | .pci_dev = NULL, /* Check all PCIe devices */ | ||
131 | .firmware_first = 0, | ||
132 | }; | ||
124 | 133 | ||
125 | if (!parsed) { | 134 | if (!parsed) { |
126 | apei_hest_parse(aer_hest_parse_aff, NULL); | 135 | apei_hest_parse(aer_hest_parse, &info); |
136 | aer_firmware_first = info.firmware_first; | ||
127 | parsed = true; | 137 | parsed = true; |
128 | } | 138 | } |
129 | return aer_firmware_first; | 139 | return aer_firmware_first; |
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 2c7c9f5f592c..34ff7026440c 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -124,6 +124,21 @@ static const char *aer_agent_string[] = { | |||
124 | "Transmitter ID" | 124 | "Transmitter ID" |
125 | }; | 125 | }; |
126 | 126 | ||
127 | static void __print_tlp_header(struct pci_dev *dev, | ||
128 | struct aer_header_log_regs *t) | ||
129 | { | ||
130 | unsigned char *tlp = (unsigned char *)t; | ||
131 | |||
132 | dev_err(&dev->dev, " TLP Header:" | ||
133 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
134 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
135 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
136 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
137 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
138 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
139 | *(tlp + 13), *(tlp + 12)); | ||
140 | } | ||
141 | |||
127 | static void __aer_print_error(struct pci_dev *dev, | 142 | static void __aer_print_error(struct pci_dev *dev, |
128 | struct aer_err_info *info) | 143 | struct aer_err_info *info) |
129 | { | 144 | { |
@@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev, | |||
153 | 168 | ||
154 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | 169 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) |
155 | { | 170 | { |
171 | int layer, agent; | ||
156 | int id = ((dev->bus->number << 8) | dev->devfn); | 172 | int id = ((dev->bus->number << 8) | dev->devfn); |
157 | 173 | ||
158 | if (info->status == 0) { | 174 | if (!info->status) { |
159 | dev_err(&dev->dev, | 175 | dev_err(&dev->dev, |
160 | "PCIe Bus Error: severity=%s, type=Unaccessible, " | 176 | "PCIe Bus Error: severity=%s, type=Unaccessible, " |
161 | "id=%04x(Unregistered Agent ID)\n", | 177 | "id=%04x(Unregistered Agent ID)\n", |
162 | aer_error_severity_string[info->severity], id); | 178 | aer_error_severity_string[info->severity], id); |
163 | } else { | 179 | goto out; |
164 | int layer, agent; | 180 | } |
165 | 181 | ||
166 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); | 182 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); |
167 | agent = AER_GET_AGENT(info->severity, info->status); | 183 | agent = AER_GET_AGENT(info->severity, info->status); |
168 | 184 | ||
169 | dev_err(&dev->dev, | 185 | dev_err(&dev->dev, |
170 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", | 186 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
171 | aer_error_severity_string[info->severity], | 187 | aer_error_severity_string[info->severity], |
172 | aer_error_layer[layer], id, aer_agent_string[agent]); | 188 | aer_error_layer[layer], id, aer_agent_string[agent]); |
173 | 189 | ||
174 | dev_err(&dev->dev, | 190 | dev_err(&dev->dev, |
175 | " device [%04x:%04x] error status/mask=%08x/%08x\n", | 191 | " device [%04x:%04x] error status/mask=%08x/%08x\n", |
176 | dev->vendor, dev->device, | 192 | dev->vendor, dev->device, |
177 | info->status, info->mask); | 193 | info->status, info->mask); |
178 | 194 | ||
179 | __aer_print_error(dev, info); | 195 | __aer_print_error(dev, info); |
180 | |||
181 | if (info->tlp_header_valid) { | ||
182 | unsigned char *tlp = (unsigned char *) &info->tlp; | ||
183 | dev_err(&dev->dev, " TLP Header:" | ||
184 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
185 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
186 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
187 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
188 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
189 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
190 | *(tlp + 13), *(tlp + 12)); | ||
191 | } | ||
192 | } | ||
193 | 196 | ||
197 | if (info->tlp_header_valid) | ||
198 | __print_tlp_header(dev, &info->tlp); | ||
199 | |||
200 | out: | ||
194 | if (info->id && info->error_dev_num > 1 && info->id == id) | 201 | if (info->id && info->error_dev_num > 1 && info->id == id) |
195 | dev_err(&dev->dev, | 202 | dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id); |
196 | " Error of this Agent(%04x) is reported first\n", | 203 | |
197 | id); | ||
198 | trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), | 204 | trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), |
199 | info->severity); | 205 | info->severity); |
200 | } | 206 | } |
@@ -228,6 +234,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity, | |||
228 | const char **status_strs; | 234 | const char **status_strs; |
229 | 235 | ||
230 | aer_severity = cper_severity_to_aer(cper_severity); | 236 | aer_severity = cper_severity_to_aer(cper_severity); |
237 | |||
231 | if (aer_severity == AER_CORRECTABLE) { | 238 | if (aer_severity == AER_CORRECTABLE) { |
232 | status = aer->cor_status; | 239 | status = aer->cor_status; |
233 | mask = aer->cor_mask; | 240 | mask = aer->cor_mask; |
@@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity, | |||
240 | status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); | 247 | status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); |
241 | tlp_header_valid = status & AER_LOG_TLP_MASKS; | 248 | tlp_header_valid = status & AER_LOG_TLP_MASKS; |
242 | } | 249 | } |
250 | |||
243 | layer = AER_GET_LAYER_ERROR(aer_severity, status); | 251 | layer = AER_GET_LAYER_ERROR(aer_severity, status); |
244 | agent = AER_GET_AGENT(aer_severity, status); | 252 | agent = AER_GET_AGENT(aer_severity, status); |
245 | dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", | 253 | |
246 | status, mask); | 254 | dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); |
247 | cper_print_bits("", status, status_strs, status_strs_size); | 255 | cper_print_bits("", status, status_strs, status_strs_size); |
248 | dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", | 256 | dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", |
249 | aer_error_layer[layer], aer_agent_string[agent]); | 257 | aer_error_layer[layer], aer_agent_string[agent]); |
258 | |||
250 | if (aer_severity != AER_CORRECTABLE) | 259 | if (aer_severity != AER_CORRECTABLE) |
251 | dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", | 260 | dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", |
252 | aer->uncor_severity); | 261 | aer->uncor_severity); |
253 | if (tlp_header_valid) { | 262 | |
254 | const unsigned char *tlp; | 263 | if (tlp_header_valid) |
255 | tlp = (const unsigned char *)&aer->header_log; | 264 | __print_tlp_header(dev, &aer->header_log); |
256 | dev_err(&dev->dev, "aer_tlp_header:" | 265 | |
257 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
258 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
259 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
260 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
261 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
262 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
263 | *(tlp + 13), *(tlp + 12)); | ||
264 | } | ||
265 | trace_aer_event(dev_name(&dev->dev), (status & ~mask), | 266 | trace_aer_event(dev_name(&dev->dev), (status & ~mask), |
266 | aer_severity); | 267 | aer_severity); |
267 | } | 268 | } |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f1272dc54de1..e1e7026b838d 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -984,18 +984,6 @@ void pcie_no_aspm(void) | |||
984 | } | 984 | } |
985 | } | 985 | } |
986 | 986 | ||
987 | /** | ||
988 | * pcie_aspm_enabled - is PCIe ASPM enabled? | ||
989 | * | ||
990 | * Returns true if ASPM has not been disabled by the command-line option | ||
991 | * pcie_aspm=off. | ||
992 | **/ | ||
993 | int pcie_aspm_enabled(void) | ||
994 | { | ||
995 | return !aspm_disabled; | ||
996 | } | ||
997 | EXPORT_SYMBOL(pcie_aspm_enabled); | ||
998 | |||
999 | bool pcie_aspm_support_enabled(void) | 987 | bool pcie_aspm_support_enabled(void) |
1000 | { | 988 | { |
1001 | return aspm_support_enabled; | 989 | return aspm_support_enabled; |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 0b6e76604068..986f8eadfd39 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -79,9 +79,10 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
79 | u16 reg16; | 79 | u16 reg16; |
80 | u32 reg32; | 80 | u32 reg32; |
81 | 81 | ||
82 | nr_entries = pci_msix_table_size(dev); | 82 | nr_entries = pci_msix_vec_count(dev); |
83 | if (!nr_entries) | 83 | if (nr_entries < 0) |
84 | return -EINVAL; | 84 | return nr_entries; |
85 | BUG_ON(!nr_entries); | ||
85 | if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES) | 86 | if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES) |
86 | nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES; | 87 | nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES; |
87 | 88 | ||
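The port driver is converted from pci_msix_table_size(), which returned 0 when MSI-X was absent, to pci_msix_vec_count(), which returns a negative errno instead, hence the sign check plus BUG_ON() above. A hedged sketch of the same convention in an ordinary driver; the entry setup and names are illustrative, and pci_enable_msix() is the pre-existing enable call rather than anything introduced here.

    #include <linux/pci.h>

    static int example_setup_msix(struct pci_dev *pdev,
                                  struct msix_entry *entries, int want)
    {
            int i, nvec = pci_msix_vec_count(pdev);

            if (nvec < 0)
                    return nvec;            /* device has no MSI-X capability */
            if (nvec > want)
                    nvec = want;            /* never ask for more than we can use */

            for (i = 0; i < nvec; i++)
                    entries[i].entry = i;

            return pci_enable_msix(pdev, entries, nvec);
    }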
@@ -344,11 +345,12 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) | |||
344 | device_enable_async_suspend(device); | 345 | device_enable_async_suspend(device); |
345 | 346 | ||
346 | retval = device_register(device); | 347 | retval = device_register(device); |
347 | if (retval) | 348 | if (retval) { |
348 | kfree(pcie); | 349 | put_device(device); |
349 | else | 350 | return retval; |
350 | get_device(device); | 351 | } |
351 | return retval; | 352 | |
353 | return 0; | ||
352 | } | 354 | } |
353 | 355 | ||
354 | /** | 356 | /** |
@@ -454,10 +456,8 @@ int pcie_port_device_resume(struct device *dev) | |||
454 | 456 | ||
455 | static int remove_iter(struct device *dev, void *data) | 457 | static int remove_iter(struct device *dev, void *data) |
456 | { | 458 | { |
457 | if (dev->bus == &pcie_port_bus_type) { | 459 | if (dev->bus == &pcie_port_bus_type) |
458 | put_device(dev); | ||
459 | device_unregister(dev); | 460 | device_unregister(dev); |
460 | } | ||
461 | return 0; | 461 | return 0; |
462 | } | 462 | } |
463 | 463 | ||
@@ -498,12 +498,12 @@ static int pcie_port_probe_service(struct device *dev) | |||
498 | 498 | ||
499 | pciedev = to_pcie_device(dev); | 499 | pciedev = to_pcie_device(dev); |
500 | status = driver->probe(pciedev); | 500 | status = driver->probe(pciedev); |
501 | if (!status) { | 501 | if (status) |
502 | dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", | 502 | return status; |
503 | driver->name); | 503 | |
504 | get_device(dev); | 504 | dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name); |
505 | } | 505 | get_device(dev); |
506 | return status; | 506 | return 0; |
507 | } | 507 | } |
508 | 508 | ||
509 | /** | 509 | /** |
@@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) | |||
554 | if (pcie_ports_disabled) | 554 | if (pcie_ports_disabled) |
555 | return -ENODEV; | 555 | return -ENODEV; |
556 | 556 | ||
557 | new->driver.name = (char *)new->name; | 557 | new->driver.name = new->name; |
558 | new->driver.bus = &pcie_port_bus_type; | 558 | new->driver.bus = &pcie_port_bus_type; |
559 | new->driver.probe = pcie_port_probe_service; | 559 | new->driver.probe = pcie_port_probe_service; |
560 | new->driver.remove = pcie_port_remove_service; | 560 | new->driver.remove = pcie_port_remove_service; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 38e403dddf6e..04796c056d12 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ | 16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
17 | #define CARDBUS_RESERVE_BUSNR 3 | 17 | #define CARDBUS_RESERVE_BUSNR 3 |
18 | 18 | ||
19 | struct resource busn_resource = { | 19 | static struct resource busn_resource = { |
20 | .name = "PCI busn", | 20 | .name = "PCI busn", |
21 | .start = 0, | 21 | .start = 0, |
22 | .end = 255, | 22 | .end = 255, |
@@ -269,8 +269,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
269 | region.end = l + sz; | 269 | region.end = l + sz; |
270 | } | 270 | } |
271 | 271 | ||
272 | pcibios_bus_to_resource(dev, res, ®ion); | 272 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
273 | pcibios_resource_to_bus(dev, &inverted_region, res); | 273 | pcibios_resource_to_bus(dev->bus, &inverted_region, res); |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is | 276 | * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is |
@@ -364,7 +364,7 @@ static void pci_read_bridge_io(struct pci_bus *child) | |||
364 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; | 364 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; |
365 | region.start = base; | 365 | region.start = base; |
366 | region.end = limit + io_granularity - 1; | 366 | region.end = limit + io_granularity - 1; |
367 | pcibios_bus_to_resource(dev, res, ®ion); | 367 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
368 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 368 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
369 | } | 369 | } |
370 | } | 370 | } |
@@ -386,7 +386,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child) | |||
386 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 386 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
387 | region.start = base; | 387 | region.start = base; |
388 | region.end = limit + 0xfffff; | 388 | region.end = limit + 0xfffff; |
389 | pcibios_bus_to_resource(dev, res, ®ion); | 389 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
390 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 390 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
391 | } | 391 | } |
392 | } | 392 | } |
@@ -436,7 +436,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) | |||
436 | res->flags |= IORESOURCE_MEM_64; | 436 | res->flags |= IORESOURCE_MEM_64; |
437 | region.start = base; | 437 | region.start = base; |
438 | region.end = limit + 0xfffff; | 438 | region.end = limit + 0xfffff; |
439 | pcibios_bus_to_resource(dev, res, ®ion); | 439 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
440 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 440 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
441 | } | 441 | } |
442 | } | 442 | } |
@@ -518,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b) | |||
518 | return bridge; | 518 | return bridge; |
519 | } | 519 | } |
520 | 520 | ||
521 | const unsigned char pcix_bus_speed[] = { | 521 | static const unsigned char pcix_bus_speed[] = { |
522 | PCI_SPEED_UNKNOWN, /* 0 */ | 522 | PCI_SPEED_UNKNOWN, /* 0 */ |
523 | PCI_SPEED_66MHz_PCIX, /* 1 */ | 523 | PCI_SPEED_66MHz_PCIX, /* 1 */ |
524 | PCI_SPEED_100MHz_PCIX, /* 2 */ | 524 | PCI_SPEED_100MHz_PCIX, /* 2 */ |
@@ -999,6 +999,60 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) | |||
999 | pdev->is_hotplug_bridge = 1; | 999 | pdev->is_hotplug_bridge = 1; |
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | |||
1003 | /** | ||
1004 | * pci_cfg_space_size - get the configuration space size of the PCI device. | ||
1005 | * @dev: PCI device | ||
1006 | * | ||
1007 | * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices | ||
1008 | * have 4096 bytes. Even if the device is capable, that doesn't mean we can | ||
1009 | * access it. Maybe we don't have a way to generate extended config space | ||
1010 | * accesses, or the device is behind a reverse Express bridge. So we try | ||
1011 | * reading the dword at 0x100 which must either be 0 or a valid extended | ||
1012 | * capability header. | ||
1013 | */ | ||
1014 | static int pci_cfg_space_size_ext(struct pci_dev *dev) | ||
1015 | { | ||
1016 | u32 status; | ||
1017 | int pos = PCI_CFG_SPACE_SIZE; | ||
1018 | |||
1019 | if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) | ||
1020 | goto fail; | ||
1021 | if (status == 0xffffffff) | ||
1022 | goto fail; | ||
1023 | |||
1024 | return PCI_CFG_SPACE_EXP_SIZE; | ||
1025 | |||
1026 | fail: | ||
1027 | return PCI_CFG_SPACE_SIZE; | ||
1028 | } | ||
1029 | |||
1030 | int pci_cfg_space_size(struct pci_dev *dev) | ||
1031 | { | ||
1032 | int pos; | ||
1033 | u32 status; | ||
1034 | u16 class; | ||
1035 | |||
1036 | class = dev->class >> 8; | ||
1037 | if (class == PCI_CLASS_BRIDGE_HOST) | ||
1038 | return pci_cfg_space_size_ext(dev); | ||
1039 | |||
1040 | if (!pci_is_pcie(dev)) { | ||
1041 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); | ||
1042 | if (!pos) | ||
1043 | goto fail; | ||
1044 | |||
1045 | pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); | ||
1046 | if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) | ||
1047 | goto fail; | ||
1048 | } | ||
1049 | |||
1050 | return pci_cfg_space_size_ext(dev); | ||
1051 | |||
1052 | fail: | ||
1053 | return PCI_CFG_SPACE_SIZE; | ||
1054 | } | ||
1055 | |||
1002 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 1056 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
1003 | 1057 | ||
1004 | /** | 1058 | /** |
@@ -1084,24 +1138,24 @@ int pci_setup_device(struct pci_dev *dev) | |||
1084 | region.end = 0x1F7; | 1138 | region.end = 0x1F7; |
1085 | res = &dev->resource[0]; | 1139 | res = &dev->resource[0]; |
1086 | res->flags = LEGACY_IO_RESOURCE; | 1140 | res->flags = LEGACY_IO_RESOURCE; |
1087 | pcibios_bus_to_resource(dev, res, ®ion); | 1141 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
1088 | region.start = 0x3F6; | 1142 | region.start = 0x3F6; |
1089 | region.end = 0x3F6; | 1143 | region.end = 0x3F6; |
1090 | res = &dev->resource[1]; | 1144 | res = &dev->resource[1]; |
1091 | res->flags = LEGACY_IO_RESOURCE; | 1145 | res->flags = LEGACY_IO_RESOURCE; |
1092 | pcibios_bus_to_resource(dev, res, ®ion); | 1146 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
1093 | } | 1147 | } |
1094 | if ((progif & 4) == 0) { | 1148 | if ((progif & 4) == 0) { |
1095 | region.start = 0x170; | 1149 | region.start = 0x170; |
1096 | region.end = 0x177; | 1150 | region.end = 0x177; |
1097 | res = &dev->resource[2]; | 1151 | res = &dev->resource[2]; |
1098 | res->flags = LEGACY_IO_RESOURCE; | 1152 | res->flags = LEGACY_IO_RESOURCE; |
1099 | pcibios_bus_to_resource(dev, res, ®ion); | 1153 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
1100 | region.start = 0x376; | 1154 | region.start = 0x376; |
1101 | region.end = 0x376; | 1155 | region.end = 0x376; |
1102 | res = &dev->resource[3]; | 1156 | res = &dev->resource[3]; |
1103 | res->flags = LEGACY_IO_RESOURCE; | 1157 | res->flags = LEGACY_IO_RESOURCE; |
1104 | pcibios_bus_to_resource(dev, res, ®ion); | 1158 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
1105 | } | 1159 | } |
1106 | } | 1160 | } |
1107 | break; | 1161 | break; |
@@ -1154,6 +1208,18 @@ static void pci_release_capabilities(struct pci_dev *dev) | |||
1154 | pci_free_cap_save_buffers(dev); | 1208 | pci_free_cap_save_buffers(dev); |
1155 | } | 1209 | } |
1156 | 1210 | ||
1211 | static void pci_free_resources(struct pci_dev *dev) | ||
1212 | { | ||
1213 | int i; | ||
1214 | |||
1215 | pci_cleanup_rom(dev); | ||
1216 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
1217 | struct resource *res = dev->resource + i; | ||
1218 | if (res->parent) | ||
1219 | release_resource(res); | ||
1220 | } | ||
1221 | } | ||
1222 | |||
1157 | /** | 1223 | /** |
1158 | * pci_release_dev - free a pci device structure when all users of it are finished. | 1224 | * pci_release_dev - free a pci device structure when all users of it are finished. |
1159 | * @dev: device that's been disconnected | 1225 | * @dev: device that's been disconnected |
@@ -1163,9 +1229,14 @@ static void pci_release_capabilities(struct pci_dev *dev) | |||
1163 | */ | 1229 | */ |
1164 | static void pci_release_dev(struct device *dev) | 1230 | static void pci_release_dev(struct device *dev) |
1165 | { | 1231 | { |
1166 | struct pci_dev *pci_dev; | 1232 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1233 | |||
1234 | down_write(&pci_bus_sem); | ||
1235 | list_del(&pci_dev->bus_list); | ||
1236 | up_write(&pci_bus_sem); | ||
1237 | |||
1238 | pci_free_resources(pci_dev); | ||
1167 | 1239 | ||
1168 | pci_dev = to_pci_dev(dev); | ||
1169 | pci_release_capabilities(pci_dev); | 1240 | pci_release_capabilities(pci_dev); |
1170 | pci_release_of_node(pci_dev); | 1241 | pci_release_of_node(pci_dev); |
1171 | pcibios_release_device(pci_dev); | 1242 | pcibios_release_device(pci_dev); |
@@ -1173,59 +1244,6 @@ static void pci_release_dev(struct device *dev) | |||
1173 | kfree(pci_dev); | 1244 | kfree(pci_dev); |
1174 | } | 1245 | } |
1175 | 1246 | ||
1176 | /** | ||
1177 | * pci_cfg_space_size - get the configuration space size of the PCI device. | ||
1178 | * @dev: PCI device | ||
1179 | * | ||
1180 | * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices | ||
1181 | * have 4096 bytes. Even if the device is capable, that doesn't mean we can | ||
1182 | * access it. Maybe we don't have a way to generate extended config space | ||
1183 | * accesses, or the device is behind a reverse Express bridge. So we try | ||
1184 | * reading the dword at 0x100 which must either be 0 or a valid extended | ||
1185 | * capability header. | ||
1186 | */ | ||
1187 | int pci_cfg_space_size_ext(struct pci_dev *dev) | ||
1188 | { | ||
1189 | u32 status; | ||
1190 | int pos = PCI_CFG_SPACE_SIZE; | ||
1191 | |||
1192 | if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) | ||
1193 | goto fail; | ||
1194 | if (status == 0xffffffff) | ||
1195 | goto fail; | ||
1196 | |||
1197 | return PCI_CFG_SPACE_EXP_SIZE; | ||
1198 | |||
1199 | fail: | ||
1200 | return PCI_CFG_SPACE_SIZE; | ||
1201 | } | ||
1202 | |||
1203 | int pci_cfg_space_size(struct pci_dev *dev) | ||
1204 | { | ||
1205 | int pos; | ||
1206 | u32 status; | ||
1207 | u16 class; | ||
1208 | |||
1209 | class = dev->class >> 8; | ||
1210 | if (class == PCI_CLASS_BRIDGE_HOST) | ||
1211 | return pci_cfg_space_size_ext(dev); | ||
1212 | |||
1213 | if (!pci_is_pcie(dev)) { | ||
1214 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); | ||
1215 | if (!pos) | ||
1216 | goto fail; | ||
1217 | |||
1218 | pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); | ||
1219 | if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) | ||
1220 | goto fail; | ||
1221 | } | ||
1222 | |||
1223 | return pci_cfg_space_size_ext(dev); | ||
1224 | |||
1225 | fail: | ||
1226 | return PCI_CFG_SPACE_SIZE; | ||
1227 | } | ||
1228 | |||
1229 | struct pci_dev *pci_alloc_dev(struct pci_bus *bus) | 1247 | struct pci_dev *pci_alloc_dev(struct pci_bus *bus) |
1230 | { | 1248 | { |
1231 | struct pci_dev *dev; | 1249 | struct pci_dev *dev; |
@@ -1242,12 +1260,6 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus) | |||
1242 | } | 1260 | } |
1243 | EXPORT_SYMBOL(pci_alloc_dev); | 1261 | EXPORT_SYMBOL(pci_alloc_dev); |
1244 | 1262 | ||
1245 | struct pci_dev *alloc_pci_dev(void) | ||
1246 | { | ||
1247 | return pci_alloc_dev(NULL); | ||
1248 | } | ||
1249 | EXPORT_SYMBOL(alloc_pci_dev); | ||
1250 | |||
1251 | bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, | 1263 | bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, |
1252 | int crs_timeout) | 1264 | int crs_timeout) |
1253 | { | 1265 | { |
@@ -1381,8 +1393,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1381 | dev->match_driver = false; | 1393 | dev->match_driver = false; |
1382 | ret = device_add(&dev->dev); | 1394 | ret = device_add(&dev->dev); |
1383 | WARN_ON(ret < 0); | 1395 | WARN_ON(ret < 0); |
1384 | |||
1385 | pci_proc_attach_device(dev); | ||
1386 | } | 1396 | } |
1387 | 1397 | ||
1388 | struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) | 1398 | struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) |
@@ -2014,6 +2024,24 @@ EXPORT_SYMBOL(pci_scan_slot); | |||
2014 | EXPORT_SYMBOL(pci_scan_bridge); | 2024 | EXPORT_SYMBOL(pci_scan_bridge); |
2015 | EXPORT_SYMBOL_GPL(pci_scan_child_bus); | 2025 | EXPORT_SYMBOL_GPL(pci_scan_child_bus); |
2016 | 2026 | ||
2027 | /* | ||
2028 | * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal | ||
2029 | * routines should always be executed under this mutex. | ||
2030 | */ | ||
2031 | static DEFINE_MUTEX(pci_rescan_remove_lock); | ||
2032 | |||
2033 | void pci_lock_rescan_remove(void) | ||
2034 | { | ||
2035 | mutex_lock(&pci_rescan_remove_lock); | ||
2036 | } | ||
2037 | EXPORT_SYMBOL_GPL(pci_lock_rescan_remove); | ||
2038 | |||
2039 | void pci_unlock_rescan_remove(void) | ||
2040 | { | ||
2041 | mutex_unlock(&pci_rescan_remove_lock); | ||
2042 | } | ||
2043 | EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove); | ||
2044 | |||
2017 | static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) | 2045 | static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) |
2018 | { | 2046 | { |
2019 | const struct pci_dev *a = to_pci_dev(d_a); | 2047 | const struct pci_dev *a = to_pci_dev(d_a); |
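The pci_lock_rescan_remove()/pci_unlock_rescan_remove() pair exported above is what the driver-side hunks later in this series (pcifront, cardbus, the asus-wmi and eeepc hotplug paths) wrap around their scan and removal calls. A minimal sketch of the expected calling pattern, assuming a hypothetical bridge whose secondary bus should be rescanned after a platform event:

#include <linux/pci.h>

/* Hypothetical rescan trigger: walk the bus below @bridge again after
 * firmware signalled that new functions may have appeared. */
static void example_rescan_below(struct pci_dev *bridge)
{
        /* Serialize against any concurrent rescan or removal. */
        pci_lock_rescan_remove();
        pci_rescan_bus(bridge->subordinate);
        pci_unlock_rescan_remove();
}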
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 3a02717473ad..5cb726c193de 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -339,7 +339,7 @@ static void quirk_io_region(struct pci_dev *dev, int port, | |||
339 | /* Convert from PCI bus to resource space */ | 339 | /* Convert from PCI bus to resource space */ |
340 | bus_region.start = region; | 340 | bus_region.start = region; |
341 | bus_region.end = region + size - 1; | 341 | bus_region.end = region + size - 1; |
342 | pcibios_bus_to_resource(dev, res, &bus_region); | 342 | pcibios_bus_to_resource(dev->bus, res, &bus_region); |
343 | 343 | ||
344 | if (!pci_claim_resource(dev, nr)) | 344 | if (!pci_claim_resource(dev, nr)) |
345 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); | 345 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); |
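The one-line change above is the pattern repeated through the rest of this series: pcibios_resource_to_bus() and pcibios_bus_to_resource() now take a struct pci_bus * instead of the struct pci_dev *. A short sketch of the updated calling convention; the use of BAR 0 here is illustrative only:

#include <linux/pci.h>

/* Convert BAR 0 of @dev between the CPU (resource) view and the bus
 * address view using the new bus-based helpers. */
static void example_address_views(struct pci_dev *dev)
{
        struct pci_bus_region region;
        struct resource *res = &dev->resource[0];

        /* CPU resource -> bus address (what gets programmed into a BAR) */
        pcibios_resource_to_bus(dev->bus, &region, res);

        /* bus address -> CPU resource (after reading a BAR back) */
        pcibios_bus_to_resource(dev->bus, res, &region);
}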
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index cc9337a71529..4ff36bfa785e 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c | |||
@@ -3,20 +3,6 @@ | |||
3 | #include <linux/pci-aspm.h> | 3 | #include <linux/pci-aspm.h> |
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | static void pci_free_resources(struct pci_dev *dev) | ||
7 | { | ||
8 | int i; | ||
9 | |||
10 | msi_remove_pci_irq_vectors(dev); | ||
11 | |||
12 | pci_cleanup_rom(dev); | ||
13 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
14 | struct resource *res = dev->resource + i; | ||
15 | if (res->parent) | ||
16 | release_resource(res); | ||
17 | } | ||
18 | } | ||
19 | |||
20 | static void pci_stop_dev(struct pci_dev *dev) | 6 | static void pci_stop_dev(struct pci_dev *dev) |
21 | { | 7 | { |
22 | pci_pme_active(dev, false); | 8 | pci_pme_active(dev, false); |
@@ -34,13 +20,11 @@ static void pci_stop_dev(struct pci_dev *dev) | |||
34 | 20 | ||
35 | static void pci_destroy_dev(struct pci_dev *dev) | 21 | static void pci_destroy_dev(struct pci_dev *dev) |
36 | { | 22 | { |
37 | device_del(&dev->dev); | 23 | if (!dev->dev.kobj.parent) |
24 | return; | ||
38 | 25 | ||
39 | down_write(&pci_bus_sem); | 26 | device_del(&dev->dev); |
40 | list_del(&dev->bus_list); | ||
41 | up_write(&pci_bus_sem); | ||
42 | 27 | ||
43 | pci_free_resources(dev); | ||
44 | put_device(&dev->dev); | 28 | put_device(&dev->dev); |
45 | } | 29 | } |
46 | 30 | ||
@@ -114,6 +98,14 @@ void pci_stop_and_remove_bus_device(struct pci_dev *dev) | |||
114 | } | 98 | } |
115 | EXPORT_SYMBOL(pci_stop_and_remove_bus_device); | 99 | EXPORT_SYMBOL(pci_stop_and_remove_bus_device); |
116 | 100 | ||
101 | void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev) | ||
102 | { | ||
103 | pci_lock_rescan_remove(); | ||
104 | pci_stop_and_remove_bus_device(dev); | ||
105 | pci_unlock_rescan_remove(); | ||
106 | } | ||
107 | EXPORT_SYMBOL_GPL(pci_stop_and_remove_bus_device_locked); | ||
108 | |||
117 | void pci_stop_root_bus(struct pci_bus *bus) | 109 | void pci_stop_root_bus(struct pci_bus *bus) |
118 | { | 110 | { |
119 | struct pci_dev *child, *tmp; | 111 | struct pci_dev *child, *tmp; |
@@ -128,7 +120,7 @@ void pci_stop_root_bus(struct pci_bus *bus) | |||
128 | pci_stop_bus_device(child); | 120 | pci_stop_bus_device(child); |
129 | 121 | ||
130 | /* stop the host bridge */ | 122 | /* stop the host bridge */ |
131 | device_del(&host_bridge->dev); | 123 | device_release_driver(&host_bridge->dev); |
132 | } | 124 | } |
133 | 125 | ||
134 | void pci_remove_root_bus(struct pci_bus *bus) | 126 | void pci_remove_root_bus(struct pci_bus *bus) |
@@ -147,5 +139,5 @@ void pci_remove_root_bus(struct pci_bus *bus) | |||
147 | host_bridge->bus = NULL; | 139 | host_bridge->bus = NULL; |
148 | 140 | ||
149 | /* remove the host bridge */ | 141 | /* remove the host bridge */ |
150 | put_device(&host_bridge->dev); | 142 | device_unregister(&host_bridge->dev); |
151 | } | 143 | } |
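pci_stop_and_remove_bus_device_locked(), added above, is the existing removal entry point bracketed by the new rescan/remove lock; it is meant for callers that do not already hold the lock, as the mpt2sas/mpt3sas conversions further down show. A brief sketch of when to use which variant (the trigger function is hypothetical):

#include <linux/pci.h>

/* Hypothetical removal path. */
static void example_remove(struct pci_dev *pdev, bool caller_holds_lock)
{
        if (caller_holds_lock) {
                /* e.g. inside a pci_lock_rescan_remove() section */
                pci_stop_and_remove_bus_device(pdev);
        } else {
                /* takes and drops the rescan/remove lock internally */
                pci_stop_and_remove_bus_device_locked(pdev);
        }
}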
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index c5d0a08a8747..5d595724e5f4 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -31,7 +31,7 @@ int pci_enable_rom(struct pci_dev *pdev) | |||
31 | if (!res->flags) | 31 | if (!res->flags) |
32 | return -1; | 32 | return -1; |
33 | 33 | ||
34 | pcibios_resource_to_bus(pdev, ®ion, res); | 34 | pcibios_resource_to_bus(pdev->bus, ®ion, res); |
35 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); | 35 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
36 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; | 36 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
37 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; | 37 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 219a4106480a..138bdd6393be 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -475,7 +475,7 @@ void pci_setup_cardbus(struct pci_bus *bus) | |||
475 | &bus->busn_res); | 475 | &bus->busn_res); |
476 | 476 | ||
477 | res = bus->resource[0]; | 477 | res = bus->resource[0]; |
478 | pcibios_resource_to_bus(bridge, ®ion, res); | 478 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
479 | if (res->flags & IORESOURCE_IO) { | 479 | if (res->flags & IORESOURCE_IO) { |
480 | /* | 480 | /* |
481 | * The IO resource is allocated a range twice as large as it | 481 | * The IO resource is allocated a range twice as large as it |
@@ -489,7 +489,7 @@ void pci_setup_cardbus(struct pci_bus *bus) | |||
489 | } | 489 | } |
490 | 490 | ||
491 | res = bus->resource[1]; | 491 | res = bus->resource[1]; |
492 | pcibios_resource_to_bus(bridge, ®ion, res); | 492 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
493 | if (res->flags & IORESOURCE_IO) { | 493 | if (res->flags & IORESOURCE_IO) { |
494 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 494 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
495 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, | 495 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, |
@@ -499,7 +499,7 @@ void pci_setup_cardbus(struct pci_bus *bus) | |||
499 | } | 499 | } |
500 | 500 | ||
501 | res = bus->resource[2]; | 501 | res = bus->resource[2]; |
502 | pcibios_resource_to_bus(bridge, ®ion, res); | 502 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
503 | if (res->flags & IORESOURCE_MEM) { | 503 | if (res->flags & IORESOURCE_MEM) { |
504 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 504 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
505 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, | 505 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, |
@@ -509,7 +509,7 @@ void pci_setup_cardbus(struct pci_bus *bus) | |||
509 | } | 509 | } |
510 | 510 | ||
511 | res = bus->resource[3]; | 511 | res = bus->resource[3]; |
512 | pcibios_resource_to_bus(bridge, ®ion, res); | 512 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
513 | if (res->flags & IORESOURCE_MEM) { | 513 | if (res->flags & IORESOURCE_MEM) { |
514 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 514 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
515 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, | 515 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, |
@@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
538 | struct pci_bus_region region; | 538 | struct pci_bus_region region; |
539 | unsigned long io_mask; | 539 | unsigned long io_mask; |
540 | u8 io_base_lo, io_limit_lo; | 540 | u8 io_base_lo, io_limit_lo; |
541 | u32 l, io_upper16; | 541 | u16 l; |
542 | u32 io_upper16; | ||
542 | 543 | ||
543 | io_mask = PCI_IO_RANGE_MASK; | 544 | io_mask = PCI_IO_RANGE_MASK; |
544 | if (bridge->io_window_1k) | 545 | if (bridge->io_window_1k) |
@@ -546,13 +547,12 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
546 | 547 | ||
547 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 548 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
548 | res = bus->resource[0]; | 549 | res = bus->resource[0]; |
549 | pcibios_resource_to_bus(bridge, ®ion, res); | 550 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
550 | if (res->flags & IORESOURCE_IO) { | 551 | if (res->flags & IORESOURCE_IO) { |
551 | pci_read_config_dword(bridge, PCI_IO_BASE, &l); | 552 | pci_read_config_word(bridge, PCI_IO_BASE, &l); |
552 | l &= 0xffff0000; | ||
553 | io_base_lo = (region.start >> 8) & io_mask; | 553 | io_base_lo = (region.start >> 8) & io_mask; |
554 | io_limit_lo = (region.end >> 8) & io_mask; | 554 | io_limit_lo = (region.end >> 8) & io_mask; |
555 | l |= ((u32) io_limit_lo << 8) | io_base_lo; | 555 | l = ((u16) io_limit_lo << 8) | io_base_lo; |
556 | /* Set up upper 16 bits of I/O base/limit. */ | 556 | /* Set up upper 16 bits of I/O base/limit. */ |
557 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 557 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
558 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 558 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
@@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
564 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ | 564 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ |
565 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); | 565 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); |
566 | /* Update lower 16 bits of I/O base/limit. */ | 566 | /* Update lower 16 bits of I/O base/limit. */ |
567 | pci_write_config_dword(bridge, PCI_IO_BASE, l); | 567 | pci_write_config_word(bridge, PCI_IO_BASE, l); |
568 | /* Update upper 16 bits of I/O base/limit. */ | 568 | /* Update upper 16 bits of I/O base/limit. */ |
569 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 569 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
570 | } | 570 | } |
@@ -578,7 +578,7 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus) | |||
578 | 578 | ||
579 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | 579 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ |
580 | res = bus->resource[1]; | 580 | res = bus->resource[1]; |
581 | pcibios_resource_to_bus(bridge, ®ion, res); | 581 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
582 | if (res->flags & IORESOURCE_MEM) { | 582 | if (res->flags & IORESOURCE_MEM) { |
583 | l = (region.start >> 16) & 0xfff0; | 583 | l = (region.start >> 16) & 0xfff0; |
584 | l |= region.end & 0xfff00000; | 584 | l |= region.end & 0xfff00000; |
@@ -604,7 +604,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | |||
604 | /* Set up PREF base/limit. */ | 604 | /* Set up PREF base/limit. */ |
605 | bu = lu = 0; | 605 | bu = lu = 0; |
606 | res = bus->resource[2]; | 606 | res = bus->resource[2]; |
607 | pcibios_resource_to_bus(bridge, ®ion, res); | 607 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
608 | if (res->flags & IORESOURCE_PREFETCH) { | 608 | if (res->flags & IORESOURCE_PREFETCH) { |
609 | l = (region.start >> 16) & 0xfff0; | 609 | l = (region.start >> 16) & 0xfff0; |
610 | l |= region.end & 0xfff00000; | 610 | l |= region.end & 0xfff00000; |
@@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
665 | 665 | ||
666 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | 666 | pci_read_config_word(bridge, PCI_IO_BASE, &io); |
667 | if (!io) { | 667 | if (!io) { |
668 | pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); | 668 | pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); |
669 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | 669 | pci_read_config_word(bridge, PCI_IO_BASE, &io); |
670 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); | 670 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); |
671 | } | 671 | } |
672 | if (io) | 672 | if (io) |
673 | b_res[0].flags |= IORESOURCE_IO; | 673 | b_res[0].flags |= IORESOURCE_IO; |
674 | |||
674 | /* DECchip 21050 pass 2 errata: the bridge may miss an address | 675 | /* DECchip 21050 pass 2 errata: the bridge may miss an address |
675 | disconnect boundary by one PCI data phase. | 676 | disconnect boundary by one PCI data phase. |
676 | Workaround: do not use prefetching on this device. */ | 677 | Workaround: do not use prefetching on this device. */ |
677 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) | 678 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) |
678 | return; | 679 | return; |
680 | |||
679 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | 681 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); |
680 | if (!pmem) { | 682 | if (!pmem) { |
681 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, | 683 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, |
682 | 0xfff0fff0); | 684 | 0xffe0fff0); |
683 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | 685 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); |
684 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); | 686 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); |
685 | } | 687 | } |
@@ -1422,7 +1424,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) | |||
1422 | if (!r->flags) | 1424 | if (!r->flags) |
1423 | continue; | 1425 | continue; |
1424 | 1426 | ||
1425 | pcibios_resource_to_bus(dev, ®ion, r); | 1427 | pcibios_resource_to_bus(dev->bus, ®ion, r); |
1426 | if (!region.start) { | 1428 | if (!region.start) { |
1427 | *unassigned = true; | 1429 | *unassigned = true; |
1428 | return 1; /* return early from pci_walk_bus() */ | 1430 | return 1; /* return early from pci_walk_bus() */ |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 83c4d3bc47ab..5c060b152ce6 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -52,7 +52,7 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
52 | if (res->flags & IORESOURCE_PCI_FIXED) | 52 | if (res->flags & IORESOURCE_PCI_FIXED) |
53 | return; | 53 | return; |
54 | 54 | ||
55 | pcibios_resource_to_bus(dev, ®ion, res); | 55 | pcibios_resource_to_bus(dev->bus, ®ion, res); |
56 | 56 | ||
57 | new = region.start | (res->flags & PCI_REGION_FLAG_MASK); | 57 | new = region.start | (res->flags & PCI_REGION_FLAG_MASK); |
58 | if (res->flags & IORESOURCE_IO) | 58 | if (res->flags & IORESOURCE_IO) |
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 448ca562d1f8..7dd62fa9d0bd 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -320,32 +320,6 @@ err: | |||
320 | EXPORT_SYMBOL_GPL(pci_create_slot); | 320 | EXPORT_SYMBOL_GPL(pci_create_slot); |
321 | 321 | ||
322 | /** | 322 | /** |
323 | * pci_renumber_slot - update %struct pci_slot -> number | ||
324 | * @slot: &struct pci_slot to update | ||
325 | * @slot_nr: new number for slot | ||
326 | * | ||
327 | * The primary purpose of this interface is to allow callers who earlier | ||
328 | * created a placeholder slot in pci_create_slot() by passing a -1 as | ||
329 | * slot_nr, to update their %struct pci_slot with the correct @slot_nr. | ||
330 | */ | ||
331 | void pci_renumber_slot(struct pci_slot *slot, int slot_nr) | ||
332 | { | ||
333 | struct pci_slot *tmp; | ||
334 | |||
335 | down_write(&pci_bus_sem); | ||
336 | |||
337 | list_for_each_entry(tmp, &slot->bus->slots, list) { | ||
338 | WARN_ON(tmp->number == slot_nr); | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | slot->number = slot_nr; | ||
343 | out: | ||
344 | up_write(&pci_bus_sem); | ||
345 | } | ||
346 | EXPORT_SYMBOL_GPL(pci_renumber_slot); | ||
347 | |||
348 | /** | ||
349 | * pci_destroy_slot - decrement refcount for physical PCI slot | 323 | * pci_destroy_slot - decrement refcount for physical PCI slot |
350 | * @slot: struct pci_slot to decrement | 324 | * @slot: struct pci_slot to decrement |
351 | * | 325 | * |
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c new file mode 100644 index 000000000000..7e1304d2e389 --- /dev/null +++ b/drivers/pci/vc.c | |||
@@ -0,0 +1,434 @@ | |||
1 | /* | ||
2 | * PCI Virtual Channel support | ||
3 | * | ||
4 | * Copyright (C) 2013 Red Hat, Inc. All rights reserved. | ||
5 | * Author: Alex Williamson <alex.williamson@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/device.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/pci_regs.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | /** | ||
20 | * pci_vc_save_restore_dwords - Save or restore a series of dwords | ||
21 | * @dev: device | ||
22 | * @pos: starting config space position | ||
23 | * @buf: buffer to save to or restore from | ||
24 | * @dwords: number of dwords to save/restore | ||
25 | * @save: whether to save or restore | ||
26 | */ | ||
27 | static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos, | ||
28 | u32 *buf, int dwords, bool save) | ||
29 | { | ||
30 | int i; | ||
31 | |||
32 | for (i = 0; i < dwords; i++, buf++) { | ||
33 | if (save) | ||
34 | pci_read_config_dword(dev, pos + (i * 4), buf); | ||
35 | else | ||
36 | pci_write_config_dword(dev, pos + (i * 4), *buf); | ||
37 | } | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * pci_vc_load_arb_table - load and wait for VC arbitration table | ||
42 | * @dev: device | ||
43 | * @pos: starting position of VC capability (VC/VC9/MFVC) | ||
44 | * | ||
45 | * Set Load VC Arbitration Table bit requesting hardware to apply the VC | ||
46 | * Arbitration Table (previously loaded). When the VC Arbitration Table | ||
47 | * Status clears, hardware has latched the table into VC arbitration logic. | ||
48 | */ | ||
49 | static void pci_vc_load_arb_table(struct pci_dev *dev, int pos) | ||
50 | { | ||
51 | u16 ctrl; | ||
52 | |||
53 | pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl); | ||
54 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, | ||
55 | ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE); | ||
56 | if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS, | ||
57 | PCI_VC_PORT_STATUS_TABLE)) | ||
58 | return; | ||
59 | |||
60 | dev_err(&dev->dev, "VC arbitration table failed to load\n"); | ||
61 | } | ||
62 | |||
63 | /** | ||
64 | * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table | ||
65 | * @dev: device | ||
66 | * @pos: starting position of VC capability (VC/VC9/MFVC) | ||
67 | * @res: VC resource number, ie. VCn (0-7) | ||
68 | * | ||
69 | * Set Load Port Arbitration Table bit requesting hardware to apply the Port | ||
70 | * Arbitration Table (previously loaded). When the Port Arbitration Table | ||
71 | * Status clears, hardware has latched the table into port arbitration logic. | ||
72 | */ | ||
73 | static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res) | ||
74 | { | ||
75 | int ctrl_pos, status_pos; | ||
76 | u32 ctrl; | ||
77 | |||
78 | ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); | ||
79 | status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF); | ||
80 | |||
81 | pci_read_config_dword(dev, ctrl_pos, &ctrl); | ||
82 | pci_write_config_dword(dev, ctrl_pos, | ||
83 | ctrl | PCI_VC_RES_CTRL_LOAD_TABLE); | ||
84 | |||
85 | if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE)) | ||
86 | return; | ||
87 | |||
88 | dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res); | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * pci_vc_enable - Enable virtual channel | ||
93 | * @dev: device | ||
94 | * @pos: starting position of VC capability (VC/VC9/MFVC) | ||
95 | * @res: VC res number, ie. VCn (0-7) | ||
96 | * | ||
97 | * A VC is enabled by setting the enable bit in matching resource control | ||
98 | * registers on both sides of a link. We therefore need to find the opposite | ||
99 | * end of the link. To keep this simple we enable from the downstream device. | ||
100 | * RC devices do not have an upstream device, nor does it seem that VC9 does | ||
101 | * (spec is unclear). Once we find the upstream device, match the VC ID to | ||
102 | * get the correct resource, disable and enable on both ends. | ||
103 | */ | ||
104 | static void pci_vc_enable(struct pci_dev *dev, int pos, int res) | ||
105 | { | ||
106 | int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2; | ||
107 | u32 ctrl, header, cap1, ctrl2; | ||
108 | struct pci_dev *link = NULL; | ||
109 | |||
110 | /* Enable VCs from the downstream device */ | ||
111 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || | ||
112 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) | ||
113 | return; | ||
114 | |||
115 | ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); | ||
116 | status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF); | ||
117 | |||
118 | pci_read_config_dword(dev, ctrl_pos, &ctrl); | ||
119 | id = ctrl & PCI_VC_RES_CTRL_ID; | ||
120 | |||
121 | pci_read_config_dword(dev, pos, &header); | ||
122 | |||
123 | /* If there is no opposite end of the link, skip to enable */ | ||
124 | if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 || | ||
125 | pci_is_root_bus(dev->bus)) | ||
126 | goto enable; | ||
127 | |||
128 | pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC); | ||
129 | if (!pos2) | ||
130 | goto enable; | ||
131 | |||
132 | pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1); | ||
133 | evcc = cap1 & PCI_VC_CAP1_EVCC; | ||
134 | |||
135 | /* VC0 is hardwired enabled, so we can start with 1 */ | ||
136 | for (i = 1; i < evcc + 1; i++) { | ||
137 | ctrl_pos2 = pos2 + PCI_VC_RES_CTRL + | ||
138 | (i * PCI_CAP_VC_PER_VC_SIZEOF); | ||
139 | status_pos2 = pos2 + PCI_VC_RES_STATUS + | ||
140 | (i * PCI_CAP_VC_PER_VC_SIZEOF); | ||
141 | pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2); | ||
142 | if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) { | ||
143 | link = dev->bus->self; | ||
144 | break; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | if (!link) | ||
149 | goto enable; | ||
150 | |||
151 | /* Disable if enabled */ | ||
152 | if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) { | ||
153 | ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE; | ||
154 | pci_write_config_dword(link, ctrl_pos2, ctrl2); | ||
155 | } | ||
156 | |||
157 | /* Enable on both ends */ | ||
158 | ctrl2 |= PCI_VC_RES_CTRL_ENABLE; | ||
159 | pci_write_config_dword(link, ctrl_pos2, ctrl2); | ||
160 | enable: | ||
161 | ctrl |= PCI_VC_RES_CTRL_ENABLE; | ||
162 | pci_write_config_dword(dev, ctrl_pos, ctrl); | ||
163 | |||
164 | if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO)) | ||
165 | dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id); | ||
166 | |||
167 | if (link && !pci_wait_for_pending(link, status_pos2, | ||
168 | PCI_VC_RES_STATUS_NEGO)) | ||
169 | dev_err(&link->dev, "VC%d negotiation stuck pending\n", id); | ||
170 | } | ||
171 | |||
172 | /** | ||
173 | * pci_vc_do_save_buffer - Size, save, or restore VC state | ||
174 | * @dev: device | ||
175 | * @pos: starting position of VC capability (VC/VC9/MFVC) | ||
176 | * @save_state: buffer for save/restore | ||
177 | * @name: for error message | ||
178 | * @save: if provided a buffer, this indicates what to do with it | ||
179 | * | ||
180 | * Walking Virtual Channel config space to size, save, or restore it | ||
181 | * is complicated, so we do it all from one function to reduce code and | ||
182 | * guarantee ordering matches in the buffer. When called with NULL | ||
183 | * @save_state, return the size of the necessary save buffer. When called | ||
184 | * with a non-NULL @save_state, @save determines whether we save to the | ||
185 | * buffer or restore from it. | ||
186 | */ | ||
187 | static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos, | ||
188 | struct pci_cap_saved_state *save_state, | ||
189 | bool save) | ||
190 | { | ||
191 | u32 cap1; | ||
192 | char evcc, lpevcc, parb_size; | ||
193 | int i, len = 0; | ||
194 | u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL; | ||
195 | |||
196 | /* Sanity check buffer size for save/restore */ | ||
197 | if (buf && save_state->cap.size != | ||
198 | pci_vc_do_save_buffer(dev, pos, NULL, save)) { | ||
199 | dev_err(&dev->dev, | ||
200 | "VC save buffer size does not match @0x%x\n", pos); | ||
201 | return -ENOMEM; | ||
202 | } | ||
203 | |||
204 | pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1); | ||
205 | /* Extended VC Count (not counting VC0) */ | ||
206 | evcc = cap1 & PCI_VC_CAP1_EVCC; | ||
207 | /* Low Priority Extended VC Count (not counting VC0) */ | ||
208 | lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4; | ||
209 | /* Port Arbitration Table Entry Size (bits) */ | ||
210 | parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10); | ||
211 | |||
212 | /* | ||
213 | * Port VC Control Register contains VC Arbitration Select, which | ||
214 | * cannot be modified when more than one LPVC is in operation. We | ||
215 | * therefore save/restore it first, as only VC0 should be enabled | ||
216 | * after device reset. | ||
217 | */ | ||
218 | if (buf) { | ||
219 | if (save) | ||
220 | pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, | ||
221 | (u16 *)buf); | ||
222 | else | ||
223 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, | ||
224 | *(u16 *)buf); | ||
225 | buf += 2; | ||
226 | } | ||
227 | len += 2; | ||
228 | |||
229 | /* | ||
230 | * If we have any Low Priority VCs and a VC Arbitration Table Offset | ||
231 | * in Port VC Capability Register 2 then save/restore it next. | ||
232 | */ | ||
233 | if (lpevcc) { | ||
234 | u32 cap2; | ||
235 | int vcarb_offset; | ||
236 | |||
237 | pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2); | ||
238 | vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16; | ||
239 | |||
240 | if (vcarb_offset) { | ||
241 | int size, vcarb_phases = 0; | ||
242 | |||
243 | if (cap2 & PCI_VC_CAP2_128_PHASE) | ||
244 | vcarb_phases = 128; | ||
245 | else if (cap2 & PCI_VC_CAP2_64_PHASE) | ||
246 | vcarb_phases = 64; | ||
247 | else if (cap2 & PCI_VC_CAP2_32_PHASE) | ||
248 | vcarb_phases = 32; | ||
249 | |||
250 | /* Fixed 4 bits per phase per lpevcc (plus VC0) */ | ||
251 | size = ((lpevcc + 1) * vcarb_phases * 4) / 8; | ||
252 | |||
253 | if (size && buf) { | ||
254 | pci_vc_save_restore_dwords(dev, | ||
255 | pos + vcarb_offset, | ||
256 | (u32 *)buf, | ||
257 | size / 4, save); | ||
258 | /* | ||
259 | * On restore, we need to signal hardware to | ||
260 | * re-load the VC Arbitration Table. | ||
261 | */ | ||
262 | if (!save) | ||
263 | pci_vc_load_arb_table(dev, pos); | ||
264 | |||
265 | buf += size; | ||
266 | } | ||
267 | len += size; | ||
268 | } | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * In addition to each VC Resource Control Register, we may have a | ||
273 | * Port Arbitration Table attached to each VC. The Port Arbitration | ||
274 | * Table Offset in each VC Resource Capability Register tells us if | ||
275 | * it exists. The entry size is global from the Port VC Capability | ||
276 | * Register1 above. The number of phases is determined per VC. | ||
277 | */ | ||
278 | for (i = 0; i < evcc + 1; i++) { | ||
279 | u32 cap; | ||
280 | int parb_offset; | ||
281 | |||
282 | pci_read_config_dword(dev, pos + PCI_VC_RES_CAP + | ||
283 | (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap); | ||
284 | parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16; | ||
285 | if (parb_offset) { | ||
286 | int size, parb_phases = 0; | ||
287 | |||
288 | if (cap & PCI_VC_RES_CAP_256_PHASE) | ||
289 | parb_phases = 256; | ||
290 | else if (cap & (PCI_VC_RES_CAP_128_PHASE | | ||
291 | PCI_VC_RES_CAP_128_PHASE_TB)) | ||
292 | parb_phases = 128; | ||
293 | else if (cap & PCI_VC_RES_CAP_64_PHASE) | ||
294 | parb_phases = 64; | ||
295 | else if (cap & PCI_VC_RES_CAP_32_PHASE) | ||
296 | parb_phases = 32; | ||
297 | |||
298 | size = (parb_size * parb_phases) / 8; | ||
299 | |||
300 | if (size && buf) { | ||
301 | pci_vc_save_restore_dwords(dev, | ||
302 | pos + parb_offset, | ||
303 | (u32 *)buf, | ||
304 | size / 4, save); | ||
305 | buf += size; | ||
306 | } | ||
307 | len += size; | ||
308 | } | ||
309 | |||
310 | /* VC Resource Control Register */ | ||
311 | if (buf) { | ||
312 | int ctrl_pos = pos + PCI_VC_RES_CTRL + | ||
313 | (i * PCI_CAP_VC_PER_VC_SIZEOF); | ||
314 | if (save) | ||
315 | pci_read_config_dword(dev, ctrl_pos, | ||
316 | (u32 *)buf); | ||
317 | else { | ||
318 | u32 tmp, ctrl = *(u32 *)buf; | ||
319 | /* | ||
320 | * For an FLR case, the VC config may remain. | ||
321 | * Preserve enable bit, restore the rest. | ||
322 | */ | ||
323 | pci_read_config_dword(dev, ctrl_pos, &tmp); | ||
324 | tmp &= PCI_VC_RES_CTRL_ENABLE; | ||
325 | tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE; | ||
326 | pci_write_config_dword(dev, ctrl_pos, tmp); | ||
327 | /* Load port arbitration table if used */ | ||
328 | if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT) | ||
329 | pci_vc_load_port_arb_table(dev, pos, i); | ||
330 | /* Re-enable if needed */ | ||
331 | if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE) | ||
332 | pci_vc_enable(dev, pos, i); | ||
333 | } | ||
334 | buf += 4; | ||
335 | } | ||
336 | len += 4; | ||
337 | } | ||
338 | |||
339 | return buf ? 0 : len; | ||
340 | } | ||
341 | |||
342 | static struct { | ||
343 | u16 id; | ||
344 | const char *name; | ||
345 | } vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" }, | ||
346 | { PCI_EXT_CAP_ID_VC, "VC" }, | ||
347 | { PCI_EXT_CAP_ID_VC9, "VC9" } }; | ||
348 | |||
349 | /** | ||
350 | * pci_save_vc_state - Save VC state to pre-allocated save buffer | ||
351 | * @dev: device | ||
352 | * | ||
353 | * For each type of VC capability, VC/VC9/MFVC, find the capability and | ||
354 | * save it to the pre-allocated save buffer. | ||
355 | */ | ||
356 | int pci_save_vc_state(struct pci_dev *dev) | ||
357 | { | ||
358 | int i; | ||
359 | |||
360 | for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { | ||
361 | int pos, ret; | ||
362 | struct pci_cap_saved_state *save_state; | ||
363 | |||
364 | pos = pci_find_ext_capability(dev, vc_caps[i].id); | ||
365 | if (!pos) | ||
366 | continue; | ||
367 | |||
368 | save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id); | ||
369 | if (!save_state) { | ||
370 | dev_err(&dev->dev, "%s buffer not found in %s\n", | ||
371 | vc_caps[i].name, __func__); | ||
372 | return -ENOMEM; | ||
373 | } | ||
374 | |||
375 | ret = pci_vc_do_save_buffer(dev, pos, save_state, true); | ||
376 | if (ret) { | ||
377 | dev_err(&dev->dev, "%s save unsuccessful %s\n", | ||
378 | vc_caps[i].name, __func__); | ||
379 | return ret; | ||
380 | } | ||
381 | } | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * pci_restore_vc_state - Restore VC state from save buffer | ||
388 | * @dev: device | ||
389 | * | ||
390 | * For each type of VC capability, VC/VC9/MFVC, find the capability and | ||
391 | * restore it from the previously saved buffer. | ||
392 | */ | ||
393 | void pci_restore_vc_state(struct pci_dev *dev) | ||
394 | { | ||
395 | int i; | ||
396 | |||
397 | for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { | ||
398 | int pos; | ||
399 | struct pci_cap_saved_state *save_state; | ||
400 | |||
401 | pos = pci_find_ext_capability(dev, vc_caps[i].id); | ||
402 | save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id); | ||
403 | if (!save_state || !pos) | ||
404 | continue; | ||
405 | |||
406 | pci_vc_do_save_buffer(dev, pos, save_state, false); | ||
407 | } | ||
408 | } | ||
409 | |||
410 | /** | ||
411 | * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps | ||
412 | * @dev: device | ||
413 | * | ||
414 | * For each type of VC capability, VC/VC9/MFVC, find the capability, size | ||
415 | * it, and allocate a buffer for save/restore. | ||
416 | */ | ||
417 | |||
418 | void pci_allocate_vc_save_buffers(struct pci_dev *dev) | ||
419 | { | ||
420 | int i; | ||
421 | |||
422 | for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { | ||
423 | int len, pos = pci_find_ext_capability(dev, vc_caps[i].id); | ||
424 | |||
425 | if (!pos) | ||
426 | continue; | ||
427 | |||
428 | len = pci_vc_do_save_buffer(dev, pos, NULL, false); | ||
429 | if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len)) | ||
430 | dev_err(&dev->dev, | ||
431 | "unable to preallocate %s save buffer\n", | ||
432 | vc_caps[i].name); | ||
433 | } | ||
434 | } | ||
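A sketch of how the three entry points of this new file fit together around a device reset. The wrapper below is hypothetical; it only assumes that pci_allocate_vc_save_buffers() already ran for the device so the saved-capability buffers exist, and it uses pci_reset_function() purely as an example of a reset that clobbers extended config space:

#include <linux/pci.h>

/* Hypothetical reset helper that preserves Virtual Channel state. */
static int example_reset_keeping_vc(struct pci_dev *pdev)
{
        int ret;

        /* Snapshot VC/VC9/MFVC config into the preallocated buffers. */
        ret = pci_save_vc_state(pdev);
        if (ret)
                return ret;

        ret = pci_reset_function(pdev);

        /* Re-program the capabilities; restore has no return value. */
        pci_restore_vc_state(pdev);

        return ret;
}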
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index f7197a790341..d1cd60f51f87 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
@@ -471,12 +471,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev, | |||
471 | } | 471 | } |
472 | pcifront_init_sd(sd, domain, bus, pdev); | 472 | pcifront_init_sd(sd, domain, bus, pdev); |
473 | 473 | ||
474 | pci_lock_rescan_remove(); | ||
475 | |||
474 | b = pci_scan_bus_parented(&pdev->xdev->dev, bus, | 476 | b = pci_scan_bus_parented(&pdev->xdev->dev, bus, |
475 | &pcifront_bus_ops, sd); | 477 | &pcifront_bus_ops, sd); |
476 | if (!b) { | 478 | if (!b) { |
477 | dev_err(&pdev->xdev->dev, | 479 | dev_err(&pdev->xdev->dev, |
478 | "Error creating PCI Frontend Bus!\n"); | 480 | "Error creating PCI Frontend Bus!\n"); |
479 | err = -ENOMEM; | 481 | err = -ENOMEM; |
482 | pci_unlock_rescan_remove(); | ||
480 | goto err_out; | 483 | goto err_out; |
481 | } | 484 | } |
482 | 485 | ||
@@ -494,6 +497,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev, | |||
494 | /* Create SysFS and notify udev of the devices. Aka: "going live" */ | 497 | /* Create SysFS and notify udev of the devices. Aka: "going live" */ |
495 | pci_bus_add_devices(b); | 498 | pci_bus_add_devices(b); |
496 | 499 | ||
500 | pci_unlock_rescan_remove(); | ||
497 | return err; | 501 | return err; |
498 | 502 | ||
499 | err_out: | 503 | err_out: |
@@ -556,6 +560,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev) | |||
556 | 560 | ||
557 | dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n"); | 561 | dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n"); |
558 | 562 | ||
563 | pci_lock_rescan_remove(); | ||
559 | list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) { | 564 | list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) { |
560 | list_del(&bus_entry->list); | 565 | list_del(&bus_entry->list); |
561 | 566 | ||
@@ -568,6 +573,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev) | |||
568 | 573 | ||
569 | kfree(bus_entry); | 574 | kfree(bus_entry); |
570 | } | 575 | } |
576 | pci_unlock_rescan_remove(); | ||
571 | } | 577 | } |
572 | 578 | ||
573 | static pci_ers_result_t pcifront_common_process(int cmd, | 579 | static pci_ers_result_t pcifront_common_process(int cmd, |
@@ -1043,8 +1049,10 @@ static int pcifront_detach_devices(struct pcifront_device *pdev) | |||
1043 | domain, bus, slot, func); | 1049 | domain, bus, slot, func); |
1044 | continue; | 1050 | continue; |
1045 | } | 1051 | } |
1052 | pci_lock_rescan_remove(); | ||
1046 | pci_stop_and_remove_bus_device(pci_dev); | 1053 | pci_stop_and_remove_bus_device(pci_dev); |
1047 | pci_dev_put(pci_dev); | 1054 | pci_dev_put(pci_dev); |
1055 | pci_unlock_rescan_remove(); | ||
1048 | 1056 | ||
1049 | dev_dbg(&pdev->xdev->dev, | 1057 | dev_dbg(&pdev->xdev->dev, |
1050 | "PCI device %04x:%02x:%02x.%d removed.\n", | 1058 | "PCI device %04x:%02x:%02x.%d removed.\n", |
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index b2a98cdbd0d2..8bde61952d20 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c | |||
@@ -70,6 +70,8 @@ int __ref cb_alloc(struct pcmcia_socket *s) | |||
70 | struct pci_dev *dev; | 70 | struct pci_dev *dev; |
71 | unsigned int max, pass; | 71 | unsigned int max, pass; |
72 | 72 | ||
73 | pci_lock_rescan_remove(); | ||
74 | |||
73 | s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); | 75 | s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); |
74 | pci_fixup_cardbus(bus); | 76 | pci_fixup_cardbus(bus); |
75 | 77 | ||
@@ -93,6 +95,7 @@ int __ref cb_alloc(struct pcmcia_socket *s) | |||
93 | 95 | ||
94 | pci_bus_add_devices(bus); | 96 | pci_bus_add_devices(bus); |
95 | 97 | ||
98 | pci_unlock_rescan_remove(); | ||
96 | return 0; | 99 | return 0; |
97 | } | 100 | } |
98 | 101 | ||
@@ -115,6 +118,10 @@ void cb_free(struct pcmcia_socket *s) | |||
115 | if (!bus) | 118 | if (!bus) |
116 | return; | 119 | return; |
117 | 120 | ||
121 | pci_lock_rescan_remove(); | ||
122 | |||
118 | list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) | 123 | list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) |
119 | pci_stop_and_remove_bus_device(dev); | 124 | pci_stop_and_remove_bus_device(dev); |
125 | |||
126 | pci_unlock_rescan_remove(); | ||
120 | } | 127 | } |
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index 519c4d6003a6..7d47456429a1 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c | |||
@@ -608,7 +608,7 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_ | |||
608 | 608 | ||
609 | enter("i82092aa_set_mem_map"); | 609 | enter("i82092aa_set_mem_map"); |
610 | 610 | ||
611 | pcibios_resource_to_bus(sock_info->dev, ®ion, mem->res); | 611 | pcibios_resource_to_bus(sock_info->dev->bus, ®ion, mem->res); |
612 | 612 | ||
613 | map = mem->map; | 613 | map = mem->map; |
614 | if (map > 4) { | 614 | if (map > 4) { |
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index dc18a3a5e010..8485761e76af 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c | |||
@@ -445,7 +445,7 @@ static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map * | |||
445 | unsigned int start, stop, card_start; | 445 | unsigned int start, stop, card_start; |
446 | unsigned short word; | 446 | unsigned short word; |
447 | 447 | ||
448 | pcibios_resource_to_bus(socket->dev, ®ion, mem->res); | 448 | pcibios_resource_to_bus(socket->dev->bus, ®ion, mem->res); |
449 | 449 | ||
450 | map = mem->map; | 450 | map = mem->map; |
451 | start = region.start; | 451 | start = region.start; |
@@ -709,7 +709,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type | |||
709 | region.start = config_readl(socket, addr_start) & mask; | 709 | region.start = config_readl(socket, addr_start) & mask; |
710 | region.end = config_readl(socket, addr_end) | ~mask; | 710 | region.end = config_readl(socket, addr_end) | ~mask; |
711 | if (region.start && region.end > region.start && !override_bios) { | 711 | if (region.start && region.end > region.start && !override_bios) { |
712 | pcibios_bus_to_resource(dev, res, ®ion); | 712 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
713 | if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0) | 713 | if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0) |
714 | return 0; | 714 | return 0; |
715 | dev_printk(KERN_INFO, &dev->dev, | 715 | dev_printk(KERN_INFO, &dev->dev, |
@@ -1033,7 +1033,7 @@ static void yenta_config_init(struct yenta_socket *socket) | |||
1033 | struct pci_dev *dev = socket->dev; | 1033 | struct pci_dev *dev = socket->dev; |
1034 | struct pci_bus_region region; | 1034 | struct pci_bus_region region; |
1035 | 1035 | ||
1036 | pcibios_resource_to_bus(socket->dev, ®ion, &dev->resource[0]); | 1036 | pcibios_resource_to_bus(socket->dev->bus, ®ion, &dev->resource[0]); |
1037 | 1037 | ||
1038 | config_writel(socket, CB_LEGACY_MODE_BASE, 0); | 1038 | config_writel(socket, CB_LEGACY_MODE_BASE, 0); |
1039 | config_writel(socket, PCI_BASE_ADDRESS_0, region.start); | 1039 | config_writel(socket, PCI_BASE_ADDRESS_0, region.start); |
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 19c313b056c3..6fe268f6af91 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
@@ -606,6 +606,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus) | |||
606 | mutex_unlock(&asus->wmi_lock); | 606 | mutex_unlock(&asus->wmi_lock); |
607 | 607 | ||
608 | mutex_lock(&asus->hotplug_lock); | 608 | mutex_lock(&asus->hotplug_lock); |
609 | pci_lock_rescan_remove(); | ||
609 | 610 | ||
610 | if (asus->wlan.rfkill) | 611 | if (asus->wlan.rfkill) |
611 | rfkill_set_sw_state(asus->wlan.rfkill, blocked); | 612 | rfkill_set_sw_state(asus->wlan.rfkill, blocked); |
@@ -656,6 +657,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus) | |||
656 | } | 657 | } |
657 | 658 | ||
658 | out_unlock: | 659 | out_unlock: |
660 | pci_unlock_rescan_remove(); | ||
659 | mutex_unlock(&asus->hotplug_lock); | 661 | mutex_unlock(&asus->hotplug_lock); |
660 | } | 662 | } |
661 | 663 | ||
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index dec68e7a99c7..7029cba7025b 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -592,6 +592,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) | |||
592 | rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); | 592 | rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); |
593 | 593 | ||
594 | mutex_lock(&eeepc->hotplug_lock); | 594 | mutex_lock(&eeepc->hotplug_lock); |
595 | pci_lock_rescan_remove(); | ||
595 | 596 | ||
596 | if (eeepc->hotplug_slot) { | 597 | if (eeepc->hotplug_slot) { |
597 | port = acpi_get_pci_dev(handle); | 598 | port = acpi_get_pci_dev(handle); |
@@ -649,6 +650,7 @@ out_put_dev: | |||
649 | } | 650 | } |
650 | 651 | ||
651 | out_unlock: | 652 | out_unlock: |
653 | pci_unlock_rescan_remove(); | ||
652 | mutex_unlock(&eeepc->hotplug_lock); | 654 | mutex_unlock(&eeepc->hotplug_lock); |
653 | } | 655 | } |
654 | 656 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 3901edc35812..bde63f7452bd 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -128,7 +128,7 @@ static int mpt2sas_remove_dead_ioc_func(void *arg) | |||
128 | pdev = ioc->pdev; | 128 | pdev = ioc->pdev; |
129 | if ((pdev == NULL)) | 129 | if ((pdev == NULL)) |
130 | return -1; | 130 | return -1; |
131 | pci_stop_and_remove_bus_device(pdev); | 131 | pci_stop_and_remove_bus_device_locked(pdev); |
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index fa785062e97b..0cf4f7000f94 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -131,7 +131,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg) | |||
131 | pdev = ioc->pdev; | 131 | pdev = ioc->pdev; |
132 | if ((pdev == NULL)) | 132 | if ((pdev == NULL)) |
133 | return -1; | 133 | return -1; |
134 | pci_stop_and_remove_bus_device(pdev); | 134 | pci_stop_and_remove_bus_device_locked(pdev); |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index bac55f7f69f9..6d3ee1ab6362 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -1531,7 +1531,7 @@ static int sym_iomap_device(struct sym_device *device) | |||
1531 | struct pci_bus_region bus_addr; | 1531 | struct pci_bus_region bus_addr; |
1532 | int i = 2; | 1532 | int i = 2; |
1533 | 1533 | ||
1534 | pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); | 1534 | pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]); |
1535 | device->mmio_base = bus_addr.start; | 1535 | device->mmio_base = bus_addr.start; |
1536 | 1536 | ||
1537 | if (device->chip.features & FE_RAM) { | 1537 | if (device->chip.features & FE_RAM) { |
@@ -1541,7 +1541,8 @@ static int sym_iomap_device(struct sym_device *device) | |||
1541 | */ | 1541 | */ |
1542 | if (!pdev->resource[i].flags) | 1542 | if (!pdev->resource[i].flags) |
1543 | i++; | 1543 | i++; |
1544 | pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); | 1544 | pcibios_resource_to_bus(pdev->bus, &bus_addr, |
1545 | &pdev->resource[i]); | ||
1545 | device->ram_base = bus_addr.start; | 1546 | device->ram_base = bus_addr.start; |
1546 | } | 1547 | } |
1547 | 1548 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 6ab71b9fcf8d..2319d206f630 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -139,25 +139,14 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) | |||
139 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); | 139 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Careful, device_lock may already be held. This is the case if | 142 | * Try to reset the device. The success of this is dependent on |
143 | * a driver unbind is blocked. Try to get the locks ourselves to | 143 | * being able to lock the device, which is not always possible. |
144 | * prevent a deadlock. | ||
145 | */ | 144 | */ |
146 | if (vdev->reset_works) { | 145 | if (vdev->reset_works) { |
147 | bool reset_done = false; | 146 | int ret = pci_try_reset_function(pdev); |
148 | 147 | if (ret) | |
149 | if (pci_cfg_access_trylock(pdev)) { | 148 | pr_warn("%s: Failed to reset device %s (%d)\n", |
150 | if (device_trylock(&pdev->dev)) { | 149 | __func__, dev_name(&pdev->dev), ret); |
151 | __pci_reset_function_locked(pdev); | ||
152 | reset_done = true; | ||
153 | device_unlock(&pdev->dev); | ||
154 | } | ||
155 | pci_cfg_access_unlock(pdev); | ||
156 | } | ||
157 | |||
158 | if (!reset_done) | ||
159 | pr_warn("%s: Unable to acquire locks for reset of %s\n", | ||
160 | __func__, dev_name(&pdev->dev)); | ||
161 | } | 150 | } |
162 | 151 | ||
163 | pci_restore_state(pdev); | 152 | pci_restore_state(pdev); |
@@ -514,7 +503,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
514 | 503 | ||
515 | } else if (cmd == VFIO_DEVICE_RESET) { | 504 | } else if (cmd == VFIO_DEVICE_RESET) { |
516 | return vdev->reset_works ? | 505 | return vdev->reset_works ? |
517 | pci_reset_function(vdev->pdev) : -EINVAL; | 506 | pci_try_reset_function(vdev->pdev) : -EINVAL; |
518 | 507 | ||
519 | } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { | 508 | } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { |
520 | struct vfio_pci_hot_reset_info hdr; | 509 | struct vfio_pci_hot_reset_info hdr; |
@@ -684,8 +673,8 @@ reset_info_exit: | |||
684 | &info, slot); | 673 | &info, slot); |
685 | if (!ret) | 674 | if (!ret) |
686 | /* User has access, do the reset */ | 675 | /* User has access, do the reset */ |
687 | ret = slot ? pci_reset_slot(vdev->pdev->slot) : | 676 | ret = slot ? pci_try_reset_slot(vdev->pdev->slot) : |
688 | pci_reset_bus(vdev->pdev->bus); | 677 | pci_try_reset_bus(vdev->pdev->bus); |
689 | 678 | ||
690 | hot_reset_release: | 679 | hot_reset_release: |
691 | for (i--; i >= 0; i--) | 680 | for (i--; i >= 0; i--) |
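The switch to the pci_try_reset_*() variants above relies on their try-lock behaviour: instead of blocking on a contended device lock (the deadlock the removed comment worked around), they return an error and let the caller decide. A minimal sketch of that pattern, with a hypothetical teardown path:

#include <linux/pci.h>

/* Hypothetical teardown: attempt a reset, but never block on it. */
static void example_teardown_reset(struct pci_dev *pdev)
{
        int ret = pci_try_reset_function(pdev);

        if (ret)
                dev_warn(&pdev->dev, "reset skipped (%d)\n", ret);
}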
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index ffd0632c3cbc..83cd1574c810 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
@@ -975,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos) | |||
975 | int ret, evcc, phases, vc_arb; | 975 | int ret, evcc, phases, vc_arb; |
976 | int len = PCI_CAP_VC_BASE_SIZEOF; | 976 | int len = PCI_CAP_VC_BASE_SIZEOF; |
977 | 977 | ||
978 | ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp); | 978 | ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp); |
979 | if (ret) | 979 | if (ret) |
980 | return pcibios_err_to_errno(ret); | 980 | return pcibios_err_to_errno(ret); |
981 | 981 | ||
982 | evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */ | 982 | evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */ |
983 | ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp); | 983 | ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp); |
984 | if (ret) | 984 | if (ret) |
985 | return pcibios_err_to_errno(ret); | 985 | return pcibios_err_to_errno(ret); |
986 | 986 | ||
987 | if (tmp & PCI_VC_REG2_128_PHASE) | 987 | if (tmp & PCI_VC_CAP2_128_PHASE) |
988 | phases = 128; | 988 | phases = 128; |
989 | else if (tmp & PCI_VC_REG2_64_PHASE) | 989 | else if (tmp & PCI_VC_CAP2_64_PHASE) |
990 | phases = 64; | 990 | phases = 64; |
991 | else if (tmp & PCI_VC_REG2_32_PHASE) | 991 | else if (tmp & PCI_VC_CAP2_32_PHASE) |
992 | phases = 32; | 992 | phases = 32; |
993 | else | 993 | else |
994 | phases = 0; | 994 | phases = 0; |
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c index a6b29bd4a12a..adc4ea2cc5a0 100644 --- a/drivers/video/arkfb.c +++ b/drivers/video/arkfb.c | |||
@@ -1014,7 +1014,7 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1014 | 1014 | ||
1015 | vga_res.flags = IORESOURCE_IO; | 1015 | vga_res.flags = IORESOURCE_IO; |
1016 | 1016 | ||
1017 | pcibios_bus_to_resource(dev, &vga_res, &bus_reg); | 1017 | pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); |
1018 | 1018 | ||
1019 | par->state.vgabase = (void __iomem *) vga_res.start; | 1019 | par->state.vgabase = (void __iomem *) vga_res.start; |
1020 | 1020 | ||
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 968b2997175a..9a3f8f1c6aab 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c | |||
@@ -1180,7 +1180,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1180 | 1180 | ||
1181 | vga_res.flags = IORESOURCE_IO; | 1181 | vga_res.flags = IORESOURCE_IO; |
1182 | 1182 | ||
1183 | pcibios_bus_to_resource(dev, &vga_res, &bus_reg); | 1183 | pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); |
1184 | 1184 | ||
1185 | par->state.vgabase = (void __iomem *) vga_res.start; | 1185 | par->state.vgabase = (void __iomem *) vga_res.start; |
1186 | 1186 | ||
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c index 8bc6e0958a09..5c7cbc6c6236 100644 --- a/drivers/video/vt8623fb.c +++ b/drivers/video/vt8623fb.c | |||
@@ -729,7 +729,7 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
729 | 729 | ||
730 | vga_res.flags = IORESOURCE_IO; | 730 | vga_res.flags = IORESOURCE_IO; |
731 | 731 | ||
732 | pcibios_bus_to_resource(dev, &vga_res, &bus_reg); | 732 | pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); |
733 | 733 | ||
734 | par->state.vgabase = (void __iomem *) vga_res.start; | 734 | par->state.vgabase = (void __iomem *) vga_res.start; |
735 | 735 | ||
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 556c83ee6b42..4ec8c194bfe5 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h | |||
@@ -457,7 +457,7 @@ struct acpi_hest_aer_common { | |||
457 | u8 enabled; | 457 | u8 enabled; |
458 | u32 records_to_preallocate; | 458 | u32 records_to_preallocate; |
459 | u32 max_sections_per_record; | 459 | u32 max_sections_per_record; |
460 | u32 bus; | 460 | u32 bus; /* Bus and Segment numbers */ |
461 | u16 device; | 461 | u16 device; |
462 | u16 function; | 462 | u16 function; |
463 | u16 device_control; | 463 | u16 device_control; |
@@ -473,6 +473,14 @@ struct acpi_hest_aer_common { | |||
473 | #define ACPI_HEST_FIRMWARE_FIRST (1) | 473 | #define ACPI_HEST_FIRMWARE_FIRST (1) |
474 | #define ACPI_HEST_GLOBAL (1<<1) | 474 | #define ACPI_HEST_GLOBAL (1<<1) |
475 | 475 | ||
476 | /* | ||
477 | * Macros to access the bus/segment numbers in Bus field above: | ||
478 | * Bus number is encoded in bits 7:0 | ||
479 | * Segment number is encoded in bits 23:8 | ||
480 | */ | ||
481 | #define ACPI_HEST_BUS(bus) ((bus) & 0xFF) | ||
482 | #define ACPI_HEST_SEGMENT(bus) (((bus) >> 8) & 0xFFFF) | ||
483 | |||
476 | /* Hardware Error Notification */ | 484 | /* Hardware Error Notification */ |
477 | 485 | ||
478 | struct acpi_hest_notify { | 486 | struct acpi_hest_notify { |
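A small sketch of the new accessor macros; the helper below is hypothetical and only assumes the struct acpi_hest_aer_common layout shown above, where bits 7:0 of the Bus field carry the bus number and bits 23:8 the segment:

#include <linux/acpi.h>

/* Split the combined Bus field into PCI segment and bus numbers. */
static void example_decode_hest_bus(struct acpi_hest_aer_common *p,
                                    u16 *segment, u8 *bus)
{
        *segment = ACPI_HEST_SEGMENT(p->bus);
        *bus = ACPI_HEST_BUS(p->bus);
}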
diff --git a/include/linux/msi.h b/include/linux/msi.h index 009b02481436..92a2f991262a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -60,10 +60,10 @@ void arch_teardown_msi_irq(unsigned int irq); | |||
60 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 60 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
61 | void arch_teardown_msi_irqs(struct pci_dev *dev); | 61 | void arch_teardown_msi_irqs(struct pci_dev *dev); |
62 | int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); | 62 | int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); |
63 | void arch_restore_msi_irqs(struct pci_dev *dev, int irq); | 63 | void arch_restore_msi_irqs(struct pci_dev *dev); |
64 | 64 | ||
65 | void default_teardown_msi_irqs(struct pci_dev *dev); | 65 | void default_teardown_msi_irqs(struct pci_dev *dev); |
66 | void default_restore_msi_irqs(struct pci_dev *dev, int irq); | 66 | void default_restore_msi_irqs(struct pci_dev *dev); |
67 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); | 67 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
68 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); | 68 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); |
69 | 69 | ||
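With the per-vector irq argument gone, arch_restore_msi_irqs() now restores every MSI/MSI-X entry of the device in one call. A sketch of what an architecture override with no special requirements might look like under the new prototype; real architectures hook interrupt remapping here, so this is illustrative only:

#include <linux/msi.h>
#include <linux/pci.h>

/* Restore all MSI/MSI-X messages of @dev after resume or reset. */
void arch_restore_msi_irqs(struct pci_dev *dev)
{
        default_restore_msi_irqs(dev);
}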
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index 68bcefd7fca0..72031785fe1d 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h | |||
@@ -56,10 +56,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
56 | 56 | ||
57 | int pci_enable_pri(struct pci_dev *pdev, u32 reqs); | 57 | int pci_enable_pri(struct pci_dev *pdev, u32 reqs); |
58 | void pci_disable_pri(struct pci_dev *pdev); | 58 | void pci_disable_pri(struct pci_dev *pdev); |
59 | bool pci_pri_enabled(struct pci_dev *pdev); | ||
60 | int pci_reset_pri(struct pci_dev *pdev); | 59 | int pci_reset_pri(struct pci_dev *pdev); |
61 | bool pci_pri_stopped(struct pci_dev *pdev); | ||
62 | int pci_pri_status(struct pci_dev *pdev); | ||
63 | 60 | ||
64 | #else /* CONFIG_PCI_PRI */ | 61 | #else /* CONFIG_PCI_PRI */ |
65 | 62 | ||
@@ -72,25 +69,11 @@ static inline void pci_disable_pri(struct pci_dev *pdev) | |||
72 | { | 69 | { |
73 | } | 70 | } |
74 | 71 | ||
75 | static inline bool pci_pri_enabled(struct pci_dev *pdev) | ||
76 | { | ||
77 | return false; | ||
78 | } | ||
79 | |||
80 | static inline int pci_reset_pri(struct pci_dev *pdev) | 72 | static inline int pci_reset_pri(struct pci_dev *pdev) |
81 | { | 73 | { |
82 | return -ENODEV; | 74 | return -ENODEV; |
83 | } | 75 | } |
84 | 76 | ||
85 | static inline bool pci_pri_stopped(struct pci_dev *pdev) | ||
86 | { | ||
87 | return true; | ||
88 | } | ||
89 | |||
90 | static inline int pci_pri_status(struct pci_dev *pdev) | ||
91 | { | ||
92 | return -ENODEV; | ||
93 | } | ||
94 | #endif /* CONFIG_PCI_PRI */ | 77 | #endif /* CONFIG_PCI_PRI */ |
95 | 78 | ||
96 | #ifdef CONFIG_PCI_PASID | 79 | #ifdef CONFIG_PCI_PASID |
diff --git a/include/linux/pci.h b/include/linux/pci.h index a13d6825e586..fb57c892b214 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -224,7 +224,8 @@ enum pci_bus_speed { | |||
224 | }; | 224 | }; |
225 | 225 | ||
226 | struct pci_cap_saved_data { | 226 | struct pci_cap_saved_data { |
227 | char cap_nr; | 227 | u16 cap_nr; |
228 | bool cap_extended; | ||
228 | unsigned int size; | 229 | unsigned int size; |
229 | u32 data[0]; | 230 | u32 data[0]; |
230 | }; | 231 | }; |
@@ -351,7 +352,7 @@ struct pci_dev { | |||
351 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ | 352 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ |
352 | #ifdef CONFIG_PCI_MSI | 353 | #ifdef CONFIG_PCI_MSI |
353 | struct list_head msi_list; | 354 | struct list_head msi_list; |
354 | struct kset *msi_kset; | 355 | const struct attribute_group **msi_irq_groups; |
355 | #endif | 356 | #endif |
356 | struct pci_vpd *vpd; | 357 | struct pci_vpd *vpd; |
357 | #ifdef CONFIG_PCI_ATS | 358 | #ifdef CONFIG_PCI_ATS |
@@ -375,7 +376,6 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev) | |||
375 | } | 376 | } |
376 | 377 | ||
377 | struct pci_dev *pci_alloc_dev(struct pci_bus *bus); | 378 | struct pci_dev *pci_alloc_dev(struct pci_bus *bus); |
378 | struct pci_dev * __deprecated alloc_pci_dev(void); | ||
379 | 379 | ||
380 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) | 380 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) |
381 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) | 381 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) |
@@ -385,8 +385,6 @@ static inline int pci_channel_offline(struct pci_dev *pdev) | |||
385 | return (pdev->error_state != pci_channel_io_normal); | 385 | return (pdev->error_state != pci_channel_io_normal); |
386 | } | 386 | } |
387 | 387 | ||
388 | extern struct resource busn_resource; | ||
389 | |||
390 | struct pci_host_bridge_window { | 388 | struct pci_host_bridge_window { |
391 | struct list_head list; | 389 | struct list_head list; |
392 | struct resource *res; /* host bridge aperture (CPU address) */ | 390 | struct resource *res; /* host bridge aperture (CPU address) */ |
@@ -551,8 +549,8 @@ int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, | |||
551 | int reg, int len, u32 val); | 549 | int reg, int len, u32 val); |
552 | 550 | ||
553 | struct pci_bus_region { | 551 | struct pci_bus_region { |
554 | resource_size_t start; | 552 | dma_addr_t start; |
555 | resource_size_t end; | 553 | dma_addr_t end; |
556 | }; | 554 | }; |
557 | 555 | ||
558 | struct pci_dynids { | 556 | struct pci_dynids { |
@@ -634,8 +632,7 @@ struct pci_driver { | |||
634 | * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table | 632 | * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table |
635 | * @_table: device table name | 633 | * @_table: device table name |
636 | * | 634 | * |
637 | * This macro is used to create a struct pci_device_id array (a device table) | 635 | * This macro is deprecated and should not be used in new code. |
638 | * in a generic manner. | ||
639 | */ | 636 | */ |
640 | #define DEFINE_PCI_DEVICE_TABLE(_table) \ | 637 | #define DEFINE_PCI_DEVICE_TABLE(_table) \ |
641 | const struct pci_device_id _table[] | 638 | const struct pci_device_id _table[] |
@@ -737,9 +734,9 @@ void pci_fixup_cardbus(struct pci_bus *); | |||
737 | 734 | ||
738 | /* Generic PCI functions used internally */ | 735 | /* Generic PCI functions used internally */ |
739 | 736 | ||
740 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | 737 | void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, |
741 | struct resource *res); | 738 | struct resource *res); |
742 | void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | 739 | void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, |
743 | struct pci_bus_region *region); | 740 | struct pci_bus_region *region); |
744 | void pcibios_scan_specific_bus(int busn); | 741 | void pcibios_scan_specific_bus(int busn); |
745 | struct pci_bus *pci_find_bus(int domain, int busnr); | 742 | struct pci_bus *pci_find_bus(int domain, int busnr); |
@@ -763,7 +760,6 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, | |||
763 | const char *name, | 760 | const char *name, |
764 | struct hotplug_slot *hotplug); | 761 | struct hotplug_slot *hotplug); |
765 | void pci_destroy_slot(struct pci_slot *slot); | 762 | void pci_destroy_slot(struct pci_slot *slot); |
766 | void pci_renumber_slot(struct pci_slot *slot, int slot_nr); | ||
767 | int pci_scan_slot(struct pci_bus *bus, int devfn); | 763 | int pci_scan_slot(struct pci_bus *bus, int devfn); |
768 | struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); | 764 | struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); |
769 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); | 765 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); |
@@ -779,6 +775,7 @@ struct pci_dev *pci_dev_get(struct pci_dev *dev); | |||
779 | void pci_dev_put(struct pci_dev *dev); | 775 | void pci_dev_put(struct pci_dev *dev); |
780 | void pci_remove_bus(struct pci_bus *b); | 776 | void pci_remove_bus(struct pci_bus *b); |
781 | void pci_stop_and_remove_bus_device(struct pci_dev *dev); | 777 | void pci_stop_and_remove_bus_device(struct pci_dev *dev); |
778 | void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev); | ||
782 | void pci_stop_root_bus(struct pci_bus *bus); | 779 | void pci_stop_root_bus(struct pci_bus *bus); |
783 | void pci_remove_root_bus(struct pci_bus *bus); | 780 | void pci_remove_root_bus(struct pci_bus *bus); |
784 | void pci_setup_cardbus(struct pci_bus *bus); | 781 | void pci_setup_cardbus(struct pci_bus *bus); |
@@ -938,6 +935,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev); | |||
938 | void pci_msi_off(struct pci_dev *dev); | 935 | void pci_msi_off(struct pci_dev *dev); |
939 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); | 936 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); |
940 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); | 937 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); |
938 | int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); | ||
941 | int pci_wait_for_pending_transaction(struct pci_dev *dev); | 939 | int pci_wait_for_pending_transaction(struct pci_dev *dev); |
942 | int pcix_get_max_mmrbc(struct pci_dev *dev); | 940 | int pcix_get_max_mmrbc(struct pci_dev *dev); |
943 | int pcix_get_mmrbc(struct pci_dev *dev); | 941 | int pcix_get_mmrbc(struct pci_dev *dev); |
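
The hunk above adds pci_wait_for_pending() next to the existing pci_wait_for_pending_transaction(). A hedged sketch of the expected usage, assuming the helper polls the config word at 'pos' until the bits in 'mask' clear and returns nonzero on success, mirroring what the transaction wrapper is expected to do; the function name here is hypothetical:

#include <linux/pci.h>

/* Hypothetical: wait for the PCIe Transactions Pending bit to clear. */
static int hypothetical_wait_trpnd(struct pci_dev *dev)
{
	if (!dev->pcie_cap)
		return 1;	/* not a PCIe device; nothing to wait for */

	return pci_wait_for_pending(dev, dev->pcie_cap + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
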
@@ -951,10 +949,13 @@ int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, | |||
951 | int __pci_reset_function(struct pci_dev *dev); | 949 | int __pci_reset_function(struct pci_dev *dev); |
952 | int __pci_reset_function_locked(struct pci_dev *dev); | 950 | int __pci_reset_function_locked(struct pci_dev *dev); |
953 | int pci_reset_function(struct pci_dev *dev); | 951 | int pci_reset_function(struct pci_dev *dev); |
952 | int pci_try_reset_function(struct pci_dev *dev); | ||
954 | int pci_probe_reset_slot(struct pci_slot *slot); | 953 | int pci_probe_reset_slot(struct pci_slot *slot); |
955 | int pci_reset_slot(struct pci_slot *slot); | 954 | int pci_reset_slot(struct pci_slot *slot); |
955 | int pci_try_reset_slot(struct pci_slot *slot); | ||
956 | int pci_probe_reset_bus(struct pci_bus *bus); | 956 | int pci_probe_reset_bus(struct pci_bus *bus); |
957 | int pci_reset_bus(struct pci_bus *bus); | 957 | int pci_reset_bus(struct pci_bus *bus); |
958 | int pci_try_reset_bus(struct pci_bus *bus); | ||
958 | void pci_reset_bridge_secondary_bus(struct pci_dev *dev); | 959 | void pci_reset_bridge_secondary_bus(struct pci_dev *dev); |
959 | void pci_update_resource(struct pci_dev *dev, int resno); | 960 | void pci_update_resource(struct pci_dev *dev, int resno); |
960 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 961 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
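
pci_try_reset_function(), pci_try_reset_slot() and pci_try_reset_bus() are added alongside the blocking variants. A hedged example, assuming the "try" flavor fails fast (for example with a negative errno such as -EAGAIN) instead of sleeping when the device lock is contended:

#include <linux/pci.h>

/* Hypothetical: attempt a reset without blocking on the device lock. */
static void hypothetical_opportunistic_reset(struct pci_dev *pdev)
{
	if (pci_try_reset_function(pdev))
		dev_info(&pdev->dev, "device busy, reset deferred\n");
}
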
@@ -974,9 +975,14 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); | |||
974 | int pci_save_state(struct pci_dev *dev); | 975 | int pci_save_state(struct pci_dev *dev); |
975 | void pci_restore_state(struct pci_dev *dev); | 976 | void pci_restore_state(struct pci_dev *dev); |
976 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); | 977 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); |
977 | int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); | ||
978 | int pci_load_and_free_saved_state(struct pci_dev *dev, | 978 | int pci_load_and_free_saved_state(struct pci_dev *dev, |
979 | struct pci_saved_state **state); | 979 | struct pci_saved_state **state); |
980 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); | ||
981 | struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, | ||
982 | u16 cap); | ||
983 | int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); | ||
984 | int pci_add_ext_cap_save_buffer(struct pci_dev *dev, | ||
985 | u16 cap, unsigned int size); | ||
980 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); | 986 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); |
981 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); | 987 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); |
982 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); | 988 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); |
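
The saved-state hunk exposes pci_find_saved_cap(), pci_find_saved_ext_cap() and the two save-buffer allocators, and drops the unused pci_load_saved_state(). A hedged sketch of allocating save buffers at enumeration time for later pci_save_state()/pci_restore_state() cycles; the capability choices and buffer sizes are illustrative only:

#include <linux/pci.h>

/* Hypothetical allocation of config-space save areas. */
static int hypothetical_alloc_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					16 * sizeof(u16));
	if (error)
		return error;

	return pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_VC,
					   7 * sizeof(u32));
}
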
@@ -985,7 +991,6 @@ void pci_pme_active(struct pci_dev *dev, bool enable); | |||
985 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, | 991 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
986 | bool runtime, bool enable); | 992 | bool runtime, bool enable); |
987 | int pci_wake_from_d3(struct pci_dev *dev, bool enable); | 993 | int pci_wake_from_d3(struct pci_dev *dev, bool enable); |
988 | pci_power_t pci_target_state(struct pci_dev *dev); | ||
989 | int pci_prepare_to_sleep(struct pci_dev *dev); | 994 | int pci_prepare_to_sleep(struct pci_dev *dev); |
990 | int pci_back_from_sleep(struct pci_dev *dev); | 995 | int pci_back_from_sleep(struct pci_dev *dev); |
991 | bool pci_dev_run_wake(struct pci_dev *dev); | 996 | bool pci_dev_run_wake(struct pci_dev *dev); |
@@ -998,21 +1003,10 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, | |||
998 | return __pci_enable_wake(dev, state, false, enable); | 1003 | return __pci_enable_wake(dev, state, false, enable); |
999 | } | 1004 | } |
1000 | 1005 | ||
1001 | #define PCI_EXP_IDO_REQUEST (1<<0) | 1006 | /* PCI Virtual Channel */ |
1002 | #define PCI_EXP_IDO_COMPLETION (1<<1) | 1007 | int pci_save_vc_state(struct pci_dev *dev); |
1003 | void pci_enable_ido(struct pci_dev *dev, unsigned long type); | 1008 | void pci_restore_vc_state(struct pci_dev *dev); |
1004 | void pci_disable_ido(struct pci_dev *dev, unsigned long type); | 1009 | void pci_allocate_vc_save_buffers(struct pci_dev *dev); |
1005 | |||
1006 | enum pci_obff_signal_type { | ||
1007 | PCI_EXP_OBFF_SIGNAL_L0 = 0, | ||
1008 | PCI_EXP_OBFF_SIGNAL_ALWAYS = 1, | ||
1009 | }; | ||
1010 | int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type); | ||
1011 | void pci_disable_obff(struct pci_dev *dev); | ||
1012 | |||
1013 | int pci_enable_ltr(struct pci_dev *dev); | ||
1014 | void pci_disable_ltr(struct pci_dev *dev); | ||
1015 | int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns); | ||
1016 | 1010 | ||
1017 | /* For use by arch with custom probe code */ | 1011 | /* For use by arch with custom probe code */ |
1018 | void set_pcie_port_type(struct pci_dev *pdev); | 1012 | void set_pcie_port_type(struct pci_dev *pdev); |
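
The unused IDO/OBFF/LTR wrappers are removed and three Virtual Channel helpers take their place. A hedged sketch, assuming the VC helpers parallel pci_save_state()/pci_restore_state() and are worth calling around a reset that clobbers the VC configuration:

#include <linux/pci.h>

/* Hypothetical: preserve Virtual Channel setup across a function reset. */
static int hypothetical_reset_keeping_vc(struct pci_dev *pdev)
{
	int ret;

	ret = pci_save_vc_state(pdev);
	if (ret)
		return ret;

	ret = pci_reset_function(pdev);
	pci_restore_vc_state(pdev);
	return ret;
}
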
@@ -1022,11 +1016,12 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev); | |||
1022 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); | 1016 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); |
1023 | unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); | 1017 | unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); |
1024 | unsigned int pci_rescan_bus(struct pci_bus *bus); | 1018 | unsigned int pci_rescan_bus(struct pci_bus *bus); |
1019 | void pci_lock_rescan_remove(void); | ||
1020 | void pci_unlock_rescan_remove(void); | ||
1025 | 1021 | ||
1026 | /* Vital product data routines */ | 1022 | /* Vital product data routines */ |
1027 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); | 1023 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); |
1028 | ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); | 1024 | ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); |
1029 | int pci_vpd_truncate(struct pci_dev *dev, size_t size); | ||
1030 | 1025 | ||
1031 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ | 1026 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ |
1032 | resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); | 1027 | resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); |
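
pci_lock_rescan_remove()/pci_unlock_rescan_remove() give callers a way to serialize device removal against concurrent bus rescans, with pci_stop_and_remove_bus_device_locked() (added earlier in this header) presumably acting as the convenience wrapper. A minimal sketch of the open-coded form:

#include <linux/pci.h>

/* Hypothetical hotplug path: remove a device under the rescan/remove lock. */
static void hypothetical_safe_remove(struct pci_dev *pdev)
{
	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();
}
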
@@ -1078,6 +1073,14 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, | |||
1078 | resource_size_t), | 1073 | resource_size_t), |
1079 | void *alignf_data); | 1074 | void *alignf_data); |
1080 | 1075 | ||
1076 | static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) | ||
1077 | { | ||
1078 | struct pci_bus_region region; | ||
1079 | |||
1080 | pcibios_resource_to_bus(pdev->bus, ®ion, &pdev->resource[bar]); | ||
1081 | return region.start; | ||
1082 | } | ||
1083 | |||
1081 | /* Proper probing supporting hot-pluggable devices */ | 1084 | /* Proper probing supporting hot-pluggable devices */ |
1082 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, | 1085 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, |
1083 | const char *mod_name); | 1086 | const char *mod_name); |
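
The new pci_bus_address() inline converts a BAR's CPU resource into a bus address via pcibios_resource_to_bus(), which is useful when a device register must be programmed with the bus (not CPU) address of one of its own BARs. A hedged usage sketch; the register offset and function name are made up for illustration:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Hypothetical: tell the device the bus address of its own BAR 0. */
static void hypothetical_program_bar0(struct pci_dev *pdev,
				      void __iomem *regs)
{
	dma_addr_t bus_addr = pci_bus_address(pdev, 0);

	writel(lower_32_bits(bus_addr), regs + 0x10);	/* offset illustrative */
}
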
@@ -1115,7 +1118,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
1115 | 1118 | ||
1116 | void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), | 1119 | void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), |
1117 | void *userdata); | 1120 | void *userdata); |
1118 | int pci_cfg_space_size_ext(struct pci_dev *dev); | ||
1119 | int pci_cfg_space_size(struct pci_dev *dev); | 1121 | int pci_cfg_space_size(struct pci_dev *dev); |
1120 | unsigned char pci_bus_max_busnr(struct pci_bus *bus); | 1122 | unsigned char pci_bus_max_busnr(struct pci_bus *bus); |
1121 | void pci_setup_bridge(struct pci_bus *bus); | 1123 | void pci_setup_bridge(struct pci_bus *bus); |
@@ -1154,59 +1156,42 @@ struct msix_entry { | |||
1154 | }; | 1156 | }; |
1155 | 1157 | ||
1156 | 1158 | ||
1157 | #ifndef CONFIG_PCI_MSI | 1159 | #ifdef CONFIG_PCI_MSI |
1158 | static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) | 1160 | int pci_msi_vec_count(struct pci_dev *dev); |
1159 | { | 1161 | int pci_enable_msi_block(struct pci_dev *dev, int nvec); |
1160 | return -1; | ||
1161 | } | ||
1162 | |||
1163 | static inline int | ||
1164 | pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec) | ||
1165 | { | ||
1166 | return -1; | ||
1167 | } | ||
1168 | |||
1169 | static inline void pci_msi_shutdown(struct pci_dev *dev) | ||
1170 | { } | ||
1171 | static inline void pci_disable_msi(struct pci_dev *dev) | ||
1172 | { } | ||
1173 | |||
1174 | static inline int pci_msix_table_size(struct pci_dev *dev) | ||
1175 | { | ||
1176 | return 0; | ||
1177 | } | ||
1178 | static inline int pci_enable_msix(struct pci_dev *dev, | ||
1179 | struct msix_entry *entries, int nvec) | ||
1180 | { | ||
1181 | return -1; | ||
1182 | } | ||
1183 | |||
1184 | static inline void pci_msix_shutdown(struct pci_dev *dev) | ||
1185 | { } | ||
1186 | static inline void pci_disable_msix(struct pci_dev *dev) | ||
1187 | { } | ||
1188 | |||
1189 | static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) | ||
1190 | { } | ||
1191 | |||
1192 | static inline void pci_restore_msi_state(struct pci_dev *dev) | ||
1193 | { } | ||
1194 | static inline int pci_msi_enabled(void) | ||
1195 | { | ||
1196 | return 0; | ||
1197 | } | ||
1198 | #else | ||
1199 | int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); | ||
1200 | int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec); | ||
1201 | void pci_msi_shutdown(struct pci_dev *dev); | 1162 | void pci_msi_shutdown(struct pci_dev *dev); |
1202 | void pci_disable_msi(struct pci_dev *dev); | 1163 | void pci_disable_msi(struct pci_dev *dev); |
1203 | int pci_msix_table_size(struct pci_dev *dev); | 1164 | int pci_msix_vec_count(struct pci_dev *dev); |
1204 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); | 1165 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); |
1205 | void pci_msix_shutdown(struct pci_dev *dev); | 1166 | void pci_msix_shutdown(struct pci_dev *dev); |
1206 | void pci_disable_msix(struct pci_dev *dev); | 1167 | void pci_disable_msix(struct pci_dev *dev); |
1207 | void msi_remove_pci_irq_vectors(struct pci_dev *dev); | 1168 | void msi_remove_pci_irq_vectors(struct pci_dev *dev); |
1208 | void pci_restore_msi_state(struct pci_dev *dev); | 1169 | void pci_restore_msi_state(struct pci_dev *dev); |
1209 | int pci_msi_enabled(void); | 1170 | int pci_msi_enabled(void); |
1171 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); | ||
1172 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, | ||
1173 | int minvec, int maxvec); | ||
1174 | #else | ||
1175 | static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } | ||
1176 | static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) | ||
1177 | { return -ENOSYS; } | ||
1178 | static inline void pci_msi_shutdown(struct pci_dev *dev) { } | ||
1179 | static inline void pci_disable_msi(struct pci_dev *dev) { } | ||
1180 | static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } | ||
1181 | static inline int pci_enable_msix(struct pci_dev *dev, | ||
1182 | struct msix_entry *entries, int nvec) | ||
1183 | { return -ENOSYS; } | ||
1184 | static inline void pci_msix_shutdown(struct pci_dev *dev) { } | ||
1185 | static inline void pci_disable_msix(struct pci_dev *dev) { } | ||
1186 | static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { } | ||
1187 | static inline void pci_restore_msi_state(struct pci_dev *dev) { } | ||
1188 | static inline int pci_msi_enabled(void) { return 0; } | ||
1189 | static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, | ||
1190 | int maxvec) | ||
1191 | { return -ENOSYS; } | ||
1192 | static inline int pci_enable_msix_range(struct pci_dev *dev, | ||
1193 | struct msix_entry *entries, int minvec, int maxvec) | ||
1194 | { return -ENOSYS; } | ||
1210 | #endif | 1195 | #endif |
1211 | 1196 | ||
1212 | #ifdef CONFIG_PCIEPORTBUS | 1197 | #ifdef CONFIG_PCIEPORTBUS |
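
The MSI block is reorganized so that CONFIG_PCI_MSI declares the real prototypes first and the stubs follow, now returning -ENOSYS instead of -1, with the new pci_enable_msi_range()/pci_enable_msix_range() interfaces added to both branches. A hedged driver-side sketch of the range API; the vector count of 8 and the fallback policy are illustrative:

#include <linux/pci.h>

#define HYPOTHETICAL_NVEC 8

/* Hypothetical: prefer up to 8 MSI-X vectors, accept as few as 1,
 * and fall back to a single MSI vector. */
static int hypothetical_setup_irqs(struct pci_dev *pdev,
				   struct msix_entry *entries)
{
	int i, nvec;

	for (i = 0; i < HYPOTHETICAL_NVEC; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, 1, HYPOTHETICAL_NVEC);
	if (nvec > 0)
		return nvec;	/* number of vectors actually granted */

	return pci_enable_msi_range(pdev, 1, 1);
}
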
@@ -1217,12 +1202,10 @@ extern bool pcie_ports_auto; | |||
1217 | #define pcie_ports_auto false | 1202 | #define pcie_ports_auto false |
1218 | #endif | 1203 | #endif |
1219 | 1204 | ||
1220 | #ifndef CONFIG_PCIEASPM | 1205 | #ifdef CONFIG_PCIEASPM |
1221 | static inline int pcie_aspm_enabled(void) { return 0; } | ||
1222 | static inline bool pcie_aspm_support_enabled(void) { return false; } | ||
1223 | #else | ||
1224 | int pcie_aspm_enabled(void); | ||
1225 | bool pcie_aspm_support_enabled(void); | 1206 | bool pcie_aspm_support_enabled(void); |
1207 | #else | ||
1208 | static inline bool pcie_aspm_support_enabled(void) { return false; } | ||
1226 | #endif | 1209 | #endif |
1227 | 1210 | ||
1228 | #ifdef CONFIG_PCIEAER | 1211 | #ifdef CONFIG_PCIEAER |
@@ -1233,15 +1216,12 @@ static inline void pci_no_aer(void) { } | |||
1233 | static inline bool pci_aer_available(void) { return false; } | 1216 | static inline bool pci_aer_available(void) { return false; } |
1234 | #endif | 1217 | #endif |
1235 | 1218 | ||
1236 | #ifndef CONFIG_PCIE_ECRC | 1219 | #ifdef CONFIG_PCIE_ECRC |
1237 | static inline void pcie_set_ecrc_checking(struct pci_dev *dev) | ||
1238 | { | ||
1239 | return; | ||
1240 | } | ||
1241 | static inline void pcie_ecrc_get_policy(char *str) {}; | ||
1242 | #else | ||
1243 | void pcie_set_ecrc_checking(struct pci_dev *dev); | 1220 | void pcie_set_ecrc_checking(struct pci_dev *dev); |
1244 | void pcie_ecrc_get_policy(char *str); | 1221 | void pcie_ecrc_get_policy(char *str); |
1222 | #else | ||
1223 | static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } | ||
1224 | static inline void pcie_ecrc_get_policy(char *str) { } | ||
1245 | #endif | 1225 | #endif |
1246 | 1226 | ||
1247 | #define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) | 1227 | #define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) |
@@ -1265,15 +1245,8 @@ void pci_cfg_access_unlock(struct pci_dev *dev); | |||
1265 | extern int pci_domains_supported; | 1245 | extern int pci_domains_supported; |
1266 | #else | 1246 | #else |
1267 | enum { pci_domains_supported = 0 }; | 1247 | enum { pci_domains_supported = 0 }; |
1268 | static inline int pci_domain_nr(struct pci_bus *bus) | 1248 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } |
1269 | { | 1249 | static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } |
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
1274 | { | ||
1275 | return 0; | ||
1276 | } | ||
1277 | #endif /* CONFIG_PCI_DOMAINS */ | 1250 | #endif /* CONFIG_PCI_DOMAINS */ |
1278 | 1251 | ||
1279 | /* some architectures require additional setup to direct VGA traffic */ | 1252 | /* some architectures require additional setup to direct VGA traffic */ |
@@ -1302,180 +1275,88 @@ _PCI_NOP_ALL(write,) | |||
1302 | static inline struct pci_dev *pci_get_device(unsigned int vendor, | 1275 | static inline struct pci_dev *pci_get_device(unsigned int vendor, |
1303 | unsigned int device, | 1276 | unsigned int device, |
1304 | struct pci_dev *from) | 1277 | struct pci_dev *from) |
1305 | { | 1278 | { return NULL; } |
1306 | return NULL; | ||
1307 | } | ||
1308 | 1279 | ||
1309 | static inline struct pci_dev *pci_get_subsys(unsigned int vendor, | 1280 | static inline struct pci_dev *pci_get_subsys(unsigned int vendor, |
1310 | unsigned int device, | 1281 | unsigned int device, |
1311 | unsigned int ss_vendor, | 1282 | unsigned int ss_vendor, |
1312 | unsigned int ss_device, | 1283 | unsigned int ss_device, |
1313 | struct pci_dev *from) | 1284 | struct pci_dev *from) |
1314 | { | 1285 | { return NULL; } |
1315 | return NULL; | ||
1316 | } | ||
1317 | 1286 | ||
1318 | static inline struct pci_dev *pci_get_class(unsigned int class, | 1287 | static inline struct pci_dev *pci_get_class(unsigned int class, |
1319 | struct pci_dev *from) | 1288 | struct pci_dev *from) |
1320 | { | 1289 | { return NULL; } |
1321 | return NULL; | ||
1322 | } | ||
1323 | 1290 | ||
1324 | #define pci_dev_present(ids) (0) | 1291 | #define pci_dev_present(ids) (0) |
1325 | #define no_pci_devices() (1) | 1292 | #define no_pci_devices() (1) |
1326 | #define pci_dev_put(dev) do { } while (0) | 1293 | #define pci_dev_put(dev) do { } while (0) |
1327 | 1294 | ||
1328 | static inline void pci_set_master(struct pci_dev *dev) | 1295 | static inline void pci_set_master(struct pci_dev *dev) { } |
1329 | { } | 1296 | static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } |
1330 | 1297 | static inline void pci_disable_device(struct pci_dev *dev) { } | |
1331 | static inline int pci_enable_device(struct pci_dev *dev) | ||
1332 | { | ||
1333 | return -EIO; | ||
1334 | } | ||
1335 | |||
1336 | static inline void pci_disable_device(struct pci_dev *dev) | ||
1337 | { } | ||
1338 | |||
1339 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | 1298 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
1340 | { | 1299 | { return -EIO; } |
1341 | return -EIO; | ||
1342 | } | ||
1343 | |||
1344 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | 1300 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) |
1345 | { | 1301 | { return -EIO; } |
1346 | return -EIO; | ||
1347 | } | ||
1348 | |||
1349 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, | 1302 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, |
1350 | unsigned int size) | 1303 | unsigned int size) |
1351 | { | 1304 | { return -EIO; } |
1352 | return -EIO; | ||
1353 | } | ||
1354 | |||
1355 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, | 1305 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, |
1356 | unsigned long mask) | 1306 | unsigned long mask) |
1357 | { | 1307 | { return -EIO; } |
1358 | return -EIO; | ||
1359 | } | ||
1360 | |||
1361 | static inline int pci_assign_resource(struct pci_dev *dev, int i) | 1308 | static inline int pci_assign_resource(struct pci_dev *dev, int i) |
1362 | { | 1309 | { return -EBUSY; } |
1363 | return -EBUSY; | ||
1364 | } | ||
1365 | |||
1366 | static inline int __pci_register_driver(struct pci_driver *drv, | 1310 | static inline int __pci_register_driver(struct pci_driver *drv, |
1367 | struct module *owner) | 1311 | struct module *owner) |
1368 | { | 1312 | { return 0; } |
1369 | return 0; | ||
1370 | } | ||
1371 | |||
1372 | static inline int pci_register_driver(struct pci_driver *drv) | 1313 | static inline int pci_register_driver(struct pci_driver *drv) |
1373 | { | 1314 | { return 0; } |
1374 | return 0; | 1315 | static inline void pci_unregister_driver(struct pci_driver *drv) { } |
1375 | } | ||
1376 | |||
1377 | static inline void pci_unregister_driver(struct pci_driver *drv) | ||
1378 | { } | ||
1379 | |||
1380 | static inline int pci_find_capability(struct pci_dev *dev, int cap) | 1316 | static inline int pci_find_capability(struct pci_dev *dev, int cap) |
1381 | { | 1317 | { return 0; } |
1382 | return 0; | ||
1383 | } | ||
1384 | |||
1385 | static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, | 1318 | static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, |
1386 | int cap) | 1319 | int cap) |
1387 | { | 1320 | { return 0; } |
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) | 1321 | static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) |
1392 | { | 1322 | { return 0; } |
1393 | return 0; | ||
1394 | } | ||
1395 | 1323 | ||
1396 | /* Power management related routines */ | 1324 | /* Power management related routines */ |
1397 | static inline int pci_save_state(struct pci_dev *dev) | 1325 | static inline int pci_save_state(struct pci_dev *dev) { return 0; } |
1398 | { | 1326 | static inline void pci_restore_state(struct pci_dev *dev) { } |
1399 | return 0; | ||
1400 | } | ||
1401 | |||
1402 | static inline void pci_restore_state(struct pci_dev *dev) | ||
1403 | { } | ||
1404 | |||
1405 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | 1327 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
1406 | { | 1328 | { return 0; } |
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) | 1329 | static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) |
1411 | { | 1330 | { return 0; } |
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, | 1331 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, |
1416 | pm_message_t state) | 1332 | pm_message_t state) |
1417 | { | 1333 | { return PCI_D0; } |
1418 | return PCI_D0; | ||
1419 | } | ||
1420 | |||
1421 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, | 1334 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
1422 | int enable) | 1335 | int enable) |
1423 | { | 1336 | { return 0; } |
1424 | return 0; | ||
1425 | } | ||
1426 | |||
1427 | static inline void pci_enable_ido(struct pci_dev *dev, unsigned long type) | ||
1428 | { | ||
1429 | } | ||
1430 | |||
1431 | static inline void pci_disable_ido(struct pci_dev *dev, unsigned long type) | ||
1432 | { | ||
1433 | } | ||
1434 | |||
1435 | static inline int pci_enable_obff(struct pci_dev *dev, unsigned long type) | ||
1436 | { | ||
1437 | return 0; | ||
1438 | } | ||
1439 | |||
1440 | static inline void pci_disable_obff(struct pci_dev *dev) | ||
1441 | { | ||
1442 | } | ||
1443 | 1337 | ||
1444 | static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) | 1338 | static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) |
1445 | { | 1339 | { return -EIO; } |
1446 | return -EIO; | 1340 | static inline void pci_release_regions(struct pci_dev *dev) { } |
1447 | } | ||
1448 | |||
1449 | static inline void pci_release_regions(struct pci_dev *dev) | ||
1450 | { } | ||
1451 | 1341 | ||
1452 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) | 1342 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) |
1453 | 1343 | ||
1454 | static inline void pci_block_cfg_access(struct pci_dev *dev) | 1344 | static inline void pci_block_cfg_access(struct pci_dev *dev) { } |
1455 | { } | ||
1456 | |||
1457 | static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) | 1345 | static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) |
1458 | { return 0; } | 1346 | { return 0; } |
1459 | 1347 | static inline void pci_unblock_cfg_access(struct pci_dev *dev) { } | |
1460 | static inline void pci_unblock_cfg_access(struct pci_dev *dev) | ||
1461 | { } | ||
1462 | 1348 | ||
1463 | static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) | 1349 | static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) |
1464 | { return NULL; } | 1350 | { return NULL; } |
1465 | |||
1466 | static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, | 1351 | static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, |
1467 | unsigned int devfn) | 1352 | unsigned int devfn) |
1468 | { return NULL; } | 1353 | { return NULL; } |
1469 | |||
1470 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | 1354 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, |
1471 | unsigned int devfn) | 1355 | unsigned int devfn) |
1472 | { return NULL; } | 1356 | { return NULL; } |
1473 | 1357 | ||
1474 | static inline int pci_domain_nr(struct pci_bus *bus) | 1358 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } |
1475 | { return 0; } | 1359 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } |
1476 | |||
1477 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) | ||
1478 | { return NULL; } | ||
1479 | 1360 | ||
1480 | #define dev_is_pci(d) (false) | 1361 | #define dev_is_pci(d) (false) |
1481 | #define dev_is_pf(d) (false) | 1362 | #define dev_is_pf(d) (false) |
@@ -1486,10 +1367,6 @@ static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) | |||
1486 | 1367 | ||
1487 | #include <asm/pci.h> | 1368 | #include <asm/pci.h> |
1488 | 1369 | ||
1489 | #ifndef PCIBIOS_MAX_MEM_32 | ||
1490 | #define PCIBIOS_MAX_MEM_32 (-1) | ||
1491 | #endif | ||
1492 | |||
1493 | /* these helpers provide future and backwards compatibility | 1370 | /* these helpers provide future and backwards compatibility |
1494 | * for accessing popular PCI BAR info */ | 1371 | * for accessing popular PCI BAR info */ |
1495 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) | 1372 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) |
@@ -1635,7 +1512,7 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev); | |||
1635 | int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); | 1512 | int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); |
1636 | #else | 1513 | #else |
1637 | static inline void pci_fixup_device(enum pci_fixup_pass pass, | 1514 | static inline void pci_fixup_device(enum pci_fixup_pass pass, |
1638 | struct pci_dev *dev) {} | 1515 | struct pci_dev *dev) { } |
1639 | static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev) | 1516 | static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev) |
1640 | { | 1517 | { |
1641 | return pci_dev_get(dev); | 1518 | return pci_dev_get(dev); |
@@ -1707,32 +1584,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); | |||
1707 | int pci_sriov_get_totalvfs(struct pci_dev *dev); | 1584 | int pci_sriov_get_totalvfs(struct pci_dev *dev); |
1708 | #else | 1585 | #else |
1709 | static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) | 1586 | static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) |
1710 | { | 1587 | { return -ENODEV; } |
1711 | return -ENODEV; | 1588 | static inline void pci_disable_sriov(struct pci_dev *dev) { } |
1712 | } | ||
1713 | static inline void pci_disable_sriov(struct pci_dev *dev) | ||
1714 | { | ||
1715 | } | ||
1716 | static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) | 1589 | static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) |
1717 | { | 1590 | { return IRQ_NONE; } |
1718 | return IRQ_NONE; | 1591 | static inline int pci_num_vf(struct pci_dev *dev) { return 0; } |
1719 | } | ||
1720 | static inline int pci_num_vf(struct pci_dev *dev) | ||
1721 | { | ||
1722 | return 0; | ||
1723 | } | ||
1724 | static inline int pci_vfs_assigned(struct pci_dev *dev) | 1592 | static inline int pci_vfs_assigned(struct pci_dev *dev) |
1725 | { | 1593 | { return 0; } |
1726 | return 0; | ||
1727 | } | ||
1728 | static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) | 1594 | static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) |
1729 | { | 1595 | { return 0; } |
1730 | return 0; | ||
1731 | } | ||
1732 | static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) | 1596 | static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) |
1733 | { | 1597 | { return 0; } |
1734 | return 0; | ||
1735 | } | ||
1736 | #endif | 1598 | #endif |
1737 | 1599 | ||
1738 | #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) | 1600 | #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) |
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 4a98e85438a7..ab6b4e7f6657 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h | |||
@@ -518,8 +518,16 @@ | |||
518 | #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ | 518 | #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ |
519 | #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ | 519 | #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ |
520 | #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ | 520 | #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ |
521 | #define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */ | ||
522 | #define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */ | ||
523 | #define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */ | ||
521 | #define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ | 524 | #define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ |
525 | #define PCI_EXP_SLTCTL_PWR_IND_ON 0x0100 /* Power Indicator on */ | ||
526 | #define PCI_EXP_SLTCTL_PWR_IND_BLINK 0x0200 /* Power Indicator blinking */ | ||
527 | #define PCI_EXP_SLTCTL_PWR_IND_OFF 0x0300 /* Power Indicator off */ | ||
522 | #define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ | 528 | #define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ |
529 | #define PCI_EXP_SLTCTL_PWR_ON 0x0000 /* Power On */ | ||
530 | #define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */ | ||
523 | #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ | 531 | #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ |
524 | #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ | 532 | #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ |
525 | #define PCI_EXP_SLTSTA 26 /* Slot Status */ | 533 | #define PCI_EXP_SLTSTA 26 /* Slot Status */ |
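
pci_regs.h gains named values for the attention- and power-indicator fields and the power-controller bit of Slot Control. A hedged hotplug sketch, assuming pcie_capability_clear_and_set_word() is used to rewrite the two-bit indicator field:

#include <linux/pci.h>

/* Hypothetical: make the slot's attention indicator blink. */
static void hypothetical_blink_attention(struct pci_dev *bridge)
{
	pcie_capability_clear_and_set_word(bridge, PCI_EXP_SLTCTL,
					   PCI_EXP_SLTCTL_AIC,
					   PCI_EXP_SLTCTL_ATTN_IND_BLINK);
}
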
@@ -677,17 +685,34 @@ | |||
677 | #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ | 685 | #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ |
678 | 686 | ||
679 | /* Virtual Channel */ | 687 | /* Virtual Channel */ |
680 | #define PCI_VC_PORT_REG1 4 | 688 | #define PCI_VC_PORT_CAP1 4 |
681 | #define PCI_VC_REG1_EVCC 0x7 /* extended VC count */ | 689 | #define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */ |
682 | #define PCI_VC_PORT_REG2 8 | 690 | #define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */ |
683 | #define PCI_VC_REG2_32_PHASE 0x2 | 691 | #define PCI_VC_CAP1_ARB_SIZE 0x00000c00 |
684 | #define PCI_VC_REG2_64_PHASE 0x4 | 692 | #define PCI_VC_PORT_CAP2 8 |
685 | #define PCI_VC_REG2_128_PHASE 0x8 | 693 | #define PCI_VC_CAP2_32_PHASE 0x00000002 |
694 | #define PCI_VC_CAP2_64_PHASE 0x00000004 | ||
695 | #define PCI_VC_CAP2_128_PHASE 0x00000008 | ||
696 | #define PCI_VC_CAP2_ARB_OFF 0xff000000 | ||
686 | #define PCI_VC_PORT_CTRL 12 | 697 | #define PCI_VC_PORT_CTRL 12 |
698 | #define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001 | ||
687 | #define PCI_VC_PORT_STATUS 14 | 699 | #define PCI_VC_PORT_STATUS 14 |
700 | #define PCI_VC_PORT_STATUS_TABLE 0x00000001 | ||
688 | #define PCI_VC_RES_CAP 16 | 701 | #define PCI_VC_RES_CAP 16 |
702 | #define PCI_VC_RES_CAP_32_PHASE 0x00000002 | ||
703 | #define PCI_VC_RES_CAP_64_PHASE 0x00000004 | ||
704 | #define PCI_VC_RES_CAP_128_PHASE 0x00000008 | ||
705 | #define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010 | ||
706 | #define PCI_VC_RES_CAP_256_PHASE 0x00000020 | ||
707 | #define PCI_VC_RES_CAP_ARB_OFF 0xff000000 | ||
689 | #define PCI_VC_RES_CTRL 20 | 708 | #define PCI_VC_RES_CTRL 20 |
709 | #define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000 | ||
710 | #define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000 | ||
711 | #define PCI_VC_RES_CTRL_ID 0x07000000 | ||
712 | #define PCI_VC_RES_CTRL_ENABLE 0x80000000 | ||
690 | #define PCI_VC_RES_STATUS 26 | 713 | #define PCI_VC_RES_STATUS 26 |
714 | #define PCI_VC_RES_STATUS_TABLE 0x00000001 | ||
715 | #define PCI_VC_RES_STATUS_NEGO 0x00000002 | ||
691 | #define PCI_CAP_VC_BASE_SIZEOF 0x10 | 716 | #define PCI_CAP_VC_BASE_SIZEOF 0x10 |
692 | #define PCI_CAP_VC_PER_VC_SIZEOF 0x0C | 717 | #define PCI_CAP_VC_PER_VC_SIZEOF 0x0C |
693 | 718 | ||
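
The Virtual Channel register block is renamed (PCI_VC_PORT_REG1/2 become PCI_VC_PORT_CAP1/2) and filled out with the arbitration, control and status masks that VC save/restore code needs. A hedged sketch reading the extended VC count with the new names; the helper name is illustrative:

#include <linux/pci.h>

/* Hypothetical: number of VCs (VC0 plus extended VCs) on this port. */
static int hypothetical_vc_count(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC);
	u32 cap1;

	if (!pos)
		return 0;

	pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
	return (cap1 & PCI_VC_CAP1_EVCC) + 1;
}
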
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9c9810030377..9fb30b15c9dc 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -2634,10 +2634,13 @@ sub process { | |||
2634 | $herecurr); | 2634 | $herecurr); |
2635 | } | 2635 | } |
2636 | 2636 | ||
2637 | # check for declarations of struct pci_device_id | 2637 | # check for uses of DEFINE_PCI_DEVICE_TABLE |
2638 | if ($line =~ /\bstruct\s+pci_device_id\s+\w+\s*\[\s*\]\s*\=\s*\{/) { | 2638 | if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) { |
2639 | WARN("DEFINE_PCI_DEVICE_TABLE", | 2639 | if (WARN("DEFINE_PCI_DEVICE_TABLE", |
2640 | "Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\n" . $herecurr); | 2640 | "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) && |
2641 | $fix) { | ||
2642 | $fixed[$linenr - 1] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /; | ||
2643 | } | ||
2641 | } | 2644 | } |
2642 | 2645 | ||
2643 | # check for new typedefs, only function parameters and sparse annotations | 2646 | # check for new typedefs, only function parameters and sparse annotations |
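
checkpatch now warns on (and with --fix rewrites) uses of the deprecated DEFINE_PCI_DEVICE_TABLE macro rather than recommending it. For illustration, the open-coded form the fixer produces looks like the following; the identifiers and IDs are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

/* was: static DEFINE_PCI_DEVICE_TABLE(foo_pci_ids) = { ... }; */
static const struct pci_device_id foo_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, foo_pci_ids);
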