aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci11
-rw-r--r--Documentation/PCI/00-INDEX4
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt308
-rw-r--r--Documentation/PCI/pci.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt2
-rw-r--r--MAINTAINERS33
-rw-r--r--arch/alpha/kernel/pci_iommu.c2
-rw-r--r--arch/arm/common/it8152.c4
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c6
-rw-r--r--arch/ia64/hp/common/sba_iommu.c2
-rw-r--r--arch/ia64/sn/pci/pci_dma.c24
-rw-r--r--arch/parisc/kernel/drivers.c22
-rw-r--r--arch/s390/pci/pci.c4
-rw-r--r--arch/sparc/kernel/iommu.c2
-rw-r--r--arch/sparc/kernel/ioport.c5
-rw-r--r--arch/x86/include/asm/pci.h2
-rw-r--r--arch/x86/include/asm/x86_init.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c4
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--drivers/ata/ahci.c56
-rw-r--r--drivers/eisa/eisa-bus.c4
-rw-r--r--drivers/pci/Kconfig3
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c1
-rw-r--r--drivers/pci/host/pci-exynos.c5
-rw-r--r--drivers/pci/host/pci-imx6.c225
-rw-r--r--drivers/pci/host/pci-mvebu.c104
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c12
-rw-r--r--drivers/pci/host/pci-tegra.c2
-rw-r--r--drivers/pci/host/pcie-designware.c91
-rw-r--r--drivers/pci/host/pcie-designware.h4
-rw-r--r--drivers/pci/hotplug/pciehp.h14
-rw-r--r--drivers/pci/hotplug/pciehp_core.c15
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c90
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c380
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c6
-rw-r--r--drivers/pci/ioapic.c6
-rw-r--r--drivers/pci/iov.c1
-rw-r--r--drivers/pci/msi.c348
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c38
-rw-r--r--drivers/pci/pci.c102
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c48
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c95
-rw-r--r--drivers/pci/pcie/portdrv_core.c36
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pci/remove.c25
-rw-r--r--drivers/pci/setup-bus.c16
-rw-r--r--drivers/pci/vc.c434
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c12
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/msi.h4
-rw-r--r--include/linux/pci.h85
-rw-r--r--include/uapi/linux/pci_regs.h37
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/workqueue.c32
-rwxr-xr-xscripts/checkpatch.pl11
58 files changed, 1809 insertions, 1016 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 5210a51c90fd..a3c5a6685036 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -70,18 +70,15 @@ Date: September, 2011
70Contact: Neil Horman <nhorman@tuxdriver.com> 70Contact: Neil Horman <nhorman@tuxdriver.com>
71Description: 71Description:
72 The /sys/devices/.../msi_irqs directory contains a variable set 72 The /sys/devices/.../msi_irqs directory contains a variable set
73 of sub-directories, with each sub-directory being named after a 73 of files, with each file being named after a corresponding msi
74 corresponding msi irq vector allocated to that device. Each 74 irq vector allocated to that device.
75 numbered sub-directory N contains attributes of that irq.
76 Note that this directory is not created for device drivers which
77 do not support msi irqs
78 75
79What: /sys/bus/pci/devices/.../msi_irqs/<N>/mode 76What: /sys/bus/pci/devices/.../msi_irqs/<N>
80Date: September 2011 77Date: September 2011
81Contact: Neil Horman <nhorman@tuxdriver.com> 78Contact: Neil Horman <nhorman@tuxdriver.com>
82Description: 79Description:
83 This attribute indicates the mode that the irq vector named by 80 This attribute indicates the mode that the irq vector named by
84 the parent directory is in (msi vs. msix) 81 the file is in (msi vs. msix)
85 82
86What: /sys/bus/pci/devices/.../remove 83What: /sys/bus/pci/devices/.../remove
87Date: January 2009 84Date: January 2009
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
index 812b17fe3ed0..147231f1613e 100644
--- a/Documentation/PCI/00-INDEX
+++ b/Documentation/PCI/00-INDEX
@@ -2,12 +2,12 @@
2 - this file 2 - this file
3MSI-HOWTO.txt 3MSI-HOWTO.txt
4 - the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ. 4 - the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ.
5PCI-DMA-mapping.txt
6 - info for PCI drivers using DMA portably across all platforms
7PCIEBUS-HOWTO.txt 5PCIEBUS-HOWTO.txt
8 - a guide describing the PCI Express Port Bus driver 6 - a guide describing the PCI Express Port Bus driver
9pci-error-recovery.txt 7pci-error-recovery.txt
10 - info on PCI error recovery 8 - info on PCI error recovery
9pci-iov-howto.txt
10 - the PCI Express I/O Virtualization HOWTO
11pci.txt 11pci.txt
12 - info on the PCI subsystem for device driver authors 12 - info on the PCI subsystem for device driver authors
13pcieaer-howto.txt 13pcieaer-howto.txt
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index a09178086c30..a8d01005f480 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -82,93 +82,111 @@ Most of the hard work is done for the driver in the PCI layer. It simply
82has to request that the PCI layer set up the MSI capability for this 82has to request that the PCI layer set up the MSI capability for this
83device. 83device.
84 84
854.2.1 pci_enable_msi 854.2.1 pci_enable_msi_range
86 86
87int pci_enable_msi(struct pci_dev *dev) 87int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
88 88
89A successful call allocates ONE interrupt to the device, regardless 89This function allows a device driver to request any number of MSI
90of how many MSIs the device supports. The device is switched from 90interrupts within specified range from 'minvec' to 'maxvec'.
91pin-based interrupt mode to MSI mode. The dev->irq number is changed
92to a new number which represents the message signaled interrupt;
93consequently, this function should be called before the driver calls
94request_irq(), because an MSI is delivered via a vector that is
95different from the vector of a pin-based interrupt.
96 91
974.2.2 pci_enable_msi_block 92If this function returns a positive number it indicates the number of
93MSI interrupts that have been successfully allocated. In this case
94the device is switched from pin-based interrupt mode to MSI mode and
95updates dev->irq to be the lowest of the new interrupts assigned to it.
96The other interrupts assigned to the device are in the range dev->irq
97to dev->irq + returned value - 1. Device driver can use the returned
98number of successfully allocated MSI interrupts to further allocate
99and initialize device resources.
98 100
99int pci_enable_msi_block(struct pci_dev *dev, int count) 101If this function returns a negative number, it indicates an error and
102the driver should not attempt to request any more MSI interrupts for
103this device.
100 104
101This variation on the above call allows a device driver to request multiple 105This function should be called before the driver calls request_irq(),
102MSIs. The MSI specification only allows interrupts to be allocated in 106because MSI interrupts are delivered via vectors that are different
103powers of two, up to a maximum of 2^5 (32). 107from the vector of a pin-based interrupt.
104 108
105If this function returns 0, it has succeeded in allocating at least as many 109It is ideal if drivers can cope with a variable number of MSI interrupts;
106interrupts as the driver requested (it may have allocated more in order 110there are many reasons why the platform may not be able to provide the
107to satisfy the power-of-two requirement). In this case, the function 111exact number that a driver asks for.
108enables MSI on this device and updates dev->irq to be the lowest of
109the new interrupts assigned to it. The other interrupts assigned to
110the device are in the range dev->irq to dev->irq + count - 1.
111 112
112If this function returns a negative number, it indicates an error and 113There could be devices that can not operate with just any number of MSI
113the driver should not attempt to request any more MSI interrupts for 114interrupts within a range. See chapter 4.3.1.3 to get the idea how to
114this device. If this function returns a positive number, it is 115handle such devices for MSI-X - the same logic applies to MSI.
115less than 'count' and indicates the number of interrupts that could have
116been allocated. In neither case is the irq value updated or the device
117switched into MSI mode.
118
119The device driver must decide what action to take if
120pci_enable_msi_block() returns a value less than the number requested.
121For instance, the driver could still make use of fewer interrupts;
122in this case the driver should call pci_enable_msi_block()
123again. Note that it is not guaranteed to succeed, even when the
124'count' has been reduced to the value returned from a previous call to
125pci_enable_msi_block(). This is because there are multiple constraints
126on the number of vectors that can be allocated; pci_enable_msi_block()
127returns as soon as it finds any constraint that doesn't allow the
128call to succeed.
129
1304.2.3 pci_enable_msi_block_auto
131
132int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *count)
133
134This variation on pci_enable_msi() call allows a device driver to request
135the maximum possible number of MSIs. The MSI specification only allows
136interrupts to be allocated in powers of two, up to a maximum of 2^5 (32).
137
138If this function returns a positive number, it indicates that it has
139succeeded and the returned value is the number of allocated interrupts. In
140this case, the function enables MSI on this device and updates dev->irq to
141be the lowest of the new interrupts assigned to it. The other interrupts
142assigned to the device are in the range dev->irq to dev->irq + returned
143value - 1.
144 116
145If this function returns a negative number, it indicates an error and 1174.2.1.1 Maximum possible number of MSI interrupts
146the driver should not attempt to request any more MSI interrupts for 118
147this device. 119The typical usage of MSI interrupts is to allocate as many vectors as
120possible, likely up to the limit returned by pci_msi_vec_count() function:
121
122static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
123{
124 return pci_enable_msi_range(pdev, 1, nvec);
125}
126
127Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
128the value of 0 would be meaningless and could result in error.
148 129
149If the device driver needs to know the number of interrupts the device 130Some devices have a minimal limit on number of MSI interrupts.
150supports it can pass the pointer count where that number is stored. The 131In this case the function could look like this:
151device driver must decide what action to take if pci_enable_msi_block_auto()
152succeeds, but returns a value less than the number of interrupts supported.
153If the device driver does not need to know the number of interrupts
154supported, it can set the pointer count to NULL.
155 132
1564.2.4 pci_disable_msi 133static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
134{
135 return pci_enable_msi_range(pdev, FOO_DRIVER_MINIMUM_NVEC, nvec);
136}
137
1384.2.1.2 Exact number of MSI interrupts
139
140If a driver is unable or unwilling to deal with a variable number of MSI
141interrupts it could request a particular number of interrupts by passing
142that number to pci_enable_msi_range() function as both 'minvec' and 'maxvec'
143parameters:
144
145static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
146{
147 return pci_enable_msi_range(pdev, nvec, nvec);
148}
149
1504.2.1.3 Single MSI mode
151
152The most notorious example of the request type described above is
153enabling the single MSI mode for a device. It could be done by passing
154two 1s as 'minvec' and 'maxvec':
155
156static int foo_driver_enable_single_msi(struct pci_dev *pdev)
157{
158 return pci_enable_msi_range(pdev, 1, 1);
159}
160
1614.2.2 pci_disable_msi
157 162
158void pci_disable_msi(struct pci_dev *dev) 163void pci_disable_msi(struct pci_dev *dev)
159 164
160This function should be used to undo the effect of pci_enable_msi() or 165This function should be used to undo the effect of pci_enable_msi_range().
161pci_enable_msi_block() or pci_enable_msi_block_auto(). Calling it restores 166Calling it restores dev->irq to the pin-based interrupt number and frees
162dev->irq to the pin-based interrupt number and frees the previously 167the previously allocated MSIs. The interrupts may subsequently be assigned
163allocated message signaled interrupt(s). The interrupt may subsequently be 168to another device, so drivers should not cache the value of dev->irq.
164assigned to another device, so drivers should not cache the value of
165dev->irq.
166 169
167Before calling this function, a device driver must always call free_irq() 170Before calling this function, a device driver must always call free_irq()
168on any interrupt for which it previously called request_irq(). 171on any interrupt for which it previously called request_irq().
169Failure to do so results in a BUG_ON(), leaving the device with 172Failure to do so results in a BUG_ON(), leaving the device with
170MSI enabled and thus leaking its vector. 173MSI enabled and thus leaking its vector.
171 174
1754.2.3 pci_msi_vec_count
176
177int pci_msi_vec_count(struct pci_dev *dev)
178
179This function could be used to retrieve the number of MSI vectors the
180device requested (via the Multiple Message Capable register). The MSI
181specification only allows the returned value to be a power of two,
182up to a maximum of 2^5 (32).
183
184If this function returns a negative number, it indicates the device is
185not capable of sending MSIs.
186
187If this function returns a positive number, it indicates the maximum
188number of MSI interrupt vectors that could be allocated.
189
1724.3 Using MSI-X 1904.3 Using MSI-X
173 191
174The MSI-X capability is much more flexible than the MSI capability. 192The MSI-X capability is much more flexible than the MSI capability.
@@ -188,26 +206,31 @@ in each element of the array to indicate for which entries the kernel
188should assign interrupts; it is invalid to fill in two entries with the 206should assign interrupts; it is invalid to fill in two entries with the
189same number. 207same number.
190 208
1914.3.1 pci_enable_msix 2094.3.1 pci_enable_msix_range
192 210
193int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) 211int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
212 int minvec, int maxvec)
194 213
195Calling this function asks the PCI subsystem to allocate 'nvec' MSIs. 214Calling this function asks the PCI subsystem to allocate any number of
215MSI-X interrupts within specified range from 'minvec' to 'maxvec'.
196The 'entries' argument is a pointer to an array of msix_entry structs 216The 'entries' argument is a pointer to an array of msix_entry structs
197which should be at least 'nvec' entries in size. On success, the 217which should be at least 'maxvec' entries in size.
198device is switched into MSI-X mode and the function returns 0. 218
199The 'vector' member in each entry is populated with the interrupt number; 219On success, the device is switched into MSI-X mode and the function
220returns the number of MSI-X interrupts that have been successfully
221allocated. In this case the 'vector' member in entries numbered from
2220 to the returned value - 1 is populated with the interrupt number;
200the driver should then call request_irq() for each 'vector' that it 223the driver should then call request_irq() for each 'vector' that it
201decides to use. The device driver is responsible for keeping track of the 224decides to use. The device driver is responsible for keeping track of the
202interrupts assigned to the MSI-X vectors so it can free them again later. 225interrupts assigned to the MSI-X vectors so it can free them again later.
226Device driver can use the returned number of successfully allocated MSI-X
227interrupts to further allocate and initialize device resources.
203 228
204If this function returns a negative number, it indicates an error and 229If this function returns a negative number, it indicates an error and
205the driver should not attempt to allocate any more MSI-X interrupts for 230the driver should not attempt to allocate any more MSI-X interrupts for
206this device. If it returns a positive number, it indicates the maximum 231this device.
207number of interrupt vectors that could have been allocated. See example
208below.
209 232
210This function, in contrast with pci_enable_msi(), does not adjust 233This function, in contrast with pci_enable_msi_range(), does not adjust
211dev->irq. The device will not generate interrupts for this interrupt 234dev->irq. The device will not generate interrupts for this interrupt
212number once MSI-X is enabled. 235number once MSI-X is enabled.
213 236
@@ -218,28 +241,103 @@ It is ideal if drivers can cope with a variable number of MSI-X interrupts;
218there are many reasons why the platform may not be able to provide the 241there are many reasons why the platform may not be able to provide the
219exact number that a driver asks for. 242exact number that a driver asks for.
220 243
221A request loop to achieve that might look like: 244There could be devices that can not operate with just any number of MSI-X
245 interrupts within a range. E.g., a network adapter might need, let's say,
246 four vectors per queue it provides. Therefore, the number of MSI-X
247interrupts allocated should be a multiple of four. In this case interface
248pci_enable_msix_range() can not be used alone to request MSI-X interrupts
249(since it can allocate any number within the range, without any notion of
250 the multiple of four) and the device driver should implement custom logic
251to request the required number of MSI-X interrupts.
252
2534.3.1.1 Maximum possible number of MSI-X interrupts
254
255The typical usage of MSI-X interrupts is to allocate as many vectors as
256possible, likely up to the limit returned by pci_msix_vec_count() function:
222 257
223static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 258static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
224{ 259{
225 while (nvec >= FOO_DRIVER_MINIMUM_NVEC) { 260 return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
226 rc = pci_enable_msix(adapter->pdev, 261 1, nvec);
227 adapter->msix_entries, nvec); 262}
228 if (rc > 0) 263
229 nvec = rc; 264Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
230 else 265the value of 0 would be meaningless and could result in error.
231 return rc; 266
267Some devices have a minimal limit on number of MSI-X interrupts.
268In this case the function could look like this:
269
270static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
271{
272 return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
273 FOO_DRIVER_MINIMUM_NVEC, nvec);
274}
275
2764.3.1.2 Exact number of MSI-X interrupts
277
278If a driver is unable or unwilling to deal with a variable number of MSI-X
279interrupts it could request a particular number of interrupts by passing
280that number to pci_enable_msix_range() function as both 'minvec' and 'maxvec'
281parameters:
282
283static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
284{
285 return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
286 nvec, nvec);
287}
288
2894.3.1.3 Specific requirements to the number of MSI-X interrupts
290
291As noted above, there could be devices that can not operate with just any
292number of MSI-X interrupts within a range. E.g., let's assume a device that
293is only capable of sending the number of MSI-X interrupts which is a power of
294two. A routine that enables MSI-X mode for such device might look like this:
295
296/*
297 * Assume 'minvec' and 'maxvec' are non-zero
298 */
299static int foo_driver_enable_msix(struct foo_adapter *adapter,
300 int minvec, int maxvec)
301{
302 int rc;
303
304 minvec = roundup_pow_of_two(minvec);
305 maxvec = rounddown_pow_of_two(maxvec);
306
307 if (minvec > maxvec)
308 return -ERANGE;
309
310retry:
311 rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
312 maxvec, maxvec);
313 /*
314 * -ENOSPC is the only error code allowed to be analyzed
315 */
316 if (rc == -ENOSPC) {
317 if (maxvec == 1)
318 return -ENOSPC;
319
320 maxvec /= 2;
321
322 if (minvec > maxvec)
323 return -ENOSPC;
324
325 goto retry;
232 } 326 }
233 327
234 return -ENOSPC; 328 return rc;
235} 329}
236 330
331Note how the pci_enable_msix_range() return value is analyzed for a fallback -
332any error code other than -ENOSPC indicates a fatal error and should not
333be retried.
334
2374.3.2 pci_disable_msix 3354.3.2 pci_disable_msix
238 336
239void pci_disable_msix(struct pci_dev *dev) 337void pci_disable_msix(struct pci_dev *dev)
240 338
241This function should be used to undo the effect of pci_enable_msix(). It frees 339This function should be used to undo the effect of pci_enable_msix_range().
242the previously allocated message signaled interrupts. The interrupts may 340It frees the previously allocated MSI-X interrupts. The interrupts may
243subsequently be assigned to another device, so drivers should not cache 341subsequently be assigned to another device, so drivers should not cache
244the value of the 'vector' elements over a call to pci_disable_msix(). 342the value of the 'vector' elements over a call to pci_disable_msix().
245 343
@@ -255,18 +353,32 @@ MSI-X Table. This address is mapped by the PCI subsystem, and should not
255be accessed directly by the device driver. If the driver wishes to 353be accessed directly by the device driver. If the driver wishes to
256mask or unmask an interrupt, it should call disable_irq() / enable_irq(). 354mask or unmask an interrupt, it should call disable_irq() / enable_irq().
257 355
3564.3.4 pci_msix_vec_count
357
358int pci_msix_vec_count(struct pci_dev *dev)
359
360This function could be used to retrieve the number of entries in the device
361MSI-X table.
362
363If this function returns a negative number, it indicates the device is
364not capable of sending MSI-Xs.
365
366If this function returns a positive number, it indicates the maximum
367number of MSI-X interrupt vectors that could be allocated.
368
2584.4 Handling devices implementing both MSI and MSI-X capabilities 3694.4 Handling devices implementing both MSI and MSI-X capabilities
259 370
260If a device implements both MSI and MSI-X capabilities, it can 371If a device implements both MSI and MSI-X capabilities, it can
261run in either MSI mode or MSI-X mode, but not both simultaneously. 372run in either MSI mode or MSI-X mode, but not both simultaneously.
262This is a requirement of the PCI spec, and it is enforced by the 373This is a requirement of the PCI spec, and it is enforced by the
263PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or 374PCI layer. Calling pci_enable_msi_range() when MSI-X is already
264pci_enable_msix() when MSI is already enabled results in an error. 375enabled or pci_enable_msix_range() when MSI is already enabled
265If a device driver wishes to switch between MSI and MSI-X at runtime, 376results in an error. If a device driver wishes to switch between MSI
266it must first quiesce the device, then switch it back to pin-interrupt 377and MSI-X at runtime, it must first quiesce the device, then switch
267mode, before calling pci_enable_msi() or pci_enable_msix() and resuming 378it back to pin-interrupt mode, before calling pci_enable_msi_range()
268operation. This is not expected to be a common operation but may be 379or pci_enable_msix_range() and resuming operation. This is not expected
269useful for debugging or testing during development. 380to be a common operation but may be useful for debugging or testing
381during development.
270 382
2714.5 Considerations when using MSIs 3834.5 Considerations when using MSIs
272 384
@@ -381,5 +493,5 @@ or disabled (0). If 0 is found in any of the msi_bus files belonging
381to bridges between the PCI root and the device, MSIs are disabled. 493to bridges between the PCI root and the device, MSIs are disabled.
382 494
383It is also worth checking the device driver to see whether it supports MSIs. 495It is also worth checking the device driver to see whether it supports MSIs.
384For example, it may contain calls to pci_enable_msi(), pci_enable_msix() or 496For example, it may contain calls to pci_enable_msi_range() or
385pci_enable_msi_block(). 497pci_enable_msix_range().
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 6f458564d625..9518006f6675 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -123,8 +123,10 @@ initialization with a pointer to a structure describing the driver
123 123
124 124
125The ID table is an array of struct pci_device_id entries ending with an 125The ID table is an array of struct pci_device_id entries ending with an
126all-zero entry; use of the macro DEFINE_PCI_DEVICE_TABLE is the preferred 126all-zero entry. Definitions with static const are generally preferred.
127method of declaring the table. Each entry consists of: 127Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
128
129Each entry consists of:
128 130
129 vendor,device Vendor and device ID to match (or PCI_ANY_ID) 131 vendor,device Vendor and device ID to match (or PCI_ANY_ID)
130 132
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index d5d26d443693..d6fae13ff062 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -19,6 +19,8 @@ Required properties:
19 to define the mapping of the PCIe interface to interrupt 19 to define the mapping of the PCIe interface to interrupt
20 numbers. 20 numbers.
21- num-lanes: number of lanes to use 21- num-lanes: number of lanes to use
22
23Optional properties:
22- reset-gpio: gpio pin number of power good signal 24- reset-gpio: gpio pin number of power good signal
23 25
24Optional properties for fsl,imx6q-pcie 26Optional properties for fsl,imx6q-pcie
diff --git a/MAINTAINERS b/MAINTAINERS
index 8285ed4676b6..624e6516fdd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6449,19 +6449,52 @@ F: drivers/pci/
6449F: include/linux/pci* 6449F: include/linux/pci*
6450F: arch/x86/pci/ 6450F: arch/x86/pci/
6451 6451
6452PCI DRIVER FOR IMX6
6453M: Richard Zhu <r65037@freescale.com>
6454M: Shawn Guo <shawn.guo@linaro.org>
6455L: linux-pci@vger.kernel.org
6456L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6457S: Maintained
6458F: drivers/pci/host/*imx6*
6459
6460PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
6461M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6462M: Jason Cooper <jason@lakedaemon.net>
6463L: linux-pci@vger.kernel.org
6464L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6465S: Maintained
6466F: drivers/pci/host/*mvebu*
6467
6452PCI DRIVER FOR NVIDIA TEGRA 6468PCI DRIVER FOR NVIDIA TEGRA
6453M: Thierry Reding <thierry.reding@gmail.com> 6469M: Thierry Reding <thierry.reding@gmail.com>
6454L: linux-tegra@vger.kernel.org 6470L: linux-tegra@vger.kernel.org
6471L: linux-pci@vger.kernel.org
6455S: Supported 6472S: Supported
6456F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt 6473F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
6457F: drivers/pci/host/pci-tegra.c 6474F: drivers/pci/host/pci-tegra.c
6458 6475
6476PCI DRIVER FOR RENESAS R-CAR
6477M: Simon Horman <horms@verge.net.au>
6478L: linux-pci@vger.kernel.org
6479L: linux-sh@vger.kernel.org
6480S: Maintained
6481F: drivers/pci/host/*rcar*
6482
6459PCI DRIVER FOR SAMSUNG EXYNOS 6483PCI DRIVER FOR SAMSUNG EXYNOS
6460M: Jingoo Han <jg1.han@samsung.com> 6484M: Jingoo Han <jg1.han@samsung.com>
6461L: linux-pci@vger.kernel.org 6485L: linux-pci@vger.kernel.org
6486L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6487L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
6462S: Maintained 6488S: Maintained
6463F: drivers/pci/host/pci-exynos.c 6489F: drivers/pci/host/pci-exynos.c
6464 6490
6491PCI DRIVER FOR SYNOPSIS DESIGNWARE
6492M: Mohit Kumar <mohit.kumar@st.com>
6493M: Jingoo Han <jg1.han@samsung.com>
6494L: linux-pci@vger.kernel.org
6495S: Maintained
6496F: drivers/pci/host/*designware*
6497
6465PCMCIA SUBSYSTEM 6498PCMCIA SUBSYSTEM
6466P: Linux PCMCIA Team 6499P: Linux PCMCIA Team
6467L: linux-pcmcia@lists.infradead.org 6500L: linux-pcmcia@lists.infradead.org
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index a21d0ab3b19e..eddee7720343 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -325,7 +325,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
325/* Helper for generic DMA-mapping functions. */ 325/* Helper for generic DMA-mapping functions. */
326static struct pci_dev *alpha_gendev_to_pci(struct device *dev) 326static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
327{ 327{
328 if (dev && dev->bus == &pci_bus_type) 328 if (dev && dev_is_pci(dev))
329 return to_pci_dev(dev); 329 return to_pci_dev(dev);
330 330
331 /* Assume that non-PCI devices asking for DMA are either ISA or EISA, 331 /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 001f4913799c..5114b68e99d5 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -257,7 +257,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
257 */ 257 */
258static int it8152_pci_platform_notify(struct device *dev) 258static int it8152_pci_platform_notify(struct device *dev)
259{ 259{
260 if (dev->bus == &pci_bus_type) { 260 if (dev_is_pci(dev)) {
261 if (dev->dma_mask) 261 if (dev->dma_mask)
262 *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; 262 *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
263 dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; 263 dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
@@ -268,7 +268,7 @@ static int it8152_pci_platform_notify(struct device *dev)
268 268
269static int it8152_pci_platform_notify_remove(struct device *dev) 269static int it8152_pci_platform_notify_remove(struct device *dev)
270{ 270{
271 if (dev->bus == &pci_bus_type) 271 if (dev_is_pci(dev))
272 dmabounce_unregister_dev(dev); 272 dmabounce_unregister_dev(dev);
273 273
274 return 0; 274 return 0;
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 6d6bde3e15fa..200970d56f6d 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -326,7 +326,7 @@ static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
326 */ 326 */
327static int ixp4xx_pci_platform_notify(struct device *dev) 327static int ixp4xx_pci_platform_notify(struct device *dev)
328{ 328{
329 if(dev->bus == &pci_bus_type) { 329 if (dev_is_pci(dev)) {
330 *dev->dma_mask = SZ_64M - 1; 330 *dev->dma_mask = SZ_64M - 1;
331 dev->coherent_dma_mask = SZ_64M - 1; 331 dev->coherent_dma_mask = SZ_64M - 1;
332 dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); 332 dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
@@ -336,9 +336,9 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
336 336
337static int ixp4xx_pci_platform_notify_remove(struct device *dev) 337static int ixp4xx_pci_platform_notify_remove(struct device *dev)
338{ 338{
339 if(dev->bus == &pci_bus_type) { 339 if (dev_is_pci(dev))
340 dmabounce_unregister_dev(dev); 340 dmabounce_unregister_dev(dev);
341 } 341
342 return 0; 342 return 0;
343} 343}
344 344
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4c530a82fc46..8e858b593e4f 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -255,7 +255,7 @@ static u64 prefetch_spill_page;
255#endif 255#endif
256 256
257#ifdef CONFIG_PCI 257#ifdef CONFIG_PCI
258# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ 258# define GET_IOC(dev) ((dev_is_pci(dev)) \
259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) 259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
260#else 260#else
261# define GET_IOC(dev) NULL 261# define GET_IOC(dev) NULL
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 3290d6e00c31..d0853e8e8623 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -34,7 +34,7 @@
34 */ 34 */
35static int sn_dma_supported(struct device *dev, u64 mask) 35static int sn_dma_supported(struct device *dev, u64 mask)
36{ 36{
37 BUG_ON(dev->bus != &pci_bus_type); 37 BUG_ON(!dev_is_pci(dev));
38 38
39 if (mask < 0x7fffffff) 39 if (mask < 0x7fffffff)
40 return 0; 40 return 0;
@@ -50,7 +50,7 @@ static int sn_dma_supported(struct device *dev, u64 mask)
50 */ 50 */
51int sn_dma_set_mask(struct device *dev, u64 dma_mask) 51int sn_dma_set_mask(struct device *dev, u64 dma_mask)
52{ 52{
53 BUG_ON(dev->bus != &pci_bus_type); 53 BUG_ON(!dev_is_pci(dev));
54 54
55 if (!sn_dma_supported(dev, dma_mask)) 55 if (!sn_dma_supported(dev, dma_mask))
56 return 0; 56 return 0;
@@ -85,7 +85,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
85 struct pci_dev *pdev = to_pci_dev(dev); 85 struct pci_dev *pdev = to_pci_dev(dev);
86 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 86 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
87 87
88 BUG_ON(dev->bus != &pci_bus_type); 88 BUG_ON(!dev_is_pci(dev));
89 89
90 /* 90 /*
91 * Allocate the memory. 91 * Allocate the memory.
@@ -143,7 +143,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
143 struct pci_dev *pdev = to_pci_dev(dev); 143 struct pci_dev *pdev = to_pci_dev(dev);
144 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 144 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
145 145
146 BUG_ON(dev->bus != &pci_bus_type); 146 BUG_ON(!dev_is_pci(dev));
147 147
148 provider->dma_unmap(pdev, dma_handle, 0); 148 provider->dma_unmap(pdev, dma_handle, 0);
149 free_pages((unsigned long)cpu_addr, get_order(size)); 149 free_pages((unsigned long)cpu_addr, get_order(size));
@@ -187,7 +187,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
187 187
188 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 188 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
189 189
190 BUG_ON(dev->bus != &pci_bus_type); 190 BUG_ON(!dev_is_pci(dev));
191 191
192 phys_addr = __pa(cpu_addr); 192 phys_addr = __pa(cpu_addr);
193 if (dmabarr) 193 if (dmabarr)
@@ -223,7 +223,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
223 struct pci_dev *pdev = to_pci_dev(dev); 223 struct pci_dev *pdev = to_pci_dev(dev);
224 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 224 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
225 225
226 BUG_ON(dev->bus != &pci_bus_type); 226 BUG_ON(!dev_is_pci(dev));
227 227
228 provider->dma_unmap(pdev, dma_addr, dir); 228 provider->dma_unmap(pdev, dma_addr, dir);
229} 229}
@@ -247,7 +247,7 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
247 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 247 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
248 struct scatterlist *sg; 248 struct scatterlist *sg;
249 249
250 BUG_ON(dev->bus != &pci_bus_type); 250 BUG_ON(!dev_is_pci(dev));
251 251
252 for_each_sg(sgl, sg, nhwentries, i) { 252 for_each_sg(sgl, sg, nhwentries, i) {
253 provider->dma_unmap(pdev, sg->dma_address, dir); 253 provider->dma_unmap(pdev, sg->dma_address, dir);
@@ -284,7 +284,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
284 284
285 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 285 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
286 286
287 BUG_ON(dev->bus != &pci_bus_type); 287 BUG_ON(!dev_is_pci(dev));
288 288
289 /* 289 /*
290 * Setup a DMA address for each entry in the scatterlist. 290 * Setup a DMA address for each entry in the scatterlist.
@@ -323,26 +323,26 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
323static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 323static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
324 size_t size, enum dma_data_direction dir) 324 size_t size, enum dma_data_direction dir)
325{ 325{
326 BUG_ON(dev->bus != &pci_bus_type); 326 BUG_ON(!dev_is_pci(dev));
327} 327}
328 328
329static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 329static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
330 size_t size, 330 size_t size,
331 enum dma_data_direction dir) 331 enum dma_data_direction dir)
332{ 332{
333 BUG_ON(dev->bus != &pci_bus_type); 333 BUG_ON(!dev_is_pci(dev));
334} 334}
335 335
336static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 336static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
337 int nelems, enum dma_data_direction dir) 337 int nelems, enum dma_data_direction dir)
338{ 338{
339 BUG_ON(dev->bus != &pci_bus_type); 339 BUG_ON(!dev_is_pci(dev));
340} 340}
341 341
342static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 342static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
343 int nelems, enum dma_data_direction dir) 343 int nelems, enum dma_data_direction dir)
344{ 344{
345 BUG_ON(dev->bus != &pci_bus_type); 345 BUG_ON(!dev_is_pci(dev));
346} 346}
347 347
348static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 348static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 14285caec71a..dba508fe1683 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type)
282 return NULL; 282 return NULL;
283} 283}
284 284
285#ifdef CONFIG_PCI
286static inline int is_pci_dev(struct device *dev)
287{
288 return dev->bus == &pci_bus_type;
289}
290#else
291static inline int is_pci_dev(struct device *dev)
292{
293 return 0;
294}
295#endif
296
297/* 285/*
298 * get_node_path fills in @path with the firmware path to the device. 286 * get_node_path fills in @path with the firmware path to the device.
299 * Note that if @node is a parisc device, we don't fill in the 'mod' field. 287 * Note that if @node is a parisc device, we don't fill in the 'mod' field.
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
306 int i = 5; 294 int i = 5;
307 memset(&path->bc, -1, 6); 295 memset(&path->bc, -1, 6);
308 296
309 if (is_pci_dev(dev)) { 297 if (dev_is_pci(dev)) {
310 unsigned int devfn = to_pci_dev(dev)->devfn; 298 unsigned int devfn = to_pci_dev(dev)->devfn;
311 path->mod = PCI_FUNC(devfn); 299 path->mod = PCI_FUNC(devfn);
312 path->bc[i--] = PCI_SLOT(devfn); 300 path->bc[i--] = PCI_SLOT(devfn);
@@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
314 } 302 }
315 303
316 while (dev != &root) { 304 while (dev != &root) {
317 if (is_pci_dev(dev)) { 305 if (dev_is_pci(dev)) {
318 unsigned int devfn = to_pci_dev(dev)->devfn; 306 unsigned int devfn = to_pci_dev(dev)->devfn;
319 path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); 307 path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
320 } else if (dev->bus == &parisc_bus_type) { 308 } else if (dev->bus == &parisc_bus_type) {
@@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data)
695 if (dev->bus == &parisc_bus_type) { 683 if (dev->bus == &parisc_bus_type) {
696 if (match_parisc_device(dev, d->index, d->modpath)) 684 if (match_parisc_device(dev, d->index, d->modpath))
697 d->dev = dev; 685 d->dev = dev;
698 } else if (is_pci_dev(dev)) { 686 } else if (dev_is_pci(dev)) {
699 if (match_pci_device(dev, d->index, d->modpath)) 687 if (match_pci_device(dev, d->index, d->modpath))
700 d->dev = dev; 688 d->dev = dev;
701 } else if (dev->bus == NULL) { 689 } else if (dev->bus == NULL) {
@@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath)
753 if (!parent) 741 if (!parent)
754 return NULL; 742 return NULL;
755 } 743 }
756 if (is_pci_dev(parent)) /* pci devices already parse MOD */ 744 if (dev_is_pci(parent)) /* pci devices already parse MOD */
757 return parent; 745 return parent;
758 else 746 else
759 return parse_tree_node(parent, 6, modpath); 747 return parse_tree_node(parent, 6, modpath);
@@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path)
772 padev = to_parisc_device(dev); 760 padev = to_parisc_device(dev);
773 get_node_path(dev->parent, path); 761 get_node_path(dev->parent, path);
774 path->mod = padev->hw_path; 762 path->mod = padev->hw_path;
775 } else if (is_pci_dev(dev)) { 763 } else if (dev_is_pci(dev)) {
776 get_node_path(dev, path); 764 get_node_path(dev, path);
777 } 765 }
778} 766}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bf7c73d71eef..4859c401b75e 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -407,8 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
407 struct msi_msg msg; 407 struct msi_msg msg;
408 int rc; 408 int rc;
409 409
410 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) 410 if (type == PCI_CAP_ID_MSI && nvec > 1)
411 return -EINVAL; 411 return 1;
412 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); 412 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
413 msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI); 413 msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
414 414
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed141aac7..76663b019eb5 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
854 return 1; 854 return 1;
855 855
856#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
857 if (dev->bus == &pci_bus_type) 857 if (dev_is_pci(dev))
858 return pci64_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
859#endif 859#endif
860 860
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468de9b2..e7e215dfa866 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
666 */ 666 */
667int dma_supported(struct device *dev, u64 mask) 667int dma_supported(struct device *dev, u64 mask)
668{ 668{
669#ifdef CONFIG_PCI 669 if (dev_is_pci(dev))
670 if (dev->bus == &pci_bus_type)
671 return 1; 670 return 1;
672#endif 671
673 return 0; 672 return 0;
674} 673}
675EXPORT_SYMBOL(dma_supported); 674EXPORT_SYMBOL(dma_supported);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 122c299e90c8..1ac6114c9ea5 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void);
104struct msi_desc; 104struct msi_desc;
105int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 105int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
106void native_teardown_msi_irq(unsigned int irq); 106void native_teardown_msi_irq(unsigned int irq);
107void native_restore_msi_irqs(struct pci_dev *dev, int irq); 107void native_restore_msi_irqs(struct pci_dev *dev);
108int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, 108int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
109 unsigned int irq_base, unsigned int irq_offset); 109 unsigned int irq_base, unsigned int irq_offset);
110#else 110#else
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f1be11e43d2..e45e4da96bf1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,7 +181,7 @@ struct x86_msi_ops {
181 u8 hpet_id); 181 u8 hpet_id);
182 void (*teardown_msi_irq)(unsigned int irq); 182 void (*teardown_msi_irq)(unsigned int irq);
183 void (*teardown_msi_irqs)(struct pci_dev *dev); 183 void (*teardown_msi_irqs)(struct pci_dev *dev);
184 void (*restore_msi_irqs)(struct pci_dev *dev, int irq); 184 void (*restore_msi_irqs)(struct pci_dev *dev);
185 int (*setup_hpet_msi)(unsigned int irq, unsigned int id); 185 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
186 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); 186 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
187 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); 187 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6c0b43bd024b..d359d0fffa50 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1034,9 +1034,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
1034 1034
1035 if (!acpi_ioapic) 1035 if (!acpi_ioapic)
1036 return 0; 1036 return 0;
1037 if (!dev) 1037 if (!dev || !dev_is_pci(dev))
1038 return 0;
1039 if (dev->bus != &pci_bus_type)
1040 return 0; 1038 return 0;
1041 1039
1042 pdev = to_pci_dev(dev); 1040 pdev = to_pci_dev(dev);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 021783b1f46a..e48b674639cc 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq)
136 x86_msi.teardown_msi_irq(irq); 136 x86_msi.teardown_msi_irq(irq);
137} 137}
138 138
139void arch_restore_msi_irqs(struct pci_dev *dev, int irq) 139void arch_restore_msi_irqs(struct pci_dev *dev)
140{ 140{
141 x86_msi.restore_msi_irqs(dev, irq); 141 x86_msi.restore_msi_irqs(dev);
142} 142}
143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
144{ 144{
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 5eee4959785d..103e702ec5a7 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -337,7 +337,7 @@ out:
337 return ret; 337 return ret;
338} 338}
339 339
340static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq) 340static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
341{ 341{
342 int ret = 0; 342 int ret = 0;
343 343
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e2903d03180e..8516f4d47893 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1095,26 +1095,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1095{} 1095{}
1096#endif 1096#endif
1097 1097
1098int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv) 1098int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
1099 struct ahci_host_priv *hpriv)
1099{ 1100{
1100 int rc; 1101 int rc, nvec;
1101 unsigned int maxvec;
1102 1102
1103 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) { 1103 if (hpriv->flags & AHCI_HFLAG_NO_MSI)
1104 rc = pci_enable_msi_block_auto(pdev, &maxvec); 1104 goto intx;
1105 if (rc > 0) { 1105
1106 if ((rc == maxvec) || (rc == 1)) 1106 rc = pci_msi_vec_count(pdev);
1107 return rc; 1107 if (rc < 0)
1108 /* 1108 goto intx;
1109 * Assume that advantage of multipe MSIs is negated, 1109
1110 * so fallback to single MSI mode to save resources 1110 /*
1111 */ 1111 * If number of MSIs is less than number of ports then Sharing Last
1112 pci_disable_msi(pdev); 1112 * Message mode could be enforced. In this case assume that advantage
1113 if (!pci_enable_msi(pdev)) 1113 * of multipe MSIs is negated and use single MSI mode instead.
1114 return 1; 1114 */
1115 } 1115 if (rc < n_ports)
1116 } 1116 goto single_msi;
1117
1118 nvec = rc;
1119 rc = pci_enable_msi_block(pdev, nvec);
1120 if (rc)
1121 goto intx;
1117 1122
1123 return nvec;
1124
1125single_msi:
1126 rc = pci_enable_msi(pdev);
1127 if (rc)
1128 goto intx;
1129 return 1;
1130
1131intx:
1118 pci_intx(pdev, 1); 1132 pci_intx(pdev, 1);
1119 return 0; 1133 return 0;
1120} 1134}
@@ -1281,10 +1295,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1281 1295
1282 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; 1296 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
1283 1297
1284 n_msis = ahci_init_interrupts(pdev, hpriv);
1285 if (n_msis > 1)
1286 hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
1287
1288 /* save initial config */ 1298 /* save initial config */
1289 ahci_pci_save_initial_config(pdev, hpriv); 1299 ahci_pci_save_initial_config(pdev, hpriv);
1290 1300
@@ -1339,6 +1349,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1339 */ 1349 */
1340 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); 1350 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
1341 1351
1352 n_msis = ahci_init_interrupts(pdev, n_ports, hpriv);
1353 if (n_msis > 1)
1354 hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
1355
1342 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 1356 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
1343 if (!host) 1357 if (!host)
1344 return -ENOMEM; 1358 return -ENOMEM;
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 272a3ec35957..8842cde69177 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root,
232static int __init eisa_register_device(struct eisa_device *edev) 232static int __init eisa_register_device(struct eisa_device *edev)
233{ 233{
234 int rc = device_register(&edev->dev); 234 int rc = device_register(&edev->dev);
235 if (rc) 235 if (rc) {
236 put_device(&edev->dev);
236 return rc; 237 return rc;
238 }
237 239
238 rc = device_create_file(&edev->dev, &dev_attr_signature); 240 rc = device_create_file(&edev->dev, &dev_attr_signature);
239 if (rc) 241 if (rc)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6a99f7a9b20..893503fa1782 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -105,9 +105,10 @@ config PCI_PASID
105 If unsure, say N. 105 If unsure, say N.
106 106
107config PCI_IOAPIC 107config PCI_IOAPIC
108 tristate "PCI IO-APIC hotplug support" if X86 108 bool "PCI IO-APIC hotplug support" if X86
109 depends on PCI 109 depends on PCI
110 depends on ACPI 110 depends on ACPI
111 depends on X86_IO_APIC
111 default !X86 112 default !X86
112 113
113config PCI_LABEL 114config PCI_LABEL
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6ebf5bf8e7a7..17d2b07ee67c 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ 5obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o vpd.o setup-bus.o 7 irq.o vpd.o setup-bus.o vc.o
8obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
9obj-$(CONFIG_SYSFS) += slot.o 9obj-$(CONFIG_SYSFS) += slot.o
10 10
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 86fb8ec5e448..00660cc502c5 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -248,6 +248,7 @@ int pci_bus_add_device(struct pci_dev *dev)
248 */ 248 */
249 pci_fixup_device(pci_fixup_final, dev); 249 pci_fixup_device(pci_fixup_final, dev);
250 pci_create_sysfs_dev_files(dev); 250 pci_create_sysfs_dev_files(dev);
251 pci_proc_attach_device(dev);
251 252
252 dev->match_driver = true; 253 dev->match_driver = true;
253 retval = device_attach(&dev->dev); 254 retval = device_attach(&dev->dev);
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 24beed38ddc7..3de6bfbbe8e9 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -468,7 +468,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
468 int ret; 468 int ret;
469 469
470 exynos_pcie_sideband_dbi_r_mode(pp, true); 470 exynos_pcie_sideband_dbi_r_mode(pp, true);
471 ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); 471 ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
472 exynos_pcie_sideband_dbi_r_mode(pp, false); 472 exynos_pcie_sideband_dbi_r_mode(pp, false);
473 return ret; 473 return ret;
474} 474}
@@ -479,7 +479,8 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
479 int ret; 479 int ret;
480 480
481 exynos_pcie_sideband_dbi_w_mode(pp, true); 481 exynos_pcie_sideband_dbi_w_mode(pp, true);
482 ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); 482 ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
483 where, size, val);
483 exynos_pcie_sideband_dbi_w_mode(pp, false); 484 exynos_pcie_sideband_dbi_w_mode(pp, false);
484 return ret; 485 return ret;
485} 486}
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index bd70af8f31ac..e8663a8c3406 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -44,10 +44,18 @@ struct imx6_pcie {
44 void __iomem *mem_base; 44 void __iomem *mem_base;
45}; 45};
46 46
47/* PCIe Root Complex registers (memory-mapped) */
48#define PCIE_RC_LCR 0x7c
49#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
50#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
51#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
52
47/* PCIe Port Logic registers (memory-mapped) */ 53/* PCIe Port Logic registers (memory-mapped) */
48#define PL_OFFSET 0x700 54#define PL_OFFSET 0x700
49#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) 55#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
50#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) 56#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
57#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
58#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
51 59
52#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 60#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
53#define PCIE_PHY_CTRL_DATA_LOC 0 61#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -59,6 +67,9 @@ struct imx6_pcie {
59#define PCIE_PHY_STAT (PL_OFFSET + 0x110) 67#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
60#define PCIE_PHY_STAT_ACK_LOC 16 68#define PCIE_PHY_STAT_ACK_LOC 16
61 69
70#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
71#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
72
62/* PHY registers (not memory-mapped) */ 73/* PHY registers (not memory-mapped) */
63#define PCIE_PHY_RX_ASIC_OUT 0x100D 74#define PCIE_PHY_RX_ASIC_OUT 0x100D
64 75
@@ -209,15 +220,9 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
209 220
210 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 221 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
211 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); 222 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
212 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
213 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
214 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 223 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
215 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); 224 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
216 225
217 gpio_set_value(imx6_pcie->reset_gpio, 0);
218 msleep(100);
219 gpio_set_value(imx6_pcie->reset_gpio, 1);
220
221 return 0; 226 return 0;
222} 227}
223 228
@@ -261,6 +266,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
261 /* allow the clocks to stabilize */ 266 /* allow the clocks to stabilize */
262 usleep_range(200, 500); 267 usleep_range(200, 500);
263 268
269 /* Some boards don't have PCIe reset GPIO. */
270 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
271 gpio_set_value(imx6_pcie->reset_gpio, 0);
272 msleep(100);
273 gpio_set_value(imx6_pcie->reset_gpio, 1);
274 }
264 return 0; 275 return 0;
265 276
266err_pcie_axi: 277err_pcie_axi:
@@ -299,11 +310,90 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
299 IMX6Q_GPR8_TX_SWING_LOW, 127 << 25); 310 IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
300} 311}
301 312
302static void imx6_pcie_host_init(struct pcie_port *pp) 313static int imx6_pcie_wait_for_link(struct pcie_port *pp)
314{
315 int count = 200;
316
317 while (!dw_pcie_link_up(pp)) {
318 usleep_range(100, 1000);
319 if (--count)
320 continue;
321
322 dev_err(pp->dev, "phy link never came up\n");
323 dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
324 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
325 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
326 return -EINVAL;
327 }
328
329 return 0;
330}
331
332static int imx6_pcie_start_link(struct pcie_port *pp)
303{ 333{
304 int count = 0;
305 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 334 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
335 uint32_t tmp;
336 int ret, count;
306 337
338 /*
339 * Force Gen1 operation when starting the link. In case the link is
340 * started in Gen2 mode, there is a possibility the devices on the
341 * bus will not be detected at all. This happens with PCIe switches.
342 */
343 tmp = readl(pp->dbi_base + PCIE_RC_LCR);
344 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
345 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
346 writel(tmp, pp->dbi_base + PCIE_RC_LCR);
347
348 /* Start LTSSM. */
349 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
350 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
351
352 ret = imx6_pcie_wait_for_link(pp);
353 if (ret)
354 return ret;
355
356 /* Allow Gen2 mode after the link is up. */
357 tmp = readl(pp->dbi_base + PCIE_RC_LCR);
358 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
359 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
360 writel(tmp, pp->dbi_base + PCIE_RC_LCR);
361
362 /*
363 * Start Directed Speed Change so the best possible speed both link
364 * partners support can be negotiated.
365 */
366 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
367 tmp |= PORT_LOGIC_SPEED_CHANGE;
368 writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
369
370 count = 200;
371 while (count--) {
372 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
373 /* Test if the speed change finished. */
374 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
375 break;
376 usleep_range(100, 1000);
377 }
378
379 /* Make sure link training is finished as well! */
380 if (count)
381 ret = imx6_pcie_wait_for_link(pp);
382 else
383 ret = -EINVAL;
384
385 if (ret) {
386 dev_err(pp->dev, "Failed to bring link up!\n");
387 } else {
388 tmp = readl(pp->dbi_base + 0x80);
389 dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
390 }
391
392 return ret;
393}
394
395static void imx6_pcie_host_init(struct pcie_port *pp)
396{
307 imx6_pcie_assert_core_reset(pp); 397 imx6_pcie_assert_core_reset(pp);
308 398
309 imx6_pcie_init_phy(pp); 399 imx6_pcie_init_phy(pp);
@@ -312,33 +402,41 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
312 402
313 dw_pcie_setup_rc(pp); 403 dw_pcie_setup_rc(pp);
314 404
315 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 405 imx6_pcie_start_link(pp);
316 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); 406}
317 407
318 while (!dw_pcie_link_up(pp)) { 408static void imx6_pcie_reset_phy(struct pcie_port *pp)
319 usleep_range(100, 1000); 409{
320 count++; 410 uint32_t temp;
321 if (count >= 200) { 411
322 dev_err(pp->dev, "phy link never came up\n"); 412 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
323 dev_dbg(pp->dev, 413 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
324 "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 414 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
325 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), 415 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
326 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); 416
327 break; 417 usleep_range(2000, 3000);
328 }
329 }
330 418
331 return; 419 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
420 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
421 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
422 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
332} 423}
333 424
334static int imx6_pcie_link_up(struct pcie_port *pp) 425static int imx6_pcie_link_up(struct pcie_port *pp)
335{ 426{
336 u32 rc, ltssm, rx_valid, temp; 427 u32 rc, ltssm, rx_valid;
337 428
338 /* link is debug bit 36, debug register 1 starts at bit 32 */ 429 /*
339 rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); 430 * Test if the PHY reports that the link is up and also that
340 if (rc) 431 * the link training finished. It might happen that the PHY
341 return -EAGAIN; 432 * reports the link is already up, but the link training bit
433 * is still set, so make sure to check the training is done
434 * as well here.
435 */
436 rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
437 if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
438 !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
439 return 1;
342 440
343 /* 441 /*
344 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. 442 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
@@ -358,21 +456,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
358 456
359 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n"); 457 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
360 458
361 pcie_phy_read(pp->dbi_base, 459 imx6_pcie_reset_phy(pp);
362 PHY_RX_OVRD_IN_LO, &temp);
363 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
364 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
365 pcie_phy_write(pp->dbi_base,
366 PHY_RX_OVRD_IN_LO, temp);
367
368 usleep_range(2000, 3000);
369
370 pcie_phy_read(pp->dbi_base,
371 PHY_RX_OVRD_IN_LO, &temp);
372 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
373 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
374 pcie_phy_write(pp->dbi_base,
375 PHY_RX_OVRD_IN_LO, temp);
376 460
377 return 0; 461 return 0;
378} 462}
@@ -426,30 +510,19 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
426 "imprecise external abort"); 510 "imprecise external abort");
427 511
428 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 512 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
429 if (!dbi_base) {
430 dev_err(&pdev->dev, "dbi_base memory resource not found\n");
431 return -ENODEV;
432 }
433
434 pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); 513 pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
435 if (IS_ERR(pp->dbi_base)) { 514 if (IS_ERR(pp->dbi_base))
436 ret = PTR_ERR(pp->dbi_base); 515 return PTR_ERR(pp->dbi_base);
437 goto err;
438 }
439 516
440 /* Fetch GPIOs */ 517 /* Fetch GPIOs */
441 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); 518 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
442 if (!gpio_is_valid(imx6_pcie->reset_gpio)) { 519 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
443 dev_err(&pdev->dev, "no reset-gpio defined\n"); 520 ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
444 ret = -ENODEV; 521 GPIOF_OUT_INIT_LOW, "PCIe reset");
445 } 522 if (ret) {
446 ret = devm_gpio_request_one(&pdev->dev, 523 dev_err(&pdev->dev, "unable to get reset gpio\n");
447 imx6_pcie->reset_gpio, 524 return ret;
448 GPIOF_OUT_INIT_LOW, 525 }
449 "PCIe reset");
450 if (ret) {
451 dev_err(&pdev->dev, "unable to get reset gpio\n");
452 goto err;
453 } 526 }
454 527
455 imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); 528 imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
@@ -460,7 +533,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
460 "PCIe power enable"); 533 "PCIe power enable");
461 if (ret) { 534 if (ret) {
462 dev_err(&pdev->dev, "unable to get power-on gpio\n"); 535 dev_err(&pdev->dev, "unable to get power-on gpio\n");
463 goto err; 536 return ret;
464 } 537 }
465 } 538 }
466 539
@@ -472,7 +545,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
472 "PCIe wake up"); 545 "PCIe wake up");
473 if (ret) { 546 if (ret) {
474 dev_err(&pdev->dev, "unable to get wake-up gpio\n"); 547 dev_err(&pdev->dev, "unable to get wake-up gpio\n");
475 goto err; 548 return ret;
476 } 549 }
477 } 550 }
478 551
@@ -484,7 +557,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
484 "PCIe disable endpoint"); 557 "PCIe disable endpoint");
485 if (ret) { 558 if (ret) {
486 dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); 559 dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
487 goto err; 560 return ret;
488 } 561 }
489 } 562 }
490 563
@@ -493,32 +566,28 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
493 if (IS_ERR(imx6_pcie->lvds_gate)) { 566 if (IS_ERR(imx6_pcie->lvds_gate)) {
494 dev_err(&pdev->dev, 567 dev_err(&pdev->dev,
495 "lvds_gate clock select missing or invalid\n"); 568 "lvds_gate clock select missing or invalid\n");
496 ret = PTR_ERR(imx6_pcie->lvds_gate); 569 return PTR_ERR(imx6_pcie->lvds_gate);
497 goto err;
498 } 570 }
499 571
500 imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m"); 572 imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
501 if (IS_ERR(imx6_pcie->sata_ref_100m)) { 573 if (IS_ERR(imx6_pcie->sata_ref_100m)) {
502 dev_err(&pdev->dev, 574 dev_err(&pdev->dev,
503 "sata_ref_100m clock source missing or invalid\n"); 575 "sata_ref_100m clock source missing or invalid\n");
504 ret = PTR_ERR(imx6_pcie->sata_ref_100m); 576 return PTR_ERR(imx6_pcie->sata_ref_100m);
505 goto err;
506 } 577 }
507 578
508 imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); 579 imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
509 if (IS_ERR(imx6_pcie->pcie_ref_125m)) { 580 if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
510 dev_err(&pdev->dev, 581 dev_err(&pdev->dev,
511 "pcie_ref_125m clock source missing or invalid\n"); 582 "pcie_ref_125m clock source missing or invalid\n");
512 ret = PTR_ERR(imx6_pcie->pcie_ref_125m); 583 return PTR_ERR(imx6_pcie->pcie_ref_125m);
513 goto err;
514 } 584 }
515 585
516 imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); 586 imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
517 if (IS_ERR(imx6_pcie->pcie_axi)) { 587 if (IS_ERR(imx6_pcie->pcie_axi)) {
518 dev_err(&pdev->dev, 588 dev_err(&pdev->dev,
519 "pcie_axi clock source missing or invalid\n"); 589 "pcie_axi clock source missing or invalid\n");
520 ret = PTR_ERR(imx6_pcie->pcie_axi); 590 return PTR_ERR(imx6_pcie->pcie_axi);
521 goto err;
522 } 591 }
523 592
524 /* Grab GPR config register range */ 593 /* Grab GPR config register range */
@@ -526,19 +595,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
526 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); 595 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
527 if (IS_ERR(imx6_pcie->iomuxc_gpr)) { 596 if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
528 dev_err(&pdev->dev, "unable to find iomuxc registers\n"); 597 dev_err(&pdev->dev, "unable to find iomuxc registers\n");
529 ret = PTR_ERR(imx6_pcie->iomuxc_gpr); 598 return PTR_ERR(imx6_pcie->iomuxc_gpr);
530 goto err;
531 } 599 }
532 600
533 ret = imx6_add_pcie_port(pp, pdev); 601 ret = imx6_add_pcie_port(pp, pdev);
534 if (ret < 0) 602 if (ret < 0)
535 goto err; 603 return ret;
536 604
537 platform_set_drvdata(pdev, imx6_pcie); 605 platform_set_drvdata(pdev, imx6_pcie);
538 return 0; 606 return 0;
539
540err:
541 return ret;
542} 607}
543 608
544static const struct of_device_id imx6_pcie_of_match[] = { 609static const struct of_device_id imx6_pcie_of_match[] = {
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e430c760..13478ecd4113 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -150,6 +150,11 @@ static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
150 return readl(port->base + reg); 150 return readl(port->base + reg);
151} 151}
152 152
153static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
154{
155 return port->io_target != -1 && port->io_attr != -1;
156}
157
153static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) 158static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
154{ 159{
155 return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); 160 return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
@@ -300,7 +305,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
300 305
301 /* Are the new iobase/iolimit values invalid? */ 306 /* Are the new iobase/iolimit values invalid? */
302 if (port->bridge.iolimit < port->bridge.iobase || 307 if (port->bridge.iolimit < port->bridge.iobase ||
303 port->bridge.iolimitupper < port->bridge.iobaseupper) { 308 port->bridge.iolimitupper < port->bridge.iobaseupper ||
309 !(port->bridge.command & PCI_COMMAND_IO)) {
304 310
305 /* If a window was configured, remove it */ 311 /* If a window was configured, remove it */
306 if (port->iowin_base) { 312 if (port->iowin_base) {
@@ -313,6 +319,12 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
313 return; 319 return;
314 } 320 }
315 321
322 if (!mvebu_has_ioport(port)) {
323 dev_WARN(&port->pcie->pdev->dev,
324 "Attempt to set IO when IO is disabled\n");
325 return;
326 }
327
316 /* 328 /*
317 * We read the PCI-to-PCI bridge emulated registers, and 329 * We read the PCI-to-PCI bridge emulated registers, and
318 * calculate the base address and size of the address decoding 330 * calculate the base address and size of the address decoding
@@ -330,14 +342,13 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
330 mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, 342 mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
331 port->iowin_base, port->iowin_size, 343 port->iowin_base, port->iowin_size,
332 iobase); 344 iobase);
333
334 pci_ioremap_io(iobase, port->iowin_base);
335} 345}
336 346
337static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) 347static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
338{ 348{
339 /* Are the new membase/memlimit values invalid? */ 349 /* Are the new membase/memlimit values invalid? */
340 if (port->bridge.memlimit < port->bridge.membase) { 350 if (port->bridge.memlimit < port->bridge.membase ||
351 !(port->bridge.command & PCI_COMMAND_MEMORY)) {
341 352
342 /* If a window was configured, remove it */ 353 /* If a window was configured, remove it */
343 if (port->memwin_base) { 354 if (port->memwin_base) {
@@ -426,9 +437,12 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
426 break; 437 break;
427 438
428 case PCI_IO_BASE: 439 case PCI_IO_BASE:
429 *value = (bridge->secondary_status << 16 | 440 if (!mvebu_has_ioport(port))
430 bridge->iolimit << 8 | 441 *value = bridge->secondary_status << 16;
431 bridge->iobase); 442 else
443 *value = (bridge->secondary_status << 16 |
444 bridge->iolimit << 8 |
445 bridge->iobase);
432 break; 446 break;
433 447
434 case PCI_MEMORY_BASE: 448 case PCI_MEMORY_BASE:
@@ -447,6 +461,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
447 *value = 0; 461 *value = 0;
448 break; 462 break;
449 463
464 case PCI_INTERRUPT_LINE:
465 /* LINE PIN MIN_GNT MAX_LAT */
466 *value = 0;
467 break;
468
450 default: 469 default:
451 *value = 0xffffffff; 470 *value = 0xffffffff;
452 return PCIBIOS_BAD_REGISTER_NUMBER; 471 return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -485,8 +504,19 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
485 504
486 switch (where & ~3) { 505 switch (where & ~3) {
487 case PCI_COMMAND: 506 case PCI_COMMAND:
507 {
508 u32 old = bridge->command;
509
510 if (!mvebu_has_ioport(port))
511 value &= ~PCI_COMMAND_IO;
512
488 bridge->command = value & 0xffff; 513 bridge->command = value & 0xffff;
514 if ((old ^ bridge->command) & PCI_COMMAND_IO)
515 mvebu_pcie_handle_iobase_change(port);
516 if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
517 mvebu_pcie_handle_membase_change(port);
489 break; 518 break;
519 }
490 520
491 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: 521 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
492 bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; 522 bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
@@ -500,7 +530,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
500 */ 530 */
501 bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; 531 bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
502 bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; 532 bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
503 bridge->secondary_status = value >> 16;
504 mvebu_pcie_handle_iobase_change(port); 533 mvebu_pcie_handle_iobase_change(port);
505 break; 534 break;
506 535
@@ -651,7 +680,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
651 struct mvebu_pcie *pcie = sys_to_pcie(sys); 680 struct mvebu_pcie *pcie = sys_to_pcie(sys);
652 int i; 681 int i;
653 682
654 pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset); 683 if (resource_size(&pcie->realio) != 0)
684 pci_add_resource_offset(&sys->resources, &pcie->realio,
685 sys->io_offset);
655 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); 686 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
656 pci_add_resource(&sys->resources, &pcie->busn); 687 pci_add_resource(&sys->resources, &pcie->busn);
657 688
@@ -702,9 +733,9 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
702 * aligned on their size 733 * aligned on their size
703 */ 734 */
704 if (res->flags & IORESOURCE_IO) 735 if (res->flags & IORESOURCE_IO)
705 return round_up(start, max((resource_size_t)SZ_64K, size)); 736 return round_up(start, max_t(resource_size_t, SZ_64K, size));
706 else if (res->flags & IORESOURCE_MEM) 737 else if (res->flags & IORESOURCE_MEM)
707 return round_up(start, max((resource_size_t)SZ_1M, size)); 738 return round_up(start, max_t(resource_size_t, SZ_1M, size));
708 else 739 else
709 return start; 740 return start;
710} 741}
@@ -752,12 +783,17 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
752#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) 783#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
753 784
754static int mvebu_get_tgt_attr(struct device_node *np, int devfn, 785static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
755 unsigned long type, int *tgt, int *attr) 786 unsigned long type,
787 unsigned int *tgt,
788 unsigned int *attr)
756{ 789{
757 const int na = 3, ns = 2; 790 const int na = 3, ns = 2;
758 const __be32 *range; 791 const __be32 *range;
759 int rlen, nranges, rangesz, pna, i; 792 int rlen, nranges, rangesz, pna, i;
760 793
794 *tgt = -1;
795 *attr = -1;
796
761 range = of_get_property(np, "ranges", &rlen); 797 range = of_get_property(np, "ranges", &rlen);
762 if (!range) 798 if (!range)
763 return -EINVAL; 799 return -EINVAL;
@@ -827,16 +863,15 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
827 } 863 }
828 864
829 mvebu_mbus_get_pcie_io_aperture(&pcie->io); 865 mvebu_mbus_get_pcie_io_aperture(&pcie->io);
830 if (resource_size(&pcie->io) == 0) {
831 dev_err(&pdev->dev, "invalid I/O aperture size\n");
832 return -EINVAL;
833 }
834 866
835 pcie->realio.flags = pcie->io.flags; 867 if (resource_size(&pcie->io) != 0) {
836 pcie->realio.start = PCIBIOS_MIN_IO; 868 pcie->realio.flags = pcie->io.flags;
837 pcie->realio.end = min_t(resource_size_t, 869 pcie->realio.start = PCIBIOS_MIN_IO;
838 IO_SPACE_LIMIT, 870 pcie->realio.end = min_t(resource_size_t,
839 resource_size(&pcie->io)); 871 IO_SPACE_LIMIT,
872 resource_size(&pcie->io));
873 } else
874 pcie->realio = pcie->io;
840 875
841 /* Get the bus range */ 876 /* Get the bus range */
842 ret = of_pci_parse_bus_range(np, &pcie->busn); 877 ret = of_pci_parse_bus_range(np, &pcie->busn);
@@ -895,12 +930,12 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
895 continue; 930 continue;
896 } 931 }
897 932
898 ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO, 933 if (resource_size(&pcie->io) != 0)
899 &port->io_target, &port->io_attr); 934 mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
900 if (ret < 0) { 935 &port->io_target, &port->io_attr);
901 dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for io window\n", 936 else {
902 port->port, port->lane); 937 port->io_target = -1;
903 continue; 938 port->io_attr = -1;
904 } 939 }
905 940
906 port->reset_gpio = of_get_named_gpio_flags(child, 941 port->reset_gpio = of_get_named_gpio_flags(child,
@@ -949,14 +984,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
949 984
950 mvebu_pcie_set_local_dev_nr(port, 1); 985 mvebu_pcie_set_local_dev_nr(port, 1);
951 986
952 port->clk = of_clk_get_by_name(child, NULL);
953 if (IS_ERR(port->clk)) {
954 dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
955 port->port, port->lane);
956 iounmap(port->base);
957 continue;
958 }
959
960 port->dn = child; 987 port->dn = child;
961 spin_lock_init(&port->conf_lock); 988 spin_lock_init(&port->conf_lock);
962 mvebu_sw_pci_bridge_init(port); 989 mvebu_sw_pci_bridge_init(port);
@@ -964,6 +991,10 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
964 } 991 }
965 992
966 pcie->nports = i; 993 pcie->nports = i;
994
995 for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
996 pci_ioremap_io(i, pcie->io.start + i);
997
967 mvebu_pcie_msi_enable(pcie); 998 mvebu_pcie_msi_enable(pcie);
968 mvebu_pcie_enable(pcie); 999 mvebu_pcie_enable(pcie);
969 1000
@@ -983,8 +1014,7 @@ static struct platform_driver mvebu_pcie_driver = {
983 .driver = { 1014 .driver = {
984 .owner = THIS_MODULE, 1015 .owner = THIS_MODULE,
985 .name = "mvebu-pcie", 1016 .name = "mvebu-pcie",
986 .of_match_table = 1017 .of_match_table = mvebu_pcie_of_match_table,
987 of_match_ptr(mvebu_pcie_of_match_table),
988 /* driver unloading/unbinding currently not supported */ 1018 /* driver unloading/unbinding currently not supported */
989 .suppress_bind_attrs = true, 1019 .suppress_bind_attrs = true,
990 }, 1020 },
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index cbaa5c4397e3..ceec147baec3 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/pm_runtime.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21 22
22/* AHB-PCI Bridge PCI communication registers */ 23/* AHB-PCI Bridge PCI communication registers */
@@ -77,6 +78,7 @@
77#define RCAR_PCI_NR_CONTROLLERS 3 78#define RCAR_PCI_NR_CONTROLLERS 3
78 79
79struct rcar_pci_priv { 80struct rcar_pci_priv {
81 struct device *dev;
80 void __iomem *reg; 82 void __iomem *reg;
81 struct resource io_res; 83 struct resource io_res;
82 struct resource mem_res; 84 struct resource mem_res;
@@ -169,8 +171,11 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
169 void __iomem *reg = priv->reg; 171 void __iomem *reg = priv->reg;
170 u32 val; 172 u32 val;
171 173
174 pm_runtime_enable(priv->dev);
175 pm_runtime_get_sync(priv->dev);
176
172 val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); 177 val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
173 pr_info("PCI: bus%u revision %x\n", sys->busnr, val); 178 dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
174 179
175 /* Disable Direct Power Down State and assert reset */ 180 /* Disable Direct Power Down State and assert reset */
176 val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; 181 val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -276,8 +281,8 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
276 281
277 cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 282 cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
278 reg = devm_ioremap_resource(&pdev->dev, cfg_res); 283 reg = devm_ioremap_resource(&pdev->dev, cfg_res);
279 if (!reg) 284 if (IS_ERR(reg))
280 return -ENODEV; 285 return PTR_ERR(reg);
281 286
282 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 287 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
283 if (!mem_res || !mem_res->start) 288 if (!mem_res || !mem_res->start)
@@ -301,6 +306,7 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
301 306
302 priv->irq = platform_get_irq(pdev, 0); 307 priv->irq = platform_get_irq(pdev, 0);
303 priv->reg = reg; 308 priv->reg = reg;
309 priv->dev = &pdev->dev;
304 310
305 return rcar_pci_add_controller(priv); 311 return rcar_pci_add_controller(priv);
306} 312}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0afbbbc55c81..b8ba2f794559 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -805,7 +805,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
805 afi_writel(pcie, value, AFI_PCIE_CONFIG); 805 afi_writel(pcie, value, AFI_PCIE_CONFIG);
806 806
807 value = afi_readl(pcie, AFI_FUSE); 807 value = afi_readl(pcie, AFI_FUSE);
808 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; 808 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
809 afi_writel(pcie, value, AFI_FUSE); 809 afi_writel(pcie, value, AFI_FUSE);
810 810
811 /* initialize internal PHY, enable up to 16 PCIE lanes */ 811 /* initialize internal PHY, enable up to 16 PCIE lanes */
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index e33b68be0391..17ce88f79d2b 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -74,7 +74,7 @@ static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
74 return sys->private_data; 74 return sys->private_data;
75} 75}
76 76
77int cfg_read(void __iomem *addr, int where, int size, u32 *val) 77int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
78{ 78{
79 *val = readl(addr); 79 *val = readl(addr);
80 80
@@ -88,7 +88,7 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val)
88 return PCIBIOS_SUCCESSFUL; 88 return PCIBIOS_SUCCESSFUL;
89} 89}
90 90
91int cfg_write(void __iomem *addr, int where, int size, u32 val) 91int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
92{ 92{
93 if (size == 4) 93 if (size == 4)
94 writel(val, addr); 94 writel(val, addr);
@@ -126,7 +126,8 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
126 if (pp->ops->rd_own_conf) 126 if (pp->ops->rd_own_conf)
127 ret = pp->ops->rd_own_conf(pp, where, size, val); 127 ret = pp->ops->rd_own_conf(pp, where, size, val);
128 else 128 else
129 ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); 129 ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
130 size, val);
130 131
131 return ret; 132 return ret;
132} 133}
@@ -139,8 +140,8 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
139 if (pp->ops->wr_own_conf) 140 if (pp->ops->wr_own_conf)
140 ret = pp->ops->wr_own_conf(pp, where, size, val); 141 ret = pp->ops->wr_own_conf(pp, where, size, val);
141 else 142 else
142 ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, 143 ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
143 val); 144 size, val);
144 145
145 return ret; 146 return ret;
146} 147}
@@ -167,11 +168,13 @@ void dw_handle_msi_irq(struct pcie_port *pp)
167 while ((pos = find_next_bit(&val, 32, pos)) != 32) { 168 while ((pos = find_next_bit(&val, 32, pos)) != 32) {
168 irq = irq_find_mapping(pp->irq_domain, 169 irq = irq_find_mapping(pp->irq_domain,
169 i * 32 + pos); 170 i * 32 + pos);
171 dw_pcie_wr_own_conf(pp,
172 PCIE_MSI_INTR0_STATUS + i * 12,
173 4, 1 << pos);
170 generic_handle_irq(irq); 174 generic_handle_irq(irq);
171 pos++; 175 pos++;
172 } 176 }
173 } 177 }
174 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
175 } 178 }
176} 179}
177 180
@@ -209,6 +212,23 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
209 return 0; 212 return 0;
210} 213}
211 214
215static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
216 unsigned int nvec, unsigned int pos)
217{
218 unsigned int i, res, bit, val;
219
220 for (i = 0; i < nvec; i++) {
221 irq_set_msi_desc_off(irq_base, i, NULL);
222 clear_bit(pos + i, pp->msi_irq_in_use);
223 /* Disable corresponding interrupt on MSI controller */
224 res = ((pos + i) / 32) * 12;
225 bit = (pos + i) % 32;
226 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
227 val &= ~(1 << bit);
228 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
229 }
230}
231
212static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) 232static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
213{ 233{
214 int res, bit, irq, pos0, pos1, i; 234 int res, bit, irq, pos0, pos1, i;
@@ -242,18 +262,25 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
242 if (!irq) 262 if (!irq)
243 goto no_valid_irq; 263 goto no_valid_irq;
244 264
245 i = 0; 265 /*
246 while (i < no_irqs) { 266 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
267 * descs so there is no need to allocate descs here. We can therefore
268 * assume that if irq_find_mapping above returns non-zero, then the
269 * descs are also successfully allocated.
270 */
271
272 for (i = 0; i < no_irqs; i++) {
273 if (irq_set_msi_desc_off(irq, i, desc) != 0) {
274 clear_irq_range(pp, irq, i, pos0);
275 goto no_valid_irq;
276 }
247 set_bit(pos0 + i, pp->msi_irq_in_use); 277 set_bit(pos0 + i, pp->msi_irq_in_use);
248 irq_alloc_descs((irq + i), (irq + i), 1, 0);
249 irq_set_msi_desc(irq + i, desc);
250 /*Enable corresponding interrupt in MSI interrupt controller */ 278 /*Enable corresponding interrupt in MSI interrupt controller */
251 res = ((pos0 + i) / 32) * 12; 279 res = ((pos0 + i) / 32) * 12;
252 bit = (pos0 + i) % 32; 280 bit = (pos0 + i) % 32;
253 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); 281 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
254 val |= 1 << bit; 282 val |= 1 << bit;
255 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); 283 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
256 i++;
257 } 284 }
258 285
259 *pos = pos0; 286 *pos = pos0;
@@ -266,7 +293,7 @@ no_valid_irq:
266 293
267static void clear_irq(unsigned int irq) 294static void clear_irq(unsigned int irq)
268{ 295{
269 int res, bit, val, pos; 296 unsigned int pos, nvec;
270 struct irq_desc *desc; 297 struct irq_desc *desc;
271 struct msi_desc *msi; 298 struct msi_desc *msi;
272 struct pcie_port *pp; 299 struct pcie_port *pp;
@@ -281,18 +308,15 @@ static void clear_irq(unsigned int irq)
281 return; 308 return;
282 } 309 }
283 310
311 /* undo what was done in assign_irq */
284 pos = data->hwirq; 312 pos = data->hwirq;
313 nvec = 1 << msi->msi_attrib.multiple;
285 314
286 irq_free_desc(irq); 315 clear_irq_range(pp, irq, nvec, pos);
287
288 clear_bit(pos, pp->msi_irq_in_use);
289 316
290 /* Disable corresponding interrupt on MSI interrupt controller */ 317 /* all irqs cleared; reset attributes */
291 res = (pos / 32) * 12; 318 msi->irq = 0;
292 bit = pos % 32; 319 msi->msi_attrib.multiple = 0;
293 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
294 val &= ~(1 << bit);
295 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
296} 320}
297 321
298static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 322static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
@@ -320,10 +344,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
320 if (irq < 0) 344 if (irq < 0)
321 return irq; 345 return irq;
322 346
323 msg_ctr &= ~PCI_MSI_FLAGS_QSIZE; 347 /*
324 msg_ctr |= msgvec << 4; 348 * write_msi_msg() will update PCI_MSI_FLAGS so there is
325 pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 349 * no need to explicitly call pci_write_config_word().
326 msg_ctr); 350 */
327 desc->msi_attrib.multiple = msgvec; 351 desc->msi_attrib.multiple = msgvec;
328 352
329 msg.address_lo = virt_to_phys((void *)pp->msi_data); 353 msg.address_lo = virt_to_phys((void *)pp->msi_data);
@@ -394,6 +418,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
394 + global_io_offset); 418 + global_io_offset);
395 pp->config.io_size = resource_size(&pp->io); 419 pp->config.io_size = resource_size(&pp->io);
396 pp->config.io_bus_addr = range.pci_addr; 420 pp->config.io_bus_addr = range.pci_addr;
421 pp->io_base = range.cpu_addr;
397 } 422 }
398 if (restype == IORESOURCE_MEM) { 423 if (restype == IORESOURCE_MEM) {
399 of_pci_range_to_resource(&range, np, &pp->mem); 424 of_pci_range_to_resource(&range, np, &pp->mem);
@@ -419,7 +444,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
419 444
420 pp->cfg0_base = pp->cfg.start; 445 pp->cfg0_base = pp->cfg.start;
421 pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; 446 pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
422 pp->io_base = pp->io.start;
423 pp->mem_base = pp->mem.start; 447 pp->mem_base = pp->mem.start;
424 448
425 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, 449 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -551,11 +575,13 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
551 575
552 if (bus->parent->number == pp->root_bus_nr) { 576 if (bus->parent->number == pp->root_bus_nr) {
553 dw_pcie_prog_viewport_cfg0(pp, busdev); 577 dw_pcie_prog_viewport_cfg0(pp, busdev);
554 ret = cfg_read(pp->va_cfg0_base + address, where, size, val); 578 ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
579 val);
555 dw_pcie_prog_viewport_mem_outbound(pp); 580 dw_pcie_prog_viewport_mem_outbound(pp);
556 } else { 581 } else {
557 dw_pcie_prog_viewport_cfg1(pp, busdev); 582 dw_pcie_prog_viewport_cfg1(pp, busdev);
558 ret = cfg_read(pp->va_cfg1_base + address, where, size, val); 583 ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
584 val);
559 dw_pcie_prog_viewport_io_outbound(pp); 585 dw_pcie_prog_viewport_io_outbound(pp);
560 } 586 }
561 587
@@ -574,18 +600,19 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
574 600
575 if (bus->parent->number == pp->root_bus_nr) { 601 if (bus->parent->number == pp->root_bus_nr) {
576 dw_pcie_prog_viewport_cfg0(pp, busdev); 602 dw_pcie_prog_viewport_cfg0(pp, busdev);
577 ret = cfg_write(pp->va_cfg0_base + address, where, size, val); 603 ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
604 val);
578 dw_pcie_prog_viewport_mem_outbound(pp); 605 dw_pcie_prog_viewport_mem_outbound(pp);
579 } else { 606 } else {
580 dw_pcie_prog_viewport_cfg1(pp, busdev); 607 dw_pcie_prog_viewport_cfg1(pp, busdev);
581 ret = cfg_write(pp->va_cfg1_base + address, where, size, val); 608 ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
609 val);
582 dw_pcie_prog_viewport_io_outbound(pp); 610 dw_pcie_prog_viewport_io_outbound(pp);
583 } 611 }
584 612
585 return ret; 613 return ret;
586} 614}
587 615
588
589static int dw_pcie_valid_config(struct pcie_port *pp, 616static int dw_pcie_valid_config(struct pcie_port *pp,
590 struct pci_bus *bus, int dev) 617 struct pci_bus *bus, int dev)
591{ 618{
@@ -679,7 +706,7 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
679 706
680 if (global_io_offset < SZ_1M && pp->config.io_size > 0) { 707 if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
681 sys->io_offset = global_io_offset - pp->config.io_bus_addr; 708 sys->io_offset = global_io_offset - pp->config.io_bus_addr;
682 pci_ioremap_io(sys->io_offset, pp->io.start); 709 pci_ioremap_io(global_io_offset, pp->io_base);
683 global_io_offset += SZ_64K; 710 global_io_offset += SZ_64K;
684 pci_add_resource_offset(&sys->resources, &pp->io, 711 pci_add_resource_offset(&sys->resources, &pp->io,
685 sys->io_offset); 712 sys->io_offset);
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c15379be2372..3063b3594d88 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -66,8 +66,8 @@ struct pcie_host_ops {
66 void (*host_init)(struct pcie_port *pp); 66 void (*host_init)(struct pcie_port *pp);
67}; 67};
68 68
69int cfg_read(void __iomem *addr, int where, int size, u32 *val); 69int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
70int cfg_write(void __iomem *addr, int where, int size, u32 val); 70int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
71void dw_handle_msi_irq(struct pcie_port *pp); 71void dw_handle_msi_irq(struct pcie_port *pp);
72void dw_pcie_msi_init(struct pcie_port *pp); 72void dw_pcie_msi_init(struct pcie_port *pp);
73int dw_pcie_link_up(struct pcie_port *pp); 73int dw_pcie_link_up(struct pcie_port *pp);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 21e865ded1dc..ffe6a6b336cf 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -140,15 +140,15 @@ struct controller *pcie_init(struct pcie_device *dev);
140int pcie_init_notification(struct controller *ctrl); 140int pcie_init_notification(struct controller *ctrl);
141int pciehp_enable_slot(struct slot *p_slot); 141int pciehp_enable_slot(struct slot *p_slot);
142int pciehp_disable_slot(struct slot *p_slot); 142int pciehp_disable_slot(struct slot *p_slot);
143int pcie_enable_notification(struct controller *ctrl); 143void pcie_enable_notification(struct controller *ctrl);
144int pciehp_power_on_slot(struct slot *slot); 144int pciehp_power_on_slot(struct slot *slot);
145int pciehp_power_off_slot(struct slot *slot); 145void pciehp_power_off_slot(struct slot *slot);
146int pciehp_get_power_status(struct slot *slot, u8 *status); 146void pciehp_get_power_status(struct slot *slot, u8 *status);
147int pciehp_get_attention_status(struct slot *slot, u8 *status); 147void pciehp_get_attention_status(struct slot *slot, u8 *status);
148 148
149int pciehp_set_attention_status(struct slot *slot, u8 status); 149void pciehp_set_attention_status(struct slot *slot, u8 status);
150int pciehp_get_latch_status(struct slot *slot, u8 *status); 150void pciehp_get_latch_status(struct slot *slot, u8 *status);
151int pciehp_get_adapter_status(struct slot *slot, u8 *status); 151void pciehp_get_adapter_status(struct slot *slot, u8 *status);
152int pciehp_query_power_fault(struct slot *slot); 152int pciehp_query_power_fault(struct slot *slot);
153void pciehp_green_led_on(struct slot *slot); 153void pciehp_green_led_on(struct slot *slot);
154void pciehp_green_led_off(struct slot *slot); 154void pciehp_green_led_off(struct slot *slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bbd48bbe4e9b..143a389d81fa 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -160,7 +160,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
160 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 160 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
161 __func__, slot_name(slot)); 161 __func__, slot_name(slot));
162 162
163 return pciehp_set_attention_status(slot, status); 163 pciehp_set_attention_status(slot, status);
164 return 0;
164} 165}
165 166
166 167
@@ -192,7 +193,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
192 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 193 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
193 __func__, slot_name(slot)); 194 __func__, slot_name(slot));
194 195
195 return pciehp_get_power_status(slot, value); 196 pciehp_get_power_status(slot, value);
197 return 0;
196} 198}
197 199
198static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) 200static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -202,7 +204,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
202 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 204 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
203 __func__, slot_name(slot)); 205 __func__, slot_name(slot));
204 206
205 return pciehp_get_attention_status(slot, value); 207 pciehp_get_attention_status(slot, value);
208 return 0;
206} 209}
207 210
208static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) 211static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -212,7 +215,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
212 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 215 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
213 __func__, slot_name(slot)); 216 __func__, slot_name(slot));
214 217
215 return pciehp_get_latch_status(slot, value); 218 pciehp_get_latch_status(slot, value);
219 return 0;
216} 220}
217 221
218static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) 222static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -222,7 +226,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
222 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 226 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
223 __func__, slot_name(slot)); 227 __func__, slot_name(slot));
224 228
225 return pciehp_get_adapter_status(slot, value); 229 pciehp_get_adapter_status(slot, value);
230 return 0;
226} 231}
227 232
228static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) 233static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 38f018679175..50628487597d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -158,11 +158,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
158{ 158{
159 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 159 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
160 if (POWER_CTRL(ctrl)) { 160 if (POWER_CTRL(ctrl)) {
161 if (pciehp_power_off_slot(pslot)) { 161 pciehp_power_off_slot(pslot);
162 ctrl_err(ctrl, 162
163 "Issue of Slot Power Off command failed\n");
164 return;
165 }
166 /* 163 /*
167 * After turning power off, we must wait for at least 1 second 164 * After turning power off, we must wait for at least 1 second
168 * before taking any action that relies on power having been 165 * before taking any action that relies on power having been
@@ -171,16 +168,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
171 msleep(1000); 168 msleep(1000);
172 } 169 }
173 170
174 if (PWR_LED(ctrl)) 171 pciehp_green_led_off(pslot);
175 pciehp_green_led_off(pslot); 172 pciehp_set_attention_status(pslot, 1);
176
177 if (ATTN_LED(ctrl)) {
178 if (pciehp_set_attention_status(pslot, 1)) {
179 ctrl_err(ctrl,
180 "Issue of Set Attention Led command failed\n");
181 return;
182 }
183 }
184} 173}
185 174
186/** 175/**
@@ -203,8 +192,7 @@ static int board_added(struct slot *p_slot)
203 return retval; 192 return retval;
204 } 193 }
205 194
206 if (PWR_LED(ctrl)) 195 pciehp_green_led_blink(p_slot);
207 pciehp_green_led_blink(p_slot);
208 196
209 /* Check link training status */ 197 /* Check link training status */
210 retval = pciehp_check_link_status(ctrl); 198 retval = pciehp_check_link_status(ctrl);
@@ -227,9 +215,7 @@ static int board_added(struct slot *p_slot)
227 goto err_exit; 215 goto err_exit;
228 } 216 }
229 217
230 if (PWR_LED(ctrl)) 218 pciehp_green_led_on(p_slot);
231 pciehp_green_led_on(p_slot);
232
233 return 0; 219 return 0;
234 220
235err_exit: 221err_exit:
@@ -243,7 +229,7 @@ err_exit:
243 */ 229 */
244static int remove_board(struct slot *p_slot) 230static int remove_board(struct slot *p_slot)
245{ 231{
246 int retval = 0; 232 int retval;
247 struct controller *ctrl = p_slot->ctrl; 233 struct controller *ctrl = p_slot->ctrl;
248 234
249 retval = pciehp_unconfigure_device(p_slot); 235 retval = pciehp_unconfigure_device(p_slot);
@@ -251,13 +237,8 @@ static int remove_board(struct slot *p_slot)
251 return retval; 237 return retval;
252 238
253 if (POWER_CTRL(ctrl)) { 239 if (POWER_CTRL(ctrl)) {
254 /* power off slot */ 240 pciehp_power_off_slot(p_slot);
255 retval = pciehp_power_off_slot(p_slot); 241
256 if (retval) {
257 ctrl_err(ctrl,
258 "Issue of Slot Disable command failed\n");
259 return retval;
260 }
261 /* 242 /*
262 * After turning power off, we must wait for at least 1 second 243 * After turning power off, we must wait for at least 1 second
263 * before taking any action that relies on power having been 244 * before taking any action that relies on power having been
@@ -267,9 +248,7 @@ static int remove_board(struct slot *p_slot)
267 } 248 }
268 249
269 /* turn off Green LED */ 250 /* turn off Green LED */
270 if (PWR_LED(ctrl)) 251 pciehp_green_led_off(p_slot);
271 pciehp_green_led_off(p_slot);
272
273 return 0; 252 return 0;
274} 253}
275 254
@@ -305,7 +284,7 @@ static void pciehp_power_thread(struct work_struct *work)
305 break; 284 break;
306 case POWERON_STATE: 285 case POWERON_STATE:
307 mutex_unlock(&p_slot->lock); 286 mutex_unlock(&p_slot->lock);
308 if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) 287 if (pciehp_enable_slot(p_slot))
309 pciehp_green_led_off(p_slot); 288 pciehp_green_led_off(p_slot);
310 mutex_lock(&p_slot->lock); 289 mutex_lock(&p_slot->lock);
311 p_slot->state = STATIC_STATE; 290 p_slot->state = STATIC_STATE;
@@ -372,11 +351,8 @@ static void handle_button_press_event(struct slot *p_slot)
372 "press.\n", slot_name(p_slot)); 351 "press.\n", slot_name(p_slot));
373 } 352 }
374 /* blink green LED and turn off amber */ 353 /* blink green LED and turn off amber */
375 if (PWR_LED(ctrl)) 354 pciehp_green_led_blink(p_slot);
376 pciehp_green_led_blink(p_slot); 355 pciehp_set_attention_status(p_slot, 0);
377 if (ATTN_LED(ctrl))
378 pciehp_set_attention_status(p_slot, 0);
379
380 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); 356 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
381 break; 357 break;
382 case BLINKINGOFF_STATE: 358 case BLINKINGOFF_STATE:
@@ -389,14 +365,11 @@ static void handle_button_press_event(struct slot *p_slot)
389 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); 365 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
390 cancel_delayed_work(&p_slot->work); 366 cancel_delayed_work(&p_slot->work);
391 if (p_slot->state == BLINKINGOFF_STATE) { 367 if (p_slot->state == BLINKINGOFF_STATE) {
392 if (PWR_LED(ctrl)) 368 pciehp_green_led_on(p_slot);
393 pciehp_green_led_on(p_slot);
394 } else { 369 } else {
395 if (PWR_LED(ctrl)) 370 pciehp_green_led_off(p_slot);
396 pciehp_green_led_off(p_slot);
397 } 371 }
398 if (ATTN_LED(ctrl)) 372 pciehp_set_attention_status(p_slot, 0);
399 pciehp_set_attention_status(p_slot, 0);
400 ctrl_info(ctrl, "PCI slot #%s - action canceled " 373 ctrl_info(ctrl, "PCI slot #%s - action canceled "
401 "due to button press\n", slot_name(p_slot)); 374 "due to button press\n", slot_name(p_slot));
402 p_slot->state = STATIC_STATE; 375 p_slot->state = STATIC_STATE;
@@ -456,10 +429,8 @@ static void interrupt_event_handler(struct work_struct *work)
456 case INT_POWER_FAULT: 429 case INT_POWER_FAULT:
457 if (!POWER_CTRL(ctrl)) 430 if (!POWER_CTRL(ctrl))
458 break; 431 break;
459 if (ATTN_LED(ctrl)) 432 pciehp_set_attention_status(p_slot, 1);
460 pciehp_set_attention_status(p_slot, 1); 433 pciehp_green_led_off(p_slot);
461 if (PWR_LED(ctrl))
462 pciehp_green_led_off(p_slot);
463 break; 434 break;
464 case INT_PRESENCE_ON: 435 case INT_PRESENCE_ON:
465 case INT_PRESENCE_OFF: 436 case INT_PRESENCE_OFF:
@@ -482,14 +453,14 @@ int pciehp_enable_slot(struct slot *p_slot)
482 int rc; 453 int rc;
483 struct controller *ctrl = p_slot->ctrl; 454 struct controller *ctrl = p_slot->ctrl;
484 455
485 rc = pciehp_get_adapter_status(p_slot, &getstatus); 456 pciehp_get_adapter_status(p_slot, &getstatus);
486 if (rc || !getstatus) { 457 if (!getstatus) {
487 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); 458 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
488 return -ENODEV; 459 return -ENODEV;
489 } 460 }
490 if (MRL_SENS(p_slot->ctrl)) { 461 if (MRL_SENS(p_slot->ctrl)) {
491 rc = pciehp_get_latch_status(p_slot, &getstatus); 462 pciehp_get_latch_status(p_slot, &getstatus);
492 if (rc || getstatus) { 463 if (getstatus) {
493 ctrl_info(ctrl, "Latch open on slot(%s)\n", 464 ctrl_info(ctrl, "Latch open on slot(%s)\n",
494 slot_name(p_slot)); 465 slot_name(p_slot));
495 return -ENODEV; 466 return -ENODEV;
@@ -497,8 +468,8 @@ int pciehp_enable_slot(struct slot *p_slot)
497 } 468 }
498 469
499 if (POWER_CTRL(p_slot->ctrl)) { 470 if (POWER_CTRL(p_slot->ctrl)) {
500 rc = pciehp_get_power_status(p_slot, &getstatus); 471 pciehp_get_power_status(p_slot, &getstatus);
501 if (rc || getstatus) { 472 if (getstatus) {
502 ctrl_info(ctrl, "Already enabled on slot(%s)\n", 473 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
503 slot_name(p_slot)); 474 slot_name(p_slot));
504 return -EINVAL; 475 return -EINVAL;
@@ -518,15 +489,14 @@ int pciehp_enable_slot(struct slot *p_slot)
518int pciehp_disable_slot(struct slot *p_slot) 489int pciehp_disable_slot(struct slot *p_slot)
519{ 490{
520 u8 getstatus = 0; 491 u8 getstatus = 0;
521 int ret = 0;
522 struct controller *ctrl = p_slot->ctrl; 492 struct controller *ctrl = p_slot->ctrl;
523 493
524 if (!p_slot->ctrl) 494 if (!p_slot->ctrl)
525 return 1; 495 return 1;
526 496
527 if (!HP_SUPR_RM(p_slot->ctrl)) { 497 if (!HP_SUPR_RM(p_slot->ctrl)) {
528 ret = pciehp_get_adapter_status(p_slot, &getstatus); 498 pciehp_get_adapter_status(p_slot, &getstatus);
529 if (ret || !getstatus) { 499 if (!getstatus) {
530 ctrl_info(ctrl, "No adapter on slot(%s)\n", 500 ctrl_info(ctrl, "No adapter on slot(%s)\n",
531 slot_name(p_slot)); 501 slot_name(p_slot));
532 return -ENODEV; 502 return -ENODEV;
@@ -534,8 +504,8 @@ int pciehp_disable_slot(struct slot *p_slot)
534 } 504 }
535 505
536 if (MRL_SENS(p_slot->ctrl)) { 506 if (MRL_SENS(p_slot->ctrl)) {
537 ret = pciehp_get_latch_status(p_slot, &getstatus); 507 pciehp_get_latch_status(p_slot, &getstatus);
538 if (ret || getstatus) { 508 if (getstatus) {
539 ctrl_info(ctrl, "Latch open on slot(%s)\n", 509 ctrl_info(ctrl, "Latch open on slot(%s)\n",
540 slot_name(p_slot)); 510 slot_name(p_slot));
541 return -ENODEV; 511 return -ENODEV;
@@ -543,8 +513,8 @@ int pciehp_disable_slot(struct slot *p_slot)
543 } 513 }
544 514
545 if (POWER_CTRL(p_slot->ctrl)) { 515 if (POWER_CTRL(p_slot->ctrl)) {
546 ret = pciehp_get_power_status(p_slot, &getstatus); 516 pciehp_get_power_status(p_slot, &getstatus);
547 if (ret || !getstatus) { 517 if (!getstatus) {
548 ctrl_info(ctrl, "Already disabled on slot(%s)\n", 518 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
549 slot_name(p_slot)); 519 slot_name(p_slot));
550 return -EINVAL; 520 return -EINVAL;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 3eea3fdd4b0b..14acfccb7670 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -41,34 +41,11 @@
41#include "../pci.h" 41#include "../pci.h"
42#include "pciehp.h" 42#include "pciehp.h"
43 43
44static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 44static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
45{ 45{
46 struct pci_dev *dev = ctrl->pcie->port; 46 return ctrl->pcie->port;
47 return pcie_capability_read_word(dev, reg, value);
48} 47}
49 48
50static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
51{
52 struct pci_dev *dev = ctrl->pcie->port;
53 return pcie_capability_read_dword(dev, reg, value);
54}
55
56static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
57{
58 struct pci_dev *dev = ctrl->pcie->port;
59 return pcie_capability_write_word(dev, reg, value);
60}
61
62static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
63{
64 struct pci_dev *dev = ctrl->pcie->port;
65 return pcie_capability_write_dword(dev, reg, value);
66}
67
68/* Power Control Command */
69#define POWER_ON 0
70#define POWER_OFF PCI_EXP_SLTCTL_PCC
71
72static irqreturn_t pcie_isr(int irq, void *dev_id); 49static irqreturn_t pcie_isr(int irq, void *dev_id);
73static void start_int_poll_timer(struct controller *ctrl, int sec); 50static void start_int_poll_timer(struct controller *ctrl, int sec);
74 51
@@ -129,20 +106,23 @@ static inline void pciehp_free_irq(struct controller *ctrl)
129 106
130static int pcie_poll_cmd(struct controller *ctrl) 107static int pcie_poll_cmd(struct controller *ctrl)
131{ 108{
109 struct pci_dev *pdev = ctrl_dev(ctrl);
132 u16 slot_status; 110 u16 slot_status;
133 int err, timeout = 1000; 111 int timeout = 1000;
134 112
135 err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 113 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
136 if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 114 if (slot_status & PCI_EXP_SLTSTA_CC) {
137 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 115 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
116 PCI_EXP_SLTSTA_CC);
138 return 1; 117 return 1;
139 } 118 }
140 while (timeout > 0) { 119 while (timeout > 0) {
141 msleep(10); 120 msleep(10);
142 timeout -= 10; 121 timeout -= 10;
143 err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 122 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
144 if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 123 if (slot_status & PCI_EXP_SLTSTA_CC) {
145 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 124 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
125 PCI_EXP_SLTSTA_CC);
146 return 1; 126 return 1;
147 } 127 }
148 } 128 }
@@ -169,21 +149,15 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
169 * @cmd: command value written to slot control register 149 * @cmd: command value written to slot control register
170 * @mask: bitmask of slot control register to be modified 150 * @mask: bitmask of slot control register to be modified
171 */ 151 */
172static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) 152static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
173{ 153{
174 int retval = 0; 154 struct pci_dev *pdev = ctrl_dev(ctrl);
175 u16 slot_status; 155 u16 slot_status;
176 u16 slot_ctrl; 156 u16 slot_ctrl;
177 157
178 mutex_lock(&ctrl->ctrl_lock); 158 mutex_lock(&ctrl->ctrl_lock);
179 159
180 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 160 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
181 if (retval) {
182 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
183 __func__);
184 goto out;
185 }
186
187 if (slot_status & PCI_EXP_SLTSTA_CC) { 161 if (slot_status & PCI_EXP_SLTSTA_CC) {
188 if (!ctrl->no_cmd_complete) { 162 if (!ctrl->no_cmd_complete) {
189 /* 163 /*
@@ -207,24 +181,17 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
207 } 181 }
208 } 182 }
209 183
210 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 184 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
211 if (retval) {
212 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
213 goto out;
214 }
215
216 slot_ctrl &= ~mask; 185 slot_ctrl &= ~mask;
217 slot_ctrl |= (cmd & mask); 186 slot_ctrl |= (cmd & mask);
218 ctrl->cmd_busy = 1; 187 ctrl->cmd_busy = 1;
219 smp_mb(); 188 smp_mb();
220 retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); 189 pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
221 if (retval)
222 ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
223 190
224 /* 191 /*
225 * Wait for command completion. 192 * Wait for command completion.
226 */ 193 */
227 if (!retval && !ctrl->no_cmd_complete) { 194 if (!ctrl->no_cmd_complete) {
228 int poll = 0; 195 int poll = 0;
229 /* 196 /*
230 * if hotplug interrupt is not enabled or command 197 * if hotplug interrupt is not enabled or command
@@ -236,19 +203,16 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
236 poll = 1; 203 poll = 1;
237 pcie_wait_cmd(ctrl, poll); 204 pcie_wait_cmd(ctrl, poll);
238 } 205 }
239 out:
240 mutex_unlock(&ctrl->ctrl_lock); 206 mutex_unlock(&ctrl->ctrl_lock);
241 return retval;
242} 207}
243 208
244static bool check_link_active(struct controller *ctrl) 209static bool check_link_active(struct controller *ctrl)
245{ 210{
246 bool ret = false; 211 struct pci_dev *pdev = ctrl_dev(ctrl);
247 u16 lnk_status; 212 u16 lnk_status;
213 bool ret;
248 214
249 if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status)) 215 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
250 return ret;
251
252 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); 216 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
253 217
254 if (ret) 218 if (ret)
@@ -311,9 +275,9 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
311 275
312int pciehp_check_link_status(struct controller *ctrl) 276int pciehp_check_link_status(struct controller *ctrl)
313{ 277{
278 struct pci_dev *pdev = ctrl_dev(ctrl);
279 bool found;
314 u16 lnk_status; 280 u16 lnk_status;
315 int retval = 0;
316 bool found = false;
317 281
318 /* 282 /*
319 * Data Link Layer Link Active Reporting must be capable for 283 * Data Link Layer Link Active Reporting must be capable for
@@ -330,52 +294,37 @@ int pciehp_check_link_status(struct controller *ctrl)
330 found = pci_bus_check_dev(ctrl->pcie->port->subordinate, 294 found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
331 PCI_DEVFN(0, 0)); 295 PCI_DEVFN(0, 0));
332 296
333 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 297 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
334 if (retval) {
335 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
336 return retval;
337 }
338
339 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); 298 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
340 if ((lnk_status & PCI_EXP_LNKSTA_LT) || 299 if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
341 !(lnk_status & PCI_EXP_LNKSTA_NLW)) { 300 !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
342 ctrl_err(ctrl, "Link Training Error occurs \n"); 301 ctrl_err(ctrl, "Link Training Error occurs \n");
343 retval = -1; 302 return -1;
344 return retval;
345 } 303 }
346 304
347 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); 305 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
348 306
349 if (!found && !retval) 307 if (!found)
350 retval = -1; 308 return -1;
351 309
352 return retval; 310 return 0;
353} 311}
354 312
355static int __pciehp_link_set(struct controller *ctrl, bool enable) 313static int __pciehp_link_set(struct controller *ctrl, bool enable)
356{ 314{
315 struct pci_dev *pdev = ctrl_dev(ctrl);
357 u16 lnk_ctrl; 316 u16 lnk_ctrl;
358 int retval = 0;
359 317
360 retval = pciehp_readw(ctrl, PCI_EXP_LNKCTL, &lnk_ctrl); 318 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
361 if (retval) {
362 ctrl_err(ctrl, "Cannot read LNKCTRL register\n");
363 return retval;
364 }
365 319
366 if (enable) 320 if (enable)
367 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; 321 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
368 else 322 else
369 lnk_ctrl |= PCI_EXP_LNKCTL_LD; 323 lnk_ctrl |= PCI_EXP_LNKCTL_LD;
370 324
371 retval = pciehp_writew(ctrl, PCI_EXP_LNKCTL, lnk_ctrl); 325 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
372 if (retval) {
373 ctrl_err(ctrl, "Cannot write LNKCTRL register\n");
374 return retval;
375 }
376 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); 326 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
377 327 return 0;
378 return retval;
379} 328}
380 329
381static int pciehp_link_enable(struct controller *ctrl) 330static int pciehp_link_enable(struct controller *ctrl)
@@ -388,223 +337,165 @@ static int pciehp_link_disable(struct controller *ctrl)
388 return __pciehp_link_set(ctrl, false); 337 return __pciehp_link_set(ctrl, false);
389} 338}
390 339
391int pciehp_get_attention_status(struct slot *slot, u8 *status) 340void pciehp_get_attention_status(struct slot *slot, u8 *status)
392{ 341{
393 struct controller *ctrl = slot->ctrl; 342 struct controller *ctrl = slot->ctrl;
343 struct pci_dev *pdev = ctrl_dev(ctrl);
394 u16 slot_ctrl; 344 u16 slot_ctrl;
395 u8 atten_led_state;
396 int retval = 0;
397
398 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
399 if (retval) {
400 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
401 return retval;
402 }
403 345
346 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
404 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, 347 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
405 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); 348 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
406 349
407 atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; 350 switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
408 351 case PCI_EXP_SLTCTL_ATTN_IND_ON:
409 switch (atten_led_state) {
410 case 0:
411 *status = 0xFF; /* Reserved */
412 break;
413 case 1:
414 *status = 1; /* On */ 352 *status = 1; /* On */
415 break; 353 break;
416 case 2: 354 case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
417 *status = 2; /* Blink */ 355 *status = 2; /* Blink */
418 break; 356 break;
419 case 3: 357 case PCI_EXP_SLTCTL_ATTN_IND_OFF:
420 *status = 0; /* Off */ 358 *status = 0; /* Off */
421 break; 359 break;
422 default: 360 default:
423 *status = 0xFF; 361 *status = 0xFF;
424 break; 362 break;
425 } 363 }
426
427 return 0;
428} 364}
429 365
430int pciehp_get_power_status(struct slot *slot, u8 *status) 366void pciehp_get_power_status(struct slot *slot, u8 *status)
431{ 367{
432 struct controller *ctrl = slot->ctrl; 368 struct controller *ctrl = slot->ctrl;
369 struct pci_dev *pdev = ctrl_dev(ctrl);
433 u16 slot_ctrl; 370 u16 slot_ctrl;
434 u8 pwr_state;
435 int retval = 0;
436 371
437 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 372 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
438 if (retval) {
439 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
440 return retval;
441 }
442 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, 373 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
443 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); 374 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
444 375
445 pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; 376 switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
446 377 case PCI_EXP_SLTCTL_PWR_ON:
447 switch (pwr_state) { 378 *status = 1; /* On */
448 case 0:
449 *status = 1;
450 break; 379 break;
451 case 1: 380 case PCI_EXP_SLTCTL_PWR_OFF:
452 *status = 0; 381 *status = 0; /* Off */
453 break; 382 break;
454 default: 383 default:
455 *status = 0xFF; 384 *status = 0xFF;
456 break; 385 break;
457 } 386 }
458
459 return retval;
460} 387}
461 388
462int pciehp_get_latch_status(struct slot *slot, u8 *status) 389void pciehp_get_latch_status(struct slot *slot, u8 *status)
463{ 390{
464 struct controller *ctrl = slot->ctrl; 391 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
465 u16 slot_status; 392 u16 slot_status;
466 int retval;
467 393
468 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 394 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
469 if (retval) {
470 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
471 __func__);
472 return retval;
473 }
474 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); 395 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
475 return 0;
476} 396}
477 397
478int pciehp_get_adapter_status(struct slot *slot, u8 *status) 398void pciehp_get_adapter_status(struct slot *slot, u8 *status)
479{ 399{
480 struct controller *ctrl = slot->ctrl; 400 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
481 u16 slot_status; 401 u16 slot_status;
482 int retval;
483 402
484 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 403 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
485 if (retval) {
486 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
487 __func__);
488 return retval;
489 }
490 *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); 404 *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
491 return 0;
492} 405}
493 406
494int pciehp_query_power_fault(struct slot *slot) 407int pciehp_query_power_fault(struct slot *slot)
495{ 408{
496 struct controller *ctrl = slot->ctrl; 409 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
497 u16 slot_status; 410 u16 slot_status;
498 int retval;
499 411
500 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 412 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
501 if (retval) {
502 ctrl_err(ctrl, "Cannot check for power fault\n");
503 return retval;
504 }
505 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 413 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
506} 414}
507 415
508int pciehp_set_attention_status(struct slot *slot, u8 value) 416void pciehp_set_attention_status(struct slot *slot, u8 value)
509{ 417{
510 struct controller *ctrl = slot->ctrl; 418 struct controller *ctrl = slot->ctrl;
511 u16 slot_cmd; 419 u16 slot_cmd;
512 u16 cmd_mask;
513 420
514 cmd_mask = PCI_EXP_SLTCTL_AIC; 421 if (!ATTN_LED(ctrl))
422 return;
423
515 switch (value) { 424 switch (value) {
516 case 0 : /* turn off */ 425 case 0 : /* turn off */
517 slot_cmd = 0x00C0; 426 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
518 break; 427 break;
519 case 1: /* turn on */ 428 case 1: /* turn on */
520 slot_cmd = 0x0040; 429 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
521 break; 430 break;
522 case 2: /* turn blink */ 431 case 2: /* turn blink */
523 slot_cmd = 0x0080; 432 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
524 break; 433 break;
525 default: 434 default:
526 return -EINVAL; 435 return;
527 } 436 }
528 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 437 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
529 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 438 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
530 return pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 439 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
531} 440}
532 441
533void pciehp_green_led_on(struct slot *slot) 442void pciehp_green_led_on(struct slot *slot)
534{ 443{
535 struct controller *ctrl = slot->ctrl; 444 struct controller *ctrl = slot->ctrl;
536 u16 slot_cmd;
537 u16 cmd_mask;
538 445
539 slot_cmd = 0x0100; 446 if (!PWR_LED(ctrl))
540 cmd_mask = PCI_EXP_SLTCTL_PIC; 447 return;
541 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 448
449 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
542 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 450 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
543 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 451 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
452 PCI_EXP_SLTCTL_PWR_IND_ON);
544} 453}
545 454
546void pciehp_green_led_off(struct slot *slot) 455void pciehp_green_led_off(struct slot *slot)
547{ 456{
548 struct controller *ctrl = slot->ctrl; 457 struct controller *ctrl = slot->ctrl;
549 u16 slot_cmd;
550 u16 cmd_mask;
551 458
552 slot_cmd = 0x0300; 459 if (!PWR_LED(ctrl))
553 cmd_mask = PCI_EXP_SLTCTL_PIC; 460 return;
554 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 461
462 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
555 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 463 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
556 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 464 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
465 PCI_EXP_SLTCTL_PWR_IND_OFF);
557} 466}
558 467
559void pciehp_green_led_blink(struct slot *slot) 468void pciehp_green_led_blink(struct slot *slot)
560{ 469{
561 struct controller *ctrl = slot->ctrl; 470 struct controller *ctrl = slot->ctrl;
562 u16 slot_cmd;
563 u16 cmd_mask;
564 471
565 slot_cmd = 0x0200; 472 if (!PWR_LED(ctrl))
566 cmd_mask = PCI_EXP_SLTCTL_PIC; 473 return;
567 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 474
475 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
568 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 476 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
569 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 477 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
478 PCI_EXP_SLTCTL_PWR_IND_BLINK);
570} 479}
571 480
572int pciehp_power_on_slot(struct slot * slot) 481int pciehp_power_on_slot(struct slot * slot)
573{ 482{
574 struct controller *ctrl = slot->ctrl; 483 struct controller *ctrl = slot->ctrl;
575 u16 slot_cmd; 484 struct pci_dev *pdev = ctrl_dev(ctrl);
576 u16 cmd_mask;
577 u16 slot_status; 485 u16 slot_status;
578 int retval = 0; 486 int retval;
579 487
580 /* Clear sticky power-fault bit from previous power failures */ 488 /* Clear sticky power-fault bit from previous power failures */
581 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 489 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
582 if (retval) { 490 if (slot_status & PCI_EXP_SLTSTA_PFD)
583 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 491 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
584 __func__); 492 PCI_EXP_SLTSTA_PFD);
585 return retval;
586 }
587 slot_status &= PCI_EXP_SLTSTA_PFD;
588 if (slot_status) {
589 retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
590 if (retval) {
591 ctrl_err(ctrl,
592 "%s: Cannot write to SLOTSTATUS register\n",
593 __func__);
594 return retval;
595 }
596 }
597 ctrl->power_fault_detected = 0; 493 ctrl->power_fault_detected = 0;
598 494
599 slot_cmd = POWER_ON; 495 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
600 cmd_mask = PCI_EXP_SLTCTL_PCC;
601 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
602 if (retval) {
603 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
604 return retval;
605 }
606 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 496 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
607 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 497 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
498 PCI_EXP_SLTCTL_PWR_ON);
608 499
609 retval = pciehp_link_enable(ctrl); 500 retval = pciehp_link_enable(ctrl);
610 if (retval) 501 if (retval)
@@ -613,12 +504,9 @@ int pciehp_power_on_slot(struct slot * slot)
613 return retval; 504 return retval;
614} 505}
615 506
616int pciehp_power_off_slot(struct slot * slot) 507void pciehp_power_off_slot(struct slot * slot)
617{ 508{
618 struct controller *ctrl = slot->ctrl; 509 struct controller *ctrl = slot->ctrl;
619 u16 slot_cmd;
620 u16 cmd_mask;
621 int retval;
622 510
623 /* Disable the link at first */ 511 /* Disable the link at first */
624 pciehp_link_disable(ctrl); 512 pciehp_link_disable(ctrl);
@@ -628,21 +516,16 @@ int pciehp_power_off_slot(struct slot * slot)
628 else 516 else
629 msleep(1000); 517 msleep(1000);
630 518
631 slot_cmd = POWER_OFF; 519 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
632 cmd_mask = PCI_EXP_SLTCTL_PCC;
633 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
634 if (retval) {
635 ctrl_err(ctrl, "Write command failed!\n");
636 return retval;
637 }
638 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 520 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
639 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 521 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
640 return 0; 522 PCI_EXP_SLTCTL_PWR_OFF);
641} 523}
642 524
643static irqreturn_t pcie_isr(int irq, void *dev_id) 525static irqreturn_t pcie_isr(int irq, void *dev_id)
644{ 526{
645 struct controller *ctrl = (struct controller *)dev_id; 527 struct controller *ctrl = (struct controller *)dev_id;
528 struct pci_dev *pdev = ctrl_dev(ctrl);
646 struct slot *slot = ctrl->slot; 529 struct slot *slot = ctrl->slot;
647 u16 detected, intr_loc; 530 u16 detected, intr_loc;
648 531
@@ -653,11 +536,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
653 */ 536 */
654 intr_loc = 0; 537 intr_loc = 0;
655 do { 538 do {
656 if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { 539 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
657 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
658 __func__);
659 return IRQ_NONE;
660 }
661 540
662 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | 541 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
663 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 542 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
@@ -666,11 +545,9 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
666 intr_loc |= detected; 545 intr_loc |= detected;
667 if (!intr_loc) 546 if (!intr_loc)
668 return IRQ_NONE; 547 return IRQ_NONE;
669 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) { 548 if (detected)
670 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", 549 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
671 __func__); 550 intr_loc);
672 return IRQ_NONE;
673 }
674 } while (detected); 551 } while (detected);
675 552
676 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); 553 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
@@ -705,7 +582,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
705 return IRQ_HANDLED; 582 return IRQ_HANDLED;
706} 583}
707 584
708int pcie_enable_notification(struct controller *ctrl) 585void pcie_enable_notification(struct controller *ctrl)
709{ 586{
710 u16 cmd, mask; 587 u16 cmd, mask;
711 588
@@ -731,22 +608,18 @@ int pcie_enable_notification(struct controller *ctrl)
731 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 608 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
732 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); 609 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
733 610
734 if (pcie_write_cmd(ctrl, cmd, mask)) { 611 pcie_write_cmd(ctrl, cmd, mask);
735 ctrl_err(ctrl, "Cannot enable software notification\n");
736 return -1;
737 }
738 return 0;
739} 612}
740 613
741static void pcie_disable_notification(struct controller *ctrl) 614static void pcie_disable_notification(struct controller *ctrl)
742{ 615{
743 u16 mask; 616 u16 mask;
617
744 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | 618 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
745 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 619 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
746 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | 620 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
747 PCI_EXP_SLTCTL_DLLSCE); 621 PCI_EXP_SLTCTL_DLLSCE);
748 if (pcie_write_cmd(ctrl, 0, mask)) 622 pcie_write_cmd(ctrl, 0, mask);
749 ctrl_warn(ctrl, "Cannot disable software notification\n");
750} 623}
751 624
752/* 625/*
@@ -758,6 +631,7 @@ static void pcie_disable_notification(struct controller *ctrl)
758int pciehp_reset_slot(struct slot *slot, int probe) 631int pciehp_reset_slot(struct slot *slot, int probe)
759{ 632{
760 struct controller *ctrl = slot->ctrl; 633 struct controller *ctrl = slot->ctrl;
634 struct pci_dev *pdev = ctrl_dev(ctrl);
761 635
762 if (probe) 636 if (probe)
763 return 0; 637 return 0;
@@ -771,7 +645,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
771 pci_reset_bridge_secondary_bus(ctrl->pcie->port); 645 pci_reset_bridge_secondary_bus(ctrl->pcie->port);
772 646
773 if (HP_SUPR_RM(ctrl)) { 647 if (HP_SUPR_RM(ctrl)) {
774 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC); 648 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
649 PCI_EXP_SLTSTA_PDC);
775 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE); 650 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
776 if (pciehp_poll_mode) 651 if (pciehp_poll_mode)
777 int_poll_timeout(ctrl->poll_timer.data); 652 int_poll_timeout(ctrl->poll_timer.data);
@@ -784,10 +659,7 @@ int pcie_init_notification(struct controller *ctrl)
784{ 659{
785 if (pciehp_request_irq(ctrl)) 660 if (pciehp_request_irq(ctrl))
786 return -1; 661 return -1;
787 if (pcie_enable_notification(ctrl)) { 662 pcie_enable_notification(ctrl);
788 pciehp_free_irq(ctrl);
789 return -1;
790 }
791 ctrl->notification_enabled = 1; 663 ctrl->notification_enabled = 1;
792 return 0; 664 return 0;
793} 665}
@@ -875,12 +747,14 @@ static inline void dbg_ctrl(struct controller *ctrl)
875 EMI(ctrl) ? "yes" : "no"); 747 EMI(ctrl) ? "yes" : "no");
876 ctrl_info(ctrl, " Command Completed : %3s\n", 748 ctrl_info(ctrl, " Command Completed : %3s\n",
877 NO_CMD_CMPL(ctrl) ? "no" : "yes"); 749 NO_CMD_CMPL(ctrl) ? "no" : "yes");
878 pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16); 750 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
879 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); 751 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
880 pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16); 752 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
881 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); 753 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
882} 754}
883 755
756#define FLAG(x,y) (((x) & (y)) ? '+' : '-')
757
884struct controller *pcie_init(struct pcie_device *dev) 758struct controller *pcie_init(struct pcie_device *dev)
885{ 759{
886 struct controller *ctrl; 760 struct controller *ctrl;
@@ -893,11 +767,7 @@ struct controller *pcie_init(struct pcie_device *dev)
893 goto abort; 767 goto abort;
894 } 768 }
895 ctrl->pcie = dev; 769 ctrl->pcie = dev;
896 if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { 770 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
897 ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
898 goto abort_ctrl;
899 }
900
901 ctrl->slot_cap = slot_cap; 771 ctrl->slot_cap = slot_cap;
902 mutex_init(&ctrl->ctrl_lock); 772 mutex_init(&ctrl->ctrl_lock);
903 init_waitqueue_head(&ctrl->queue); 773 init_waitqueue_head(&ctrl->queue);
@@ -913,25 +783,31 @@ struct controller *pcie_init(struct pcie_device *dev)
913 ctrl->no_cmd_complete = 1; 783 ctrl->no_cmd_complete = 1;
914 784
915 /* Check if Data Link Layer Link Active Reporting is implemented */ 785 /* Check if Data Link Layer Link Active Reporting is implemented */
916 if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { 786 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
917 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
918 goto abort_ctrl;
919 }
920 if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { 787 if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
921 ctrl_dbg(ctrl, "Link Active Reporting supported\n"); 788 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
922 ctrl->link_active_reporting = 1; 789 ctrl->link_active_reporting = 1;
923 } 790 }
924 791
925 /* Clear all remaining event bits in Slot Status register */ 792 /* Clear all remaining event bits in Slot Status register */
926 if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) 793 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
927 goto abort_ctrl; 794 PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
795 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
796 PCI_EXP_SLTSTA_CC);
928 797
929 /* Disable software notification */ 798 /* Disable software notification */
930 pcie_disable_notification(ctrl); 799 pcie_disable_notification(ctrl);
931 800
932 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 801 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
933 pdev->vendor, pdev->device, pdev->subsystem_vendor, 802 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
934 pdev->subsystem_device); 803 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
804 FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
805 FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
806 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
807 FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
808 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
809 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
810 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
935 811
936 if (pcie_init_slot(ctrl)) 812 if (pcie_init_slot(ctrl))
937 goto abort_ctrl; 813 goto abort_ctrl;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 0e0d0f7f63fd..198355112ee7 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -78,7 +78,7 @@ int pciehp_configure_device(struct slot *p_slot)
78 78
79int pciehp_unconfigure_device(struct slot *p_slot) 79int pciehp_unconfigure_device(struct slot *p_slot)
80{ 80{
81 int ret, rc = 0; 81 int rc = 0;
82 u8 bctl = 0; 82 u8 bctl = 0;
83 u8 presence = 0; 83 u8 presence = 0;
84 struct pci_dev *dev, *temp; 84 struct pci_dev *dev, *temp;
@@ -88,9 +88,7 @@ int pciehp_unconfigure_device(struct slot *p_slot)
88 88
89 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", 89 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
90 __func__, pci_domain_nr(parent), parent->number); 90 __func__, pci_domain_nr(parent), parent->number);
91 ret = pciehp_get_adapter_status(p_slot, &presence); 91 pciehp_get_adapter_status(p_slot, &presence);
92 if (ret)
93 presence = 0;
94 92
95 /* 93 /*
96 * Stopping an SR-IOV PF device removes all the associated VFs, 94 * Stopping an SR-IOV PF device removes all the associated VFs,
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 50ce68098298..2c2930ea06ad 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -113,6 +113,10 @@ static struct pci_driver ioapic_driver = {
113 .remove = ioapic_remove, 113 .remove = ioapic_remove,
114}; 114};
115 115
116module_pci_driver(ioapic_driver); 116static int __init ioapic_init(void)
117{
118 return pci_register_driver(&ioapic_driver);
119}
120module_init(ioapic_init);
117 121
118MODULE_LICENSE("GPL"); 122MODULE_LICENSE("GPL");
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1fe2d6fb19d5..68311ec849ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -441,6 +441,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
441 441
442found: 442found:
443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); 443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
444 pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
444 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); 445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); 446 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
446 if (!offset || (total > 1 && !stride)) 447 if (!offset || (total > 1 && !stride))
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3fcd67a16677..7a0fec6ce571 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
116 return default_teardown_msi_irqs(dev); 116 return default_teardown_msi_irqs(dev);
117} 117}
118 118
119void default_restore_msi_irqs(struct pci_dev *dev, int irq) 119static void default_restore_msi_irq(struct pci_dev *dev, int irq)
120{ 120{
121 struct msi_desc *entry; 121 struct msi_desc *entry;
122 122
@@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
134 write_msi_msg(irq, &entry->msg); 134 write_msi_msg(irq, &entry->msg);
135} 135}
136 136
137void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq) 137void __weak arch_restore_msi_irqs(struct pci_dev *dev)
138{ 138{
139 return default_restore_msi_irqs(dev, irq); 139 return default_restore_msi_irqs(dev);
140} 140}
141 141
142static void msi_set_enable(struct pci_dev *dev, int enable) 142static void msi_set_enable(struct pci_dev *dev, int enable)
@@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data)
262 msi_set_mask_bit(data, 0); 262 msi_set_mask_bit(data, 0);
263} 263}
264 264
265void default_restore_msi_irqs(struct pci_dev *dev)
266{
267 struct msi_desc *entry;
268
269 list_for_each_entry(entry, &dev->msi_list, list) {
270 default_restore_msi_irq(dev, entry->irq);
271 }
272}
273
265void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 274void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
266{ 275{
267 BUG_ON(entry->dev->current_state != PCI_D0); 276 BUG_ON(entry->dev->current_state != PCI_D0);
@@ -363,6 +372,9 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
363static void free_msi_irqs(struct pci_dev *dev) 372static void free_msi_irqs(struct pci_dev *dev)
364{ 373{
365 struct msi_desc *entry, *tmp; 374 struct msi_desc *entry, *tmp;
375 struct attribute **msi_attrs;
376 struct device_attribute *dev_attr;
377 int count = 0;
366 378
367 list_for_each_entry(entry, &dev->msi_list, list) { 379 list_for_each_entry(entry, &dev->msi_list, list) {
368 int i, nvec; 380 int i, nvec;
@@ -398,6 +410,22 @@ static void free_msi_irqs(struct pci_dev *dev)
398 list_del(&entry->list); 410 list_del(&entry->list);
399 kfree(entry); 411 kfree(entry);
400 } 412 }
413
414 if (dev->msi_irq_groups) {
415 sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
416 msi_attrs = dev->msi_irq_groups[0]->attrs;
417 list_for_each_entry(entry, &dev->msi_list, list) {
418 dev_attr = container_of(msi_attrs[count],
419 struct device_attribute, attr);
420 kfree(dev_attr->attr.name);
421 kfree(dev_attr);
422 ++count;
423 }
424 kfree(msi_attrs);
425 kfree(dev->msi_irq_groups[0]);
426 kfree(dev->msi_irq_groups);
427 dev->msi_irq_groups = NULL;
428 }
401} 429}
402 430
403static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) 431static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
@@ -430,7 +458,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
430 458
431 pci_intx_for_msi(dev, 0); 459 pci_intx_for_msi(dev, 0);
432 msi_set_enable(dev, 0); 460 msi_set_enable(dev, 0);
433 arch_restore_msi_irqs(dev, dev->irq); 461 arch_restore_msi_irqs(dev);
434 462
435 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 463 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
436 msi_mask_irq(entry, msi_capable_mask(control), entry->masked); 464 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -455,8 +483,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
455 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL; 483 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
456 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); 484 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
457 485
486 arch_restore_msi_irqs(dev);
458 list_for_each_entry(entry, &dev->msi_list, list) { 487 list_for_each_entry(entry, &dev->msi_list, list) {
459 arch_restore_msi_irqs(dev, entry->irq);
460 msix_mask_irq(entry, entry->masked); 488 msix_mask_irq(entry, entry->masked);
461 } 489 }
462 490
@@ -471,94 +499,95 @@ void pci_restore_msi_state(struct pci_dev *dev)
471} 499}
472EXPORT_SYMBOL_GPL(pci_restore_msi_state); 500EXPORT_SYMBOL_GPL(pci_restore_msi_state);
473 501
474 502static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
475#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
476#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
477
478struct msi_attribute {
479 struct attribute attr;
480 ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
481 char *buf);
482 ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
483 const char *buf, size_t count);
484};
485
486static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
487 char *buf) 503 char *buf)
488{ 504{
489 return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi"); 505 struct pci_dev *pdev = to_pci_dev(dev);
490} 506 struct msi_desc *entry;
491 507 unsigned long irq;
492static ssize_t msi_irq_attr_show(struct kobject *kobj, 508 int retval;
493 struct attribute *attr, char *buf)
494{
495 struct msi_attribute *attribute = to_msi_attr(attr);
496 struct msi_desc *entry = to_msi_desc(kobj);
497
498 if (!attribute->show)
499 return -EIO;
500
501 return attribute->show(entry, attribute, buf);
502}
503
504static const struct sysfs_ops msi_irq_sysfs_ops = {
505 .show = msi_irq_attr_show,
506};
507
508static struct msi_attribute mode_attribute =
509 __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
510
511 509
512static struct attribute *msi_irq_default_attrs[] = { 510 retval = kstrtoul(attr->attr.name, 10, &irq);
513 &mode_attribute.attr, 511 if (retval)
514 NULL 512 return retval;
515};
516 513
517static void msi_kobj_release(struct kobject *kobj) 514 list_for_each_entry(entry, &pdev->msi_list, list) {
518{ 515 if (entry->irq == irq) {
519 struct msi_desc *entry = to_msi_desc(kobj); 516 return sprintf(buf, "%s\n",
520 517 entry->msi_attrib.is_msix ? "msix" : "msi");
521 pci_dev_put(entry->dev); 518 }
519 }
520 return -ENODEV;
522} 521}
523 522
524static struct kobj_type msi_irq_ktype = {
525 .release = msi_kobj_release,
526 .sysfs_ops = &msi_irq_sysfs_ops,
527 .default_attrs = msi_irq_default_attrs,
528};
529
530static int populate_msi_sysfs(struct pci_dev *pdev) 523static int populate_msi_sysfs(struct pci_dev *pdev)
531{ 524{
525 struct attribute **msi_attrs;
526 struct attribute *msi_attr;
527 struct device_attribute *msi_dev_attr;
528 struct attribute_group *msi_irq_group;
529 const struct attribute_group **msi_irq_groups;
532 struct msi_desc *entry; 530 struct msi_desc *entry;
533 struct kobject *kobj; 531 int ret = -ENOMEM;
534 int ret; 532 int num_msi = 0;
535 int count = 0; 533 int count = 0;
536 534
537 pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj); 535 /* Determine how many msi entries we have */
538 if (!pdev->msi_kset) 536 list_for_each_entry(entry, &pdev->msi_list, list) {
539 return -ENOMEM; 537 ++num_msi;
538 }
539 if (!num_msi)
540 return 0;
540 541
542 /* Dynamically create the MSI attributes for the PCI device */
543 msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
544 if (!msi_attrs)
545 return -ENOMEM;
541 list_for_each_entry(entry, &pdev->msi_list, list) { 546 list_for_each_entry(entry, &pdev->msi_list, list) {
542 kobj = &entry->kobj; 547 char *name = kmalloc(20, GFP_KERNEL);
543 kobj->kset = pdev->msi_kset; 548 msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
544 pci_dev_get(pdev); 549 if (!msi_dev_attr)
545 ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL, 550 goto error_attrs;
546 "%u", entry->irq); 551 sprintf(name, "%d", entry->irq);
547 if (ret) 552 sysfs_attr_init(&msi_dev_attr->attr);
548 goto out_unroll; 553 msi_dev_attr->attr.name = name;
549 554 msi_dev_attr->attr.mode = S_IRUGO;
550 count++; 555 msi_dev_attr->show = msi_mode_show;
556 msi_attrs[count] = &msi_dev_attr->attr;
557 ++count;
551 } 558 }
552 559
560 msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
561 if (!msi_irq_group)
562 goto error_attrs;
563 msi_irq_group->name = "msi_irqs";
564 msi_irq_group->attrs = msi_attrs;
565
566 msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
567 if (!msi_irq_groups)
568 goto error_irq_group;
569 msi_irq_groups[0] = msi_irq_group;
570
571 ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
572 if (ret)
573 goto error_irq_groups;
574 pdev->msi_irq_groups = msi_irq_groups;
575
553 return 0; 576 return 0;
554 577
555out_unroll: 578error_irq_groups:
556 list_for_each_entry(entry, &pdev->msi_list, list) { 579 kfree(msi_irq_groups);
557 if (!count) 580error_irq_group:
558 break; 581 kfree(msi_irq_group);
559 kobject_del(&entry->kobj); 582error_attrs:
560 kobject_put(&entry->kobj); 583 count = 0;
561 count--; 584 msi_attr = msi_attrs[count];
585 while (msi_attr) {
586 msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
587 kfree(msi_attr->name);
588 kfree(msi_dev_attr);
589 ++count;
590 msi_attr = msi_attrs[count];
562 } 591 }
563 return ret; 592 return ret;
564} 593}
@@ -729,7 +758,7 @@ static int msix_capability_init(struct pci_dev *dev,
729 758
730 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 759 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
731 if (ret) 760 if (ret)
732 goto error; 761 goto out_avail;
733 762
734 /* 763 /*
735 * Some devices require MSI-X to be enabled before we can touch the 764 * Some devices require MSI-X to be enabled before we can touch the
@@ -742,10 +771,8 @@ static int msix_capability_init(struct pci_dev *dev,
742 msix_program_entries(dev, entries); 771 msix_program_entries(dev, entries);
743 772
744 ret = populate_msi_sysfs(dev); 773 ret = populate_msi_sysfs(dev);
745 if (ret) { 774 if (ret)
746 ret = 0; 775 goto out_free;
747 goto error;
748 }
749 776
750 /* Set MSI-X enabled bits and unmask the function */ 777 /* Set MSI-X enabled bits and unmask the function */
751 pci_intx_for_msi(dev, 0); 778 pci_intx_for_msi(dev, 0);
@@ -756,7 +783,7 @@ static int msix_capability_init(struct pci_dev *dev,
756 783
757 return 0; 784 return 0;
758 785
759error: 786out_avail:
760 if (ret < 0) { 787 if (ret < 0) {
761 /* 788 /*
762 * If we had some success, report the number of irqs 789 * If we had some success, report the number of irqs
@@ -773,6 +800,7 @@ error:
773 ret = avail; 800 ret = avail;
774 } 801 }
775 802
803out_free:
776 free_msi_irqs(dev); 804 free_msi_irqs(dev);
777 805
778 return ret; 806 return ret;
@@ -824,6 +852,31 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
824} 852}
825 853
826/** 854/**
855 * pci_msi_vec_count - Return the number of MSI vectors a device can send
856 * @dev: device to report about
857 *
858 * This function returns the number of MSI vectors a device requested via
859 * Multiple Message Capable register. It returns a negative errno if the
860 * device is not capable sending MSI interrupts. Otherwise, the call succeeds
861 * and returns a power of two, up to a maximum of 2^5 (32), according to the
862 * MSI specification.
863 **/
864int pci_msi_vec_count(struct pci_dev *dev)
865{
866 int ret;
867 u16 msgctl;
868
869 if (!dev->msi_cap)
870 return -EINVAL;
871
872 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
873 ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
874
875 return ret;
876}
877EXPORT_SYMBOL(pci_msi_vec_count);
878
879/**
827 * pci_enable_msi_block - configure device's MSI capability structure 880 * pci_enable_msi_block - configure device's MSI capability structure
828 * @dev: device to configure 881 * @dev: device to configure
829 * @nvec: number of interrupts to configure 882 * @nvec: number of interrupts to configure
@@ -836,16 +889,16 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
836 * updates the @dev's irq member to the lowest new interrupt number; the 889 * updates the @dev's irq member to the lowest new interrupt number; the
837 * other interrupt numbers allocated to this device are consecutive. 890 * other interrupt numbers allocated to this device are consecutive.
838 */ 891 */
839int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) 892int pci_enable_msi_block(struct pci_dev *dev, int nvec)
840{ 893{
841 int status, maxvec; 894 int status, maxvec;
842 u16 msgctl;
843 895
844 if (!dev->msi_cap || dev->current_state != PCI_D0) 896 if (dev->current_state != PCI_D0)
845 return -EINVAL; 897 return -EINVAL;
846 898
847 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); 899 maxvec = pci_msi_vec_count(dev);
848 maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); 900 if (maxvec < 0)
901 return maxvec;
849 if (nvec > maxvec) 902 if (nvec > maxvec)
850 return maxvec; 903 return maxvec;
851 904
@@ -867,31 +920,6 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
867} 920}
868EXPORT_SYMBOL(pci_enable_msi_block); 921EXPORT_SYMBOL(pci_enable_msi_block);
869 922
870int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
871{
872 int ret, nvec;
873 u16 msgctl;
874
875 if (!dev->msi_cap || dev->current_state != PCI_D0)
876 return -EINVAL;
877
878 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
879 ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
880
881 if (maxvec)
882 *maxvec = ret;
883
884 do {
885 nvec = ret;
886 ret = pci_enable_msi_block(dev, nvec);
887 } while (ret > 0);
888
889 if (ret < 0)
890 return ret;
891 return nvec;
892}
893EXPORT_SYMBOL(pci_enable_msi_block_auto);
894
895void pci_msi_shutdown(struct pci_dev *dev) 923void pci_msi_shutdown(struct pci_dev *dev)
896{ 924{
897 struct msi_desc *desc; 925 struct msi_desc *desc;
@@ -925,25 +953,29 @@ void pci_disable_msi(struct pci_dev *dev)
925 953
926 pci_msi_shutdown(dev); 954 pci_msi_shutdown(dev);
927 free_msi_irqs(dev); 955 free_msi_irqs(dev);
928 kset_unregister(dev->msi_kset);
929 dev->msi_kset = NULL;
930} 956}
931EXPORT_SYMBOL(pci_disable_msi); 957EXPORT_SYMBOL(pci_disable_msi);
932 958
933/** 959/**
934 * pci_msix_table_size - return the number of device's MSI-X table entries 960 * pci_msix_vec_count - return the number of device's MSI-X table entries
935 * @dev: pointer to the pci_dev data structure of MSI-X device function 961 * @dev: pointer to the pci_dev data structure of MSI-X device function
936 */ 962
937int pci_msix_table_size(struct pci_dev *dev) 963 * This function returns the number of device's MSI-X table entries and
964 * therefore the number of MSI-X vectors device is capable of sending.
965 * It returns a negative errno if the device is not capable of sending MSI-X
966 * interrupts.
967 **/
968int pci_msix_vec_count(struct pci_dev *dev)
938{ 969{
939 u16 control; 970 u16 control;
940 971
941 if (!dev->msix_cap) 972 if (!dev->msix_cap)
942 return 0; 973 return -EINVAL;
943 974
944 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); 975 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
945 return msix_table_size(control); 976 return msix_table_size(control);
946} 977}
978EXPORT_SYMBOL(pci_msix_vec_count);
947 979
948/** 980/**
949 * pci_enable_msix - configure device's MSI-X capability structure 981 * pci_enable_msix - configure device's MSI-X capability structure
@@ -972,7 +1004,9 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
972 if (status) 1004 if (status)
973 return status; 1005 return status;
974 1006
975 nr_entries = pci_msix_table_size(dev); 1007 nr_entries = pci_msix_vec_count(dev);
1008 if (nr_entries < 0)
1009 return nr_entries;
976 if (nvec > nr_entries) 1010 if (nvec > nr_entries)
977 return nr_entries; 1011 return nr_entries;
978 1012
@@ -1023,8 +1057,6 @@ void pci_disable_msix(struct pci_dev *dev)
1023 1057
1024 pci_msix_shutdown(dev); 1058 pci_msix_shutdown(dev);
1025 free_msi_irqs(dev); 1059 free_msi_irqs(dev);
1026 kset_unregister(dev->msi_kset);
1027 dev->msi_kset = NULL;
1028} 1060}
1029EXPORT_SYMBOL(pci_disable_msix); 1061EXPORT_SYMBOL(pci_disable_msix);
1030 1062
@@ -1079,3 +1111,77 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
1079 if (dev->msix_cap) 1111 if (dev->msix_cap)
1080 msix_set_enable(dev, 0); 1112 msix_set_enable(dev, 0);
1081} 1113}
1114
1115/**
1116 * pci_enable_msi_range - configure device's MSI capability structure
1117 * @dev: device to configure
1118 * @minvec: minimal number of interrupts to configure
1119 * @maxvec: maximum number of interrupts to configure
1120 *
1121 * This function tries to allocate a maximum possible number of interrupts in a
1122 * range between @minvec and @maxvec. It returns a negative errno if an error
1123 * occurs. If it succeeds, it returns the actual number of interrupts allocated
1124 * and updates the @dev's irq member to the lowest new interrupt number;
1125 * the other interrupt numbers allocated to this device are consecutive.
1126 **/
1127int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
1128{
1129 int nvec = maxvec;
1130 int rc;
1131
1132 if (maxvec < minvec)
1133 return -ERANGE;
1134
1135 do {
1136 rc = pci_enable_msi_block(dev, nvec);
1137 if (rc < 0) {
1138 return rc;
1139 } else if (rc > 0) {
1140 if (rc < minvec)
1141 return -ENOSPC;
1142 nvec = rc;
1143 }
1144 } while (rc);
1145
1146 return nvec;
1147}
1148EXPORT_SYMBOL(pci_enable_msi_range);
1149
1150/**
1151 * pci_enable_msix_range - configure device's MSI-X capability structure
1152 * @dev: pointer to the pci_dev data structure of MSI-X device function
1153 * @entries: pointer to an array of MSI-X entries
1154 * @minvec: minimum number of MSI-X irqs requested
1155 * @maxvec: maximum number of MSI-X irqs requested
1156 *
1157 * Setup the MSI-X capability structure of device function with a maximum
1158 * possible number of interrupts in the range between @minvec and @maxvec
1159 * upon its software driver call to request for MSI-X mode enabled on its
1160 * hardware device function. It returns a negative errno if an error occurs.
1161 * If it succeeds, it returns the actual number of interrupts allocated and
1162 * indicates the successful configuration of MSI-X capability structure
1163 * with new allocated MSI-X interrupts.
1164 **/
1165int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1166 int minvec, int maxvec)
1167{
1168 int nvec = maxvec;
1169 int rc;
1170
1171 if (maxvec < minvec)
1172 return -ERANGE;
1173
1174 do {
1175 rc = pci_enable_msix(dev, entries, nvec);
1176 if (rc < 0) {
1177 return rc;
1178 } else if (rc > 0) {
1179 if (rc < minvec)
1180 return -ENOSPC;
1181 nvec = rc;
1182 }
1183 } while (rc);
1184
1185 return nvec;
1186}
1187EXPORT_SYMBOL(pci_enable_msix_range);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..e0431f1af33b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -358,7 +358,7 @@ static void pci_acpi_cleanup(struct device *dev)
358 358
359static bool pci_acpi_bus_match(struct device *dev) 359static bool pci_acpi_bus_match(struct device *dev)
360{ 360{
361 return dev->bus == &pci_bus_type; 361 return dev_is_pci(dev);
362} 362}
363 363
364static struct acpi_bus_type acpi_pci_bus = { 364static struct acpi_bus_type acpi_pci_bus = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9042fdbd7244..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/kexec.h>
22#include "pci.h" 23#include "pci.h"
23 24
24struct pci_dynid { 25struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
288 int error, node; 289 int error, node;
289 struct drv_dev_and_id ddi = { drv, dev, id }; 290 struct drv_dev_and_id ddi = { drv, dev, id };
290 291
291 /* Execute driver initialization on node where the device's 292 /*
292 bus is attached to. This way the driver likely allocates 293 * Execute driver initialization on node where the device is
293 its local memory on the right node without any need to 294 * attached. This way the driver likely allocates its local memory
294 change it. */ 295 * on the right node.
296 */
295 node = dev_to_node(&dev->dev); 297 node = dev_to_node(&dev->dev);
296 if (node >= 0) { 298
299 /*
300 * On NUMA systems, we are likely to call a PF probe function using
301 * work_on_cpu(). If that probe calls pci_enable_sriov() (which
302 * adds the VF devices via pci_bus_add_device()), we may re-enter
303 * this function to call the VF probe function. Calling
304 * work_on_cpu() again will cause a lockdep warning. Since VFs are
305 * always on the same node as the PF, we can work around this by
306 * avoiding work_on_cpu() when we're already on the correct node.
307 *
308 * Preemption is enabled, so it's theoretically unsafe to use
309 * numa_node_id(), but even if we run the probe function on the
310 * wrong node, it should be functionally correct.
311 */
312 if (node >= 0 && node != numa_node_id()) {
297 int cpu; 313 int cpu;
298 314
299 get_online_cpus(); 315 get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
305 put_online_cpus(); 321 put_online_cpus();
306 } else 322 } else
307 error = local_pci_probe(&ddi); 323 error = local_pci_probe(&ddi);
324
308 return error; 325 return error;
309} 326}
310 327
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
399 pci_msi_shutdown(pci_dev); 416 pci_msi_shutdown(pci_dev);
400 pci_msix_shutdown(pci_dev); 417 pci_msix_shutdown(pci_dev);
401 418
419#ifdef CONFIG_KEXEC
402 /* 420 /*
403 * Turn off Bus Master bit on the device to tell it to not 421 * If this is a kexec reboot, turn off Bus Master bit on the
404 * continue to do DMA. Don't touch devices in D3cold or unknown states. 422 * device to tell it to not continue to do DMA. Don't touch
423 * devices in D3cold or unknown states.
424 * If it is not a kexec reboot, firmware will hit the PCI
425 * devices with a big hammer and stop their DMA anyway.
405 */ 426 */
406 if (pci_dev->current_state <= PCI_D3hot) 427 if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
407 pci_clear_master(pci_dev); 428 pci_clear_master(pci_dev);
429#endif
408} 430}
409 431
410#ifdef CONFIG_PM 432#ifdef CONFIG_PM
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 33120d156668..508e560b7d2a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -431,6 +431,32 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
431} 431}
432 432
433/** 433/**
434 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
435 * @dev: the PCI device to operate on
436 * @pos: config space offset of status word
437 * @mask: mask of bit(s) to care about in status word
438 *
439 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
440 */
441int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
442{
443 int i;
444
445 /* Wait for Transaction Pending bit clear; poll up to 4 times with 100/200/400 ms back-off between reads */
446 for (i = 0; i < 4; i++) {
447 u16 status;
448 if (i)
449 msleep((1 << (i - 1)) * 100);
450
451 pci_read_config_word(dev, pos, &status);
452 if (!(status & mask))
453 return 1;
454 }
455
456 return 0;
457}
458
459/**
434 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 460 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
435 * @dev: PCI device to have its BARs restored 461 * @dev: PCI device to have its BARs restored
436 * 462 *
@@ -835,18 +861,28 @@ EXPORT_SYMBOL(pci_choose_state);
835#define PCI_EXP_SAVE_REGS 7 861#define PCI_EXP_SAVE_REGS 7
836 862
837 863
838static struct pci_cap_saved_state *pci_find_saved_cap( 864static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
839 struct pci_dev *pci_dev, char cap) 865 u16 cap, bool extended)
840{ 866{
841 struct pci_cap_saved_state *tmp; 867 struct pci_cap_saved_state *tmp;
842 868
843 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { 869 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
844 if (tmp->cap.cap_nr == cap) 870 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
845 return tmp; 871 return tmp;
846 } 872 }
847 return NULL; 873 return NULL;
848} 874}
849 875
/* Look up the saved-state buffer for a standard (non-extended) capability. */
876struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
877{
878 return _pci_find_saved_cap(dev, cap, false);
879}
880
/* Look up the saved-state buffer for a PCIe extended capability. */
881struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
882{
883 return _pci_find_saved_cap(dev, cap, true);
884}
885
850static int pci_save_pcie_state(struct pci_dev *dev) 886static int pci_save_pcie_state(struct pci_dev *dev)
851{ 887{
852 int i = 0; 888 int i = 0;
@@ -948,6 +984,8 @@ pci_save_state(struct pci_dev *dev)
948 return i; 984 return i;
949 if ((i = pci_save_pcix_state(dev)) != 0) 985 if ((i = pci_save_pcix_state(dev)) != 0)
950 return i; 986 return i;
987 if ((i = pci_save_vc_state(dev)) != 0)
988 return i;
951 return 0; 989 return 0;
952} 990}
953 991
@@ -1010,6 +1048,7 @@ void pci_restore_state(struct pci_dev *dev)
1010 /* PCI Express register must be restored first */ 1048 /* PCI Express register must be restored first */
1011 pci_restore_pcie_state(dev); 1049 pci_restore_pcie_state(dev);
1012 pci_restore_ats_state(dev); 1050 pci_restore_ats_state(dev);
1051 pci_restore_vc_state(dev);
1013 1052
1014 pci_restore_config_space(dev); 1053 pci_restore_config_space(dev);
1015 1054
@@ -1087,7 +1126,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1087 while (cap->size) { 1126 while (cap->size) {
1088 struct pci_cap_saved_state *tmp; 1127 struct pci_cap_saved_state *tmp;
1089 1128
1090 tmp = pci_find_saved_cap(dev, cap->cap_nr); 1129 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1091 if (!tmp || tmp->cap.size != cap->size) 1130 if (!tmp || tmp->cap.size != cap->size)
1092 return -EINVAL; 1131 return -EINVAL;
1093 1132
@@ -2021,18 +2060,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
2021} 2060}
2022 2061
2023/** 2062/**
2024 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers 2063 * _pci_add_cap_save_buffer - allocate buffer for saving given
2064 * capability registers
2025 * @dev: the PCI device 2065 * @dev: the PCI device
2026 * @cap: the capability to allocate the buffer for 2066 * @cap: the capability to allocate the buffer for
2067 * @extended: Standard or Extended capability ID
2027 * @size: requested size of the buffer 2068 * @size: requested size of the buffer
2028 */ 2069 */
2029static int pci_add_cap_save_buffer( 2070static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2030 struct pci_dev *dev, char cap, unsigned int size) 2071 bool extended, unsigned int size)
2031{ 2072{
2032 int pos; 2073 int pos;
2033 struct pci_cap_saved_state *save_state; 2074 struct pci_cap_saved_state *save_state;
2034 2075
2035 pos = pci_find_capability(dev, cap); 2076 if (extended)
2077 pos = pci_find_ext_capability(dev, cap);
2078 else
2079 pos = pci_find_capability(dev, cap);
2080
2036 if (pos <= 0) 2081 if (pos <= 0)
2037 return 0; 2082 return 0;
2038 2083
@@ -2041,12 +2086,23 @@ static int pci_add_cap_save_buffer(
2041 return -ENOMEM; 2086 return -ENOMEM;
2042 2087
2043 save_state->cap.cap_nr = cap; 2088 save_state->cap.cap_nr = cap;
2089 save_state->cap.cap_extended = extended;
2044 save_state->cap.size = size; 2090 save_state->cap.size = size;
2045 pci_add_saved_cap(dev, save_state); 2091 pci_add_saved_cap(dev, save_state);
2046 2092
2047 return 0; 2093 return 0;
2048} 2094}
2049 2095
/* Allocate a save buffer for a standard (non-extended) capability. */
2096int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2097{
2098 return _pci_add_cap_save_buffer(dev, cap, false, size);
2099}
2100
/* Allocate a save buffer for a PCIe extended capability. */
2101int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2102{
2103 return _pci_add_cap_save_buffer(dev, cap, true, size);
2104}
2105
2050/** 2106/**
2051 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 2107 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2052 * @dev: the PCI device 2108 * @dev: the PCI device
@@ -2065,6 +2121,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2065 if (error) 2121 if (error)
2066 dev_err(&dev->dev, 2122 dev_err(&dev->dev,
2067 "unable to preallocate PCI-X save buffer\n"); 2123 "unable to preallocate PCI-X save buffer\n");
2124
2125 pci_allocate_vc_save_buffers(dev);
2068} 2126}
2069 2127
2070void pci_free_cap_save_buffers(struct pci_dev *dev) 2128void pci_free_cap_save_buffers(struct pci_dev *dev)
@@ -3204,20 +3262,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3204 */ 3262 */
3205int pci_wait_for_pending_transaction(struct pci_dev *dev) 3263int pci_wait_for_pending_transaction(struct pci_dev *dev)
3206{ 3264{
3207 int i; 3265 if (!pci_is_pcie(dev))
3208 u16 status; 3266 return 1;
3209
3210 /* Wait for Transaction Pending bit clean */
3211 for (i = 0; i < 4; i++) {
3212 if (i)
3213 msleep((1 << (i - 1)) * 100);
3214
3215 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3216 if (!(status & PCI_EXP_DEVSTA_TRPND))
3217 return 1;
3218 }
3219 3267
3220 return 0; 3268 return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
3221} 3269}
3222EXPORT_SYMBOL(pci_wait_for_pending_transaction); 3270EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3223 3271
@@ -3244,10 +3292,8 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3244 3292
3245static int pci_af_flr(struct pci_dev *dev, int probe) 3293static int pci_af_flr(struct pci_dev *dev, int probe)
3246{ 3294{
3247 int i;
3248 int pos; 3295 int pos;
3249 u8 cap; 3296 u8 cap;
3250 u8 status;
3251 3297
3252 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 3298 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3253 if (!pos) 3299 if (!pos)
@@ -3261,14 +3307,8 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3261 return 0; 3307 return 0;
3262 3308
3263 /* Wait for Transaction Pending bit clean */ 3309 /* Wait for Transaction Pending bit clean */
3264 for (i = 0; i < 4; i++) { 3310 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
3265 if (i) 3311 goto clear;
3266 msleep((1 << (i - 1)) * 100);
3267
3268 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3269 if (!(status & PCI_AF_STATUS_TP))
3270 goto clear;
3271 }
3272 3312
3273 dev_err(&dev->dev, "transaction is not cleared; " 3313 dev_err(&dev->dev, "transaction is not cleared; "
3274 "proceeding with reset anyway\n"); 3314 "proceeding with reset anyway\n");
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index cf611ab2193a..4d6991794fa2 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -50,14 +50,37 @@ struct aer_hest_parse_info {
50 int firmware_first; 50 int firmware_first;
51}; 51};
52 52
/* Return 1 if this HEST entry describes a PCIe AER error source. */
53static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
54{
55 if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
56 hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
57 hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
58 return 1;
59 return 0;
60}
61
53static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) 62static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
54{ 63{
55 struct aer_hest_parse_info *info = data; 64 struct aer_hest_parse_info *info = data;
56 struct acpi_hest_aer_common *p; 65 struct acpi_hest_aer_common *p;
57 int ff; 66 int ff;
58 67
68 if (!hest_source_is_pcie_aer(hest_hdr))
69 return 0;
70
59 p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 71 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
60 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 72 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
73
74 /*
75 * If no specific device is supplied, determine whether
76 * FIRMWARE_FIRST is set for *any* PCIe device.
77 */
78 if (!info->pci_dev) {
79 info->firmware_first |= ff;
80 return 0;
81 }
82
83 /* Otherwise, check the specific device */
61 if (p->flags & ACPI_HEST_GLOBAL) { 84 if (p->flags & ACPI_HEST_GLOBAL) {
62 if (hest_match_type(hest_hdr, info->pci_dev)) 85 if (hest_match_type(hest_hdr, info->pci_dev))
63 info->firmware_first = ff; 86 info->firmware_first = ff;
@@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
97 120
98static bool aer_firmware_first; 121static bool aer_firmware_first;
99 122
100static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
101{
102 struct acpi_hest_aer_common *p;
103
104 if (aer_firmware_first)
105 return 0;
106
107 switch (hest_hdr->type) {
108 case ACPI_HEST_TYPE_AER_ROOT_PORT:
109 case ACPI_HEST_TYPE_AER_ENDPOINT:
110 case ACPI_HEST_TYPE_AER_BRIDGE:
111 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
112 aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
113 default:
114 return 0;
115 }
116}
117
118/** 123/**
119 * aer_acpi_firmware_first - Check if APEI should control AER. 124 * aer_acpi_firmware_first - Check if APEI should control AER.
120 */ 125 */
121bool aer_acpi_firmware_first(void) 126bool aer_acpi_firmware_first(void)
122{ 127{
123 static bool parsed = false; 128 static bool parsed = false;
129 struct aer_hest_parse_info info = {
130 .pci_dev = NULL, /* Check all PCIe devices */
131 .firmware_first = 0,
132 };
124 133
125 if (!parsed) { 134 if (!parsed) {
126 apei_hest_parse(aer_hest_parse_aff, NULL); 135 apei_hest_parse(aer_hest_parse, &info);
136 aer_firmware_first = info.firmware_first;
127 parsed = true; 137 parsed = true;
128 } 138 }
129 return aer_firmware_first; 139 return aer_firmware_first;
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 2c7c9f5f592c..34ff7026440c 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -124,6 +124,21 @@ static const char *aer_agent_string[] = {
124 "Transmitter ID" 124 "Transmitter ID"
125}; 125};
126 126
/*
 * Dump the 16-byte TLP Header Log, four dwords per line group, with the
 * bytes of each dword printed in reversed order (as the existing AER log
 * output has always done).
 */
127static void __print_tlp_header(struct pci_dev *dev,
128 struct aer_header_log_regs *t)
129{
130 unsigned char *tlp = (unsigned char *)t; /* BUG FIX: was "&t" — that dumps the stack slot holding the pointer (and adjacent stack bytes), not the header log */
131
132 dev_err(&dev->dev, " TLP Header:"
133 " %02x%02x%02x%02x %02x%02x%02x%02x"
134 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
135 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
136 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
137 *(tlp + 11), *(tlp + 10), *(tlp + 9),
138 *(tlp + 8), *(tlp + 15), *(tlp + 14),
139 *(tlp + 13), *(tlp + 12));
140}
141
127static void __aer_print_error(struct pci_dev *dev, 142static void __aer_print_error(struct pci_dev *dev,
128 struct aer_err_info *info) 143 struct aer_err_info *info)
129{ 144{
@@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev,
153 168
154void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) 169void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
155{ 170{
171 int layer, agent;
156 int id = ((dev->bus->number << 8) | dev->devfn); 172 int id = ((dev->bus->number << 8) | dev->devfn);
157 173
158 if (info->status == 0) { 174 if (!info->status) {
159 dev_err(&dev->dev, 175 dev_err(&dev->dev,
160 "PCIe Bus Error: severity=%s, type=Unaccessible, " 176 "PCIe Bus Error: severity=%s, type=Unaccessible, "
161 "id=%04x(Unregistered Agent ID)\n", 177 "id=%04x(Unregistered Agent ID)\n",
162 aer_error_severity_string[info->severity], id); 178 aer_error_severity_string[info->severity], id);
163 } else { 179 goto out;
164 int layer, agent; 180 }
165 181
166 layer = AER_GET_LAYER_ERROR(info->severity, info->status); 182 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
167 agent = AER_GET_AGENT(info->severity, info->status); 183 agent = AER_GET_AGENT(info->severity, info->status);
168 184
169 dev_err(&dev->dev, 185 dev_err(&dev->dev,
170 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 186 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
171 aer_error_severity_string[info->severity], 187 aer_error_severity_string[info->severity],
172 aer_error_layer[layer], id, aer_agent_string[agent]); 188 aer_error_layer[layer], id, aer_agent_string[agent]);
173 189
174 dev_err(&dev->dev, 190 dev_err(&dev->dev,
175 " device [%04x:%04x] error status/mask=%08x/%08x\n", 191 " device [%04x:%04x] error status/mask=%08x/%08x\n",
176 dev->vendor, dev->device, 192 dev->vendor, dev->device,
177 info->status, info->mask); 193 info->status, info->mask);
178 194
179 __aer_print_error(dev, info); 195 __aer_print_error(dev, info);
180
181 if (info->tlp_header_valid) {
182 unsigned char *tlp = (unsigned char *) &info->tlp;
183 dev_err(&dev->dev, " TLP Header:"
184 " %02x%02x%02x%02x %02x%02x%02x%02x"
185 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
186 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
187 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
188 *(tlp + 11), *(tlp + 10), *(tlp + 9),
189 *(tlp + 8), *(tlp + 15), *(tlp + 14),
190 *(tlp + 13), *(tlp + 12));
191 }
192 }
193 196
197 if (info->tlp_header_valid)
198 __print_tlp_header(dev, &info->tlp);
199
200out:
194 if (info->id && info->error_dev_num > 1 && info->id == id) 201 if (info->id && info->error_dev_num > 1 && info->id == id)
195 dev_err(&dev->dev, 202 dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id);
196 " Error of this Agent(%04x) is reported first\n", 203
197 id);
198 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), 204 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
199 info->severity); 205 info->severity);
200} 206}
@@ -228,6 +234,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
228 const char **status_strs; 234 const char **status_strs;
229 235
230 aer_severity = cper_severity_to_aer(cper_severity); 236 aer_severity = cper_severity_to_aer(cper_severity);
237
231 if (aer_severity == AER_CORRECTABLE) { 238 if (aer_severity == AER_CORRECTABLE) {
232 status = aer->cor_status; 239 status = aer->cor_status;
233 mask = aer->cor_mask; 240 mask = aer->cor_mask;
@@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
240 status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); 247 status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
241 tlp_header_valid = status & AER_LOG_TLP_MASKS; 248 tlp_header_valid = status & AER_LOG_TLP_MASKS;
242 } 249 }
250
243 layer = AER_GET_LAYER_ERROR(aer_severity, status); 251 layer = AER_GET_LAYER_ERROR(aer_severity, status);
244 agent = AER_GET_AGENT(aer_severity, status); 252 agent = AER_GET_AGENT(aer_severity, status);
245 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", 253
246 status, mask); 254 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
247 cper_print_bits("", status, status_strs, status_strs_size); 255 cper_print_bits("", status, status_strs, status_strs_size);
248 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", 256 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
249 aer_error_layer[layer], aer_agent_string[agent]); 257 aer_error_layer[layer], aer_agent_string[agent]);
258
250 if (aer_severity != AER_CORRECTABLE) 259 if (aer_severity != AER_CORRECTABLE)
251 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", 260 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
252 aer->uncor_severity); 261 aer->uncor_severity);
253 if (tlp_header_valid) { 262
254 const unsigned char *tlp; 263 if (tlp_header_valid)
255 tlp = (const unsigned char *)&aer->header_log; 264 __print_tlp_header(dev, &aer->header_log);
256 dev_err(&dev->dev, "aer_tlp_header:" 265
257 " %02x%02x%02x%02x %02x%02x%02x%02x"
258 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
259 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
260 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
261 *(tlp + 11), *(tlp + 10), *(tlp + 9),
262 *(tlp + 8), *(tlp + 15), *(tlp + 14),
263 *(tlp + 13), *(tlp + 12));
264 }
265 trace_aer_event(dev_name(&dev->dev), (status & ~mask), 266 trace_aer_event(dev_name(&dev->dev), (status & ~mask),
266 aer_severity); 267 aer_severity);
267} 268}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 0b6e76604068..986f8eadfd39 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -79,9 +79,10 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
79 u16 reg16; 79 u16 reg16;
80 u32 reg32; 80 u32 reg32;
81 81
82 nr_entries = pci_msix_table_size(dev); 82 nr_entries = pci_msix_vec_count(dev);
83 if (!nr_entries) 83 if (nr_entries < 0)
84 return -EINVAL; 84 return nr_entries;
85 BUG_ON(!nr_entries);
85 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES) 86 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
86 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES; 87 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
87 88
@@ -344,11 +345,12 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
344 device_enable_async_suspend(device); 345 device_enable_async_suspend(device);
345 346
346 retval = device_register(device); 347 retval = device_register(device);
347 if (retval) 348 if (retval) {
348 kfree(pcie); 349 put_device(device);
349 else 350 return retval;
350 get_device(device); 351 }
351 return retval; 352
353 return 0;
352} 354}
353 355
354/** 356/**
@@ -454,10 +456,8 @@ int pcie_port_device_resume(struct device *dev)
454 456
455static int remove_iter(struct device *dev, void *data) 457static int remove_iter(struct device *dev, void *data)
456{ 458{
457 if (dev->bus == &pcie_port_bus_type) { 459 if (dev->bus == &pcie_port_bus_type)
458 put_device(dev);
459 device_unregister(dev); 460 device_unregister(dev);
460 }
461 return 0; 461 return 0;
462} 462}
463 463
@@ -498,12 +498,12 @@ static int pcie_port_probe_service(struct device *dev)
498 498
499 pciedev = to_pcie_device(dev); 499 pciedev = to_pcie_device(dev);
500 status = driver->probe(pciedev); 500 status = driver->probe(pciedev);
501 if (!status) { 501 if (status)
502 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 502 return status;
503 driver->name); 503
504 get_device(dev); 504 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
505 } 505 get_device(dev);
506 return status; 506 return 0;
507} 507}
508 508
509/** 509/**
@@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
554 if (pcie_ports_disabled) 554 if (pcie_ports_disabled)
555 return -ENODEV; 555 return -ENODEV;
556 556
557 new->driver.name = (char *)new->name; 557 new->driver.name = new->name;
558 new->driver.bus = &pcie_port_bus_type; 558 new->driver.bus = &pcie_port_bus_type;
559 new->driver.probe = pcie_port_probe_service; 559 new->driver.probe = pcie_port_probe_service;
560 new->driver.remove = pcie_port_remove_service; 560 new->driver.remove = pcie_port_remove_service;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f049e3f53fcc..23cdfac0bdb3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1154,6 +1154,18 @@ static void pci_release_capabilities(struct pci_dev *dev)
1154 pci_free_cap_save_buffers(dev); 1154 pci_free_cap_save_buffers(dev);
1155} 1155}
1156 1156
/* Release the device's ROM mapping and any resources still claimed in the resource tree. */
1157static void pci_free_resources(struct pci_dev *dev)
1158{
1159 int i;
1160
1161 pci_cleanup_rom(dev);
1162 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1163 struct resource *res = dev->resource + i;
1164 if (res->parent) /* skip resources never inserted into the tree */
1165 release_resource(res);
1166 }
1167}
1168
1157/** 1169/**
1158 * pci_release_dev - free a pci device structure when all users of it are finished. 1170 * pci_release_dev - free a pci device structure when all users of it are finished.
1159 * @dev: device that's been disconnected 1171 * @dev: device that's been disconnected
@@ -1163,9 +1175,14 @@ static void pci_release_capabilities(struct pci_dev *dev)
1163 */ 1175 */
1164static void pci_release_dev(struct device *dev) 1176static void pci_release_dev(struct device *dev)
1165{ 1177{
1166 struct pci_dev *pci_dev; 1178 struct pci_dev *pci_dev = to_pci_dev(dev);
1179
1180 down_write(&pci_bus_sem);
1181 list_del(&pci_dev->bus_list);
1182 up_write(&pci_bus_sem);
1183
1184 pci_free_resources(pci_dev);
1167 1185
1168 pci_dev = to_pci_dev(dev);
1169 pci_release_capabilities(pci_dev); 1186 pci_release_capabilities(pci_dev);
1170 pci_release_of_node(pci_dev); 1187 pci_release_of_node(pci_dev);
1171 pcibios_release_device(pci_dev); 1188 pcibios_release_device(pci_dev);
@@ -1381,8 +1398,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1381 dev->match_driver = false; 1398 dev->match_driver = false;
1382 ret = device_add(&dev->dev); 1399 ret = device_add(&dev->dev);
1383 WARN_ON(ret < 0); 1400 WARN_ON(ret < 0);
1384
1385 pci_proc_attach_device(dev);
1386} 1401}
1387 1402
1388struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) 1403struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 1576851028db..f452148e6d55 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -3,20 +3,6 @@
3#include <linux/pci-aspm.h> 3#include <linux/pci-aspm.h>
4#include "pci.h" 4#include "pci.h"
5 5
6static void pci_free_resources(struct pci_dev *dev)
7{
8 int i;
9
10 msi_remove_pci_irq_vectors(dev);
11
12 pci_cleanup_rom(dev);
13 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
14 struct resource *res = dev->resource + i;
15 if (res->parent)
16 release_resource(res);
17 }
18}
19
20static void pci_stop_dev(struct pci_dev *dev) 6static void pci_stop_dev(struct pci_dev *dev)
21{ 7{
22 pci_pme_active(dev, false); 8 pci_pme_active(dev, false);
@@ -24,7 +10,7 @@ static void pci_stop_dev(struct pci_dev *dev)
24 if (dev->is_added) { 10 if (dev->is_added) {
25 pci_proc_detach_device(dev); 11 pci_proc_detach_device(dev);
26 pci_remove_sysfs_dev_files(dev); 12 pci_remove_sysfs_dev_files(dev);
27 device_del(&dev->dev); 13 device_release_driver(&dev->dev);
28 dev->is_added = 0; 14 dev->is_added = 0;
29 } 15 }
30 16
@@ -34,11 +20,8 @@ static void pci_stop_dev(struct pci_dev *dev)
34 20
35static void pci_destroy_dev(struct pci_dev *dev) 21static void pci_destroy_dev(struct pci_dev *dev)
36{ 22{
37 down_write(&pci_bus_sem); 23 device_del(&dev->dev);
38 list_del(&dev->bus_list);
39 up_write(&pci_bus_sem);
40 24
41 pci_free_resources(dev);
42 put_device(&dev->dev); 25 put_device(&dev->dev);
43} 26}
44 27
@@ -126,7 +109,7 @@ void pci_stop_root_bus(struct pci_bus *bus)
126 pci_stop_bus_device(child); 109 pci_stop_bus_device(child);
127 110
128 /* stop the host bridge */ 111 /* stop the host bridge */
129 device_del(&host_bridge->dev); 112 device_release_driver(&host_bridge->dev);
130} 113}
131 114
132void pci_remove_root_bus(struct pci_bus *bus) 115void pci_remove_root_bus(struct pci_bus *bus)
@@ -145,5 +128,5 @@ void pci_remove_root_bus(struct pci_bus *bus)
145 host_bridge->bus = NULL; 128 host_bridge->bus = NULL;
146 129
147 /* remove the host bridge */ 130 /* remove the host bridge */
148 put_device(&host_bridge->dev); 131 device_unregister(&host_bridge->dev);
149} 132}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 79339822d80e..138bdd6393be 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
538 struct pci_bus_region region; 538 struct pci_bus_region region;
539 unsigned long io_mask; 539 unsigned long io_mask;
540 u8 io_base_lo, io_limit_lo; 540 u8 io_base_lo, io_limit_lo;
541 u32 l, io_upper16; 541 u16 l;
542 u32 io_upper16;
542 543
543 io_mask = PCI_IO_RANGE_MASK; 544 io_mask = PCI_IO_RANGE_MASK;
544 if (bridge->io_window_1k) 545 if (bridge->io_window_1k)
@@ -548,11 +549,10 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
548 res = bus->resource[0]; 549 res = bus->resource[0];
549 pcibios_resource_to_bus(bridge->bus, &region, res); 550 pcibios_resource_to_bus(bridge->bus, &region, res);
550 if (res->flags & IORESOURCE_IO) { 551 if (res->flags & IORESOURCE_IO) {
551 pci_read_config_dword(bridge, PCI_IO_BASE, &l); 552 pci_read_config_word(bridge, PCI_IO_BASE, &l);
552 l &= 0xffff0000;
553 io_base_lo = (region.start >> 8) & io_mask; 553 io_base_lo = (region.start >> 8) & io_mask;
554 io_limit_lo = (region.end >> 8) & io_mask; 554 io_limit_lo = (region.end >> 8) & io_mask;
555 l |= ((u32) io_limit_lo << 8) | io_base_lo; 555 l = ((u16) io_limit_lo << 8) | io_base_lo;
556 /* Set up upper 16 bits of I/O base/limit. */ 556 /* Set up upper 16 bits of I/O base/limit. */
557 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 557 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
558 dev_info(&bridge->dev, " bridge window %pR\n", res); 558 dev_info(&bridge->dev, " bridge window %pR\n", res);
@@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
564 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 564 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
565 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 565 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
566 /* Update lower 16 bits of I/O base/limit. */ 566 /* Update lower 16 bits of I/O base/limit. */
567 pci_write_config_dword(bridge, PCI_IO_BASE, l); 567 pci_write_config_word(bridge, PCI_IO_BASE, l);
568 /* Update upper 16 bits of I/O base/limit. */ 568 /* Update upper 16 bits of I/O base/limit. */
569 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); 569 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
570} 570}
@@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
665 665
666 pci_read_config_word(bridge, PCI_IO_BASE, &io); 666 pci_read_config_word(bridge, PCI_IO_BASE, &io);
667 if (!io) { 667 if (!io) {
668 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); 668 pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
669 pci_read_config_word(bridge, PCI_IO_BASE, &io); 669 pci_read_config_word(bridge, PCI_IO_BASE, &io);
670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0); 670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
671 } 671 }
672 if (io) 672 if (io)
673 b_res[0].flags |= IORESOURCE_IO; 673 b_res[0].flags |= IORESOURCE_IO;
674
674 /* DECchip 21050 pass 2 errata: the bridge may miss an address 675 /* DECchip 21050 pass 2 errata: the bridge may miss an address
675 disconnect boundary by one PCI data phase. 676 disconnect boundary by one PCI data phase.
676 Workaround: do not use prefetching on this device. */ 677 Workaround: do not use prefetching on this device. */
677 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) 678 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
678 return; 679 return;
680
679 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 681 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
680 if (!pmem) { 682 if (!pmem) {
681 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 683 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
682 0xfff0fff0); 684 0xffe0fff0);
683 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 685 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
684 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); 686 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
685 } 687 }
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
new file mode 100644
index 000000000000..7e1304d2e389
--- /dev/null
+++ b/drivers/pci/vc.c
@@ -0,0 +1,434 @@
1/*
2 * PCI Virtual Channel support
3 *
4 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/device.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/pci_regs.h>
17#include <linux/types.h>
18
19/**
20 * pci_vc_save_restore_dwords - Save or restore a series of dwords
21 * @dev: device
22 * @pos: starting config space position
23 * @buf: buffer to save to or restore from
24 * @dwords: number of dwords to save/restore
25 * @save: whether to save or restore
26 */
27static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos,
28 u32 *buf, int dwords, bool save)
29{
30 int i;
31
32 for (i = 0; i < dwords; i++, buf++) {
33 if (save)
34 pci_read_config_dword(dev, pos + (i * 4), buf);
35 else
36 pci_write_config_dword(dev, pos + (i * 4), *buf);
37 }
38}
39
/**
 * pci_vc_load_arb_table - load and wait for VC arbitration table
 * @dev: device
 * @pos: starting position of VC capability (VC/VC9/MFVC)
 *
 * Set Load VC Arbitration Table bit requesting hardware to apply the VC
 * Arbitration Table (previously loaded).  When the VC Arbitration Table
 * Status clears, hardware has latched the table into VC arbitration logic.
 */
static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
{
	u16 ctrl;

	/* Read-modify-write so the other Port VC Control bits are preserved */
	pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl);
	pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
			      ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE);
	/* Success: the Table Status bit cleared within the wait window */
	if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS,
				 PCI_VC_PORT_STATUS_TABLE))
		return;

	/* Hardware never latched the table; report but carry on */
	dev_err(&dev->dev, "VC arbitration table failed to load\n");
}
62
/**
 * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table
 * @dev: device
 * @pos: starting position of VC capability (VC/VC9/MFVC)
 * @res: VC resource number, ie. VCn (0-7)
 *
 * Set Load Port Arbitration Table bit requesting hardware to apply the Port
 * Arbitration Table (previously loaded).  When the Port Arbitration Table
 * Status clears, hardware has latched the table into port arbitration logic.
 */
static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
{
	int ctrl_pos, status_pos;
	u32 ctrl;

	/* Per-VC registers are a fixed-size block; index by resource number */
	ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
	status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);

	/* Read-modify-write to keep the remaining VC Resource Control bits */
	pci_read_config_dword(dev, ctrl_pos, &ctrl);
	pci_write_config_dword(dev, ctrl_pos,
			       ctrl | PCI_VC_RES_CTRL_LOAD_TABLE);

	/* Success: the Table Status bit cleared within the wait window */
	if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
		return;

	/* Hardware never latched the table; report but carry on */
	dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res);
}
90
/**
 * pci_vc_enable - Enable virtual channel
 * @dev: device
 * @pos: starting position of VC capability (VC/VC9/MFVC)
 * @res: VC res number, ie. VCn (0-7)
 *
 * A VC is enabled by setting the enable bit in matching resource control
 * registers on both sides of a link.  We therefore need to find the opposite
 * end of the link.  To keep this simple we enable from the downstream device.
 * RC devices do not have an upstream device, nor does a VC9 capability seem
 * to (the spec is unclear).  Once we find the upstream device, match the
 * VC ID to get the correct resource, disable and enable on both ends.
 */
static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
{
	int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2;
	u32 ctrl, header, cap1, ctrl2;
	struct pci_dev *link = NULL;

	/* Enable VCs from the downstream device */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
		return;

	ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
	status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);

	/* The VC ID field of our resource is what we match on the far end */
	pci_read_config_dword(dev, ctrl_pos, &ctrl);
	id = ctrl & PCI_VC_RES_CTRL_ID;

	pci_read_config_dword(dev, pos, &header);

	/* If there is no opposite end of the link, skip to enable */
	if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 ||
	    pci_is_root_bus(dev->bus))
		goto enable;

	/* Look for the matching VC capability on the upstream bridge */
	pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC);
	if (!pos2)
		goto enable;

	pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1);
	evcc = cap1 & PCI_VC_CAP1_EVCC;

	/* VC0 is hardwired enabled, so we can start with 1 */
	for (i = 1; i < evcc + 1; i++) {
		ctrl_pos2 = pos2 + PCI_VC_RES_CTRL +
				(i * PCI_CAP_VC_PER_VC_SIZEOF);
		status_pos2 = pos2 + PCI_VC_RES_STATUS +
				(i * PCI_CAP_VC_PER_VC_SIZEOF);
		pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2);
		/* Found the upstream resource carrying the same VC ID */
		if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) {
			link = dev->bus->self;
			break;
		}
	}

	if (!link)
		goto enable;

	/* Disable if enabled */
	if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) {
		ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE;
		pci_write_config_dword(link, ctrl_pos2, ctrl2);
	}

	/* Enable on both ends */
	ctrl2 |= PCI_VC_RES_CTRL_ENABLE;
	pci_write_config_dword(link, ctrl_pos2, ctrl2);
enable:
	ctrl |= PCI_VC_RES_CTRL_ENABLE;
	pci_write_config_dword(dev, ctrl_pos, ctrl);

	/* Negotiation Pending must clear on both ends for the VC to be up */
	if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
		dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id);

	if (link && !pci_wait_for_pending(link, status_pos2,
					  PCI_VC_RES_STATUS_NEGO))
		dev_err(&link->dev, "VC%d negotiation stuck pending\n", id);
}
171
/**
 * pci_vc_do_save_buffer - Size, save, or restore VC state
 * @dev: device
 * @pos: starting position of VC capability (VC/VC9/MFVC)
 * @save_state: buffer for save/restore
 * @name: for error message
 * @save: if provided a buffer, this indicates what to do with it
 *
 * Walking Virtual Channel config space to size, save, or restore it
 * is complicated, so we do it all from one function to reduce code and
 * guarantee ordering matches in the buffer.  When called with NULL
 * @save_state, return the size of the necessary save buffer.  When called
 * with a non-NULL @save_state, @save determines whether we save to the
 * buffer or restore from it.
 *
 * Returns: required buffer size in bytes when @save_state is NULL;
 * otherwise 0 on success or -ENOMEM if @save_state is the wrong size.
 */
static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
				 struct pci_cap_saved_state *save_state,
				 bool save)
{
	u32 cap1;
	char evcc, lpevcc, parb_size;
	int i, len = 0;
	u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL;

	/*
	 * Sanity check buffer size for save/restore: re-run ourselves in
	 * sizing mode (NULL buffer) and compare against the buffer we got.
	 */
	if (buf && save_state->cap.size !=
	    pci_vc_do_save_buffer(dev, pos, NULL, save)) {
		dev_err(&dev->dev,
			"VC save buffer size does not match @0x%x\n", pos);
		return -ENOMEM;
	}

	pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
	/* Extended VC Count (not counting VC0) */
	evcc = cap1 & PCI_VC_CAP1_EVCC;
	/* Low Priority Extended VC Count (not counting VC0) */
	lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
	/* Port Arbitration Table Entry Size (bits) */
	parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);

	/*
	 * Port VC Control Register contains VC Arbitration Select, which
	 * cannot be modified when more than one LPVC is in operation.  We
	 * therefore save/restore it first, as only VC0 should be enabled
	 * after device reset.
	 */
	if (buf) {
		if (save)
			pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL,
					     (u16 *)buf);
		else
			pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
					      *(u16 *)buf);
		buf += 2;
	}
	len += 2;

	/*
	 * If we have any Low Priority VCs and a VC Arbitration Table Offset
	 * in Port VC Capability Register 2 then save/restore it next.
	 */
	if (lpevcc) {
		u32 cap2;
		int vcarb_offset;

		pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
		/* Offset field is in units of 16 bytes from the capability */
		vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;

		if (vcarb_offset) {
			int size, vcarb_phases = 0;

			if (cap2 & PCI_VC_CAP2_128_PHASE)
				vcarb_phases = 128;
			else if (cap2 & PCI_VC_CAP2_64_PHASE)
				vcarb_phases = 64;
			else if (cap2 & PCI_VC_CAP2_32_PHASE)
				vcarb_phases = 32;

			/* Fixed 4 bits per phase per lpevcc (plus VC0) */
			size = ((lpevcc + 1) * vcarb_phases * 4) / 8;

			if (size && buf) {
				pci_vc_save_restore_dwords(dev,
							   pos + vcarb_offset,
							   (u32 *)buf,
							   size / 4, save);
				/*
				 * On restore, we need to signal hardware to
				 * re-load the VC Arbitration Table.
				 */
				if (!save)
					pci_vc_load_arb_table(dev, pos);

				buf += size;
			}
			/* Size accumulates even in sizing mode (buf NULL) */
			len += size;
		}
	}

	/*
	 * In addition to each VC Resource Control Register, we may have a
	 * Port Arbitration Table attached to each VC.  The Port Arbitration
	 * Table Offset in each VC Resource Capability Register tells us if
	 * it exists.  The entry size is global from the Port VC Capability
	 * Register1 above.  The number of phases is determined per VC.
	 */
	for (i = 0; i < evcc + 1; i++) {
		u32 cap;
		int parb_offset;

		pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
				      (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
		/* Offset field is in units of 16 bytes from the capability */
		parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
		if (parb_offset) {
			int size, parb_phases = 0;

			if (cap & PCI_VC_RES_CAP_256_PHASE)
				parb_phases = 256;
			else if (cap & (PCI_VC_RES_CAP_128_PHASE |
					PCI_VC_RES_CAP_128_PHASE_TB))
				parb_phases = 128;
			else if (cap & PCI_VC_RES_CAP_64_PHASE)
				parb_phases = 64;
			else if (cap & PCI_VC_RES_CAP_32_PHASE)
				parb_phases = 32;

			size = (parb_size * parb_phases) / 8;

			if (size && buf) {
				pci_vc_save_restore_dwords(dev,
							   pos + parb_offset,
							   (u32 *)buf,
							   size / 4, save);
				buf += size;
			}
			len += size;
		}

		/* VC Resource Control Register */
		if (buf) {
			int ctrl_pos = pos + PCI_VC_RES_CTRL +
						(i * PCI_CAP_VC_PER_VC_SIZEOF);
			if (save)
				pci_read_config_dword(dev, ctrl_pos,
						      (u32 *)buf);
			else {
				u32 tmp, ctrl = *(u32 *)buf;
				/*
				 * For an FLR case, the VC config may remain.
				 * Preserve enable bit, restore the rest.
				 */
				pci_read_config_dword(dev, ctrl_pos, &tmp);
				tmp &= PCI_VC_RES_CTRL_ENABLE;
				tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE;
				pci_write_config_dword(dev, ctrl_pos, tmp);
				/* Load port arbitration table if used */
				if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT)
					pci_vc_load_port_arb_table(dev, pos, i);
				/* Re-enable if needed */
				if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE)
					pci_vc_enable(dev, pos, i);
			}
			buf += 4;
		}
		len += 4;
	}

	/* Sizing mode returns the byte count; save/restore returns success */
	return buf ? 0 : len;
}
341
/* The three VC-type extended capabilities handled by the save/restore code */
static struct {
	u16 id;
	const char *name;
} vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" },
		{ PCI_EXT_CAP_ID_VC, "VC" },
		{ PCI_EXT_CAP_ID_VC9, "VC9" } };
348
349/**
350 * pci_save_vc_state - Save VC state to pre-allocate save buffer
351 * @dev: device
352 *
353 * For each type of VC capability, VC/VC9/MFVC, find the capability and
354 * save it to the pre-allocated save buffer.
355 */
356int pci_save_vc_state(struct pci_dev *dev)
357{
358 int i;
359
360 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
361 int pos, ret;
362 struct pci_cap_saved_state *save_state;
363
364 pos = pci_find_ext_capability(dev, vc_caps[i].id);
365 if (!pos)
366 continue;
367
368 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
369 if (!save_state) {
370 dev_err(&dev->dev, "%s buffer not found in %s\n",
371 vc_caps[i].name, __func__);
372 return -ENOMEM;
373 }
374
375 ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
376 if (ret) {
377 dev_err(&dev->dev, "%s save unsuccessful %s\n",
378 vc_caps[i].name, __func__);
379 return ret;
380 }
381 }
382
383 return 0;
384}
385
386/**
387 * pci_restore_vc_state - Restore VC state from save buffer
388 * @dev: device
389 *
390 * For each type of VC capability, VC/VC9/MFVC, find the capability and
391 * restore it from the previously saved buffer.
392 */
393void pci_restore_vc_state(struct pci_dev *dev)
394{
395 int i;
396
397 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
398 int pos;
399 struct pci_cap_saved_state *save_state;
400
401 pos = pci_find_ext_capability(dev, vc_caps[i].id);
402 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
403 if (!save_state || !pos)
404 continue;
405
406 pci_vc_do_save_buffer(dev, pos, save_state, false);
407 }
408}
409
410/**
411 * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps
412 * @dev: device
413 *
414 * For each type of VC capability, VC/VC9/MFVC, find the capability, size
415 * it, and allocate a buffer for save/restore.
416 */
417
418void pci_allocate_vc_save_buffers(struct pci_dev *dev)
419{
420 int i;
421
422 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
423 int len, pos = pci_find_ext_capability(dev, vc_caps[i].id);
424
425 if (!pos)
426 continue;
427
428 len = pci_vc_do_save_buffer(dev, pos, NULL, false);
429 if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
430 dev_err(&dev->dev,
431 "unable to preallocate %s save buffer\n",
432 vc_caps[i].name);
433 }
434}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index ffd0632c3cbc..83cd1574c810 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -975,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
975 int ret, evcc, phases, vc_arb; 975 int ret, evcc, phases, vc_arb;
976 int len = PCI_CAP_VC_BASE_SIZEOF; 976 int len = PCI_CAP_VC_BASE_SIZEOF;
977 977
978 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp); 978 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
979 if (ret) 979 if (ret)
980 return pcibios_err_to_errno(ret); 980 return pcibios_err_to_errno(ret);
981 981
982 evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */ 982 evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
983 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp); 983 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
984 if (ret) 984 if (ret)
985 return pcibios_err_to_errno(ret); 985 return pcibios_err_to_errno(ret);
986 986
987 if (tmp & PCI_VC_REG2_128_PHASE) 987 if (tmp & PCI_VC_CAP2_128_PHASE)
988 phases = 128; 988 phases = 128;
989 else if (tmp & PCI_VC_REG2_64_PHASE) 989 else if (tmp & PCI_VC_CAP2_64_PHASE)
990 phases = 64; 990 phases = 64;
991 else if (tmp & PCI_VC_REG2_32_PHASE) 991 else if (tmp & PCI_VC_CAP2_32_PHASE)
992 phases = 32; 992 phases = 32;
993 else 993 else
994 phases = 0; 994 phases = 0;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
198extern size_t vmcoreinfo_size; 198extern size_t vmcoreinfo_size;
199extern size_t vmcoreinfo_max_size; 199extern size_t vmcoreinfo_max_size;
200 200
201/* flag to track if kexec reboot is in progress */
202extern bool kexec_in_progress;
203
201int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 204int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
202 unsigned long long *crash_size, unsigned long long *crash_base); 205 unsigned long long *crash_size, unsigned long long *crash_base);
203int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, 206int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 009b02481436..92a2f991262a 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -60,10 +60,10 @@ void arch_teardown_msi_irq(unsigned int irq);
60int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 60int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
61void arch_teardown_msi_irqs(struct pci_dev *dev); 61void arch_teardown_msi_irqs(struct pci_dev *dev);
62int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); 62int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
63void arch_restore_msi_irqs(struct pci_dev *dev, int irq); 63void arch_restore_msi_irqs(struct pci_dev *dev);
64 64
65void default_teardown_msi_irqs(struct pci_dev *dev); 65void default_teardown_msi_irqs(struct pci_dev *dev);
66void default_restore_msi_irqs(struct pci_dev *dev, int irq); 66void default_restore_msi_irqs(struct pci_dev *dev);
67u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 67u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
68u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); 68u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
69 69
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 095eb44fcbb6..f7d1dcc002fa 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -224,7 +224,8 @@ enum pci_bus_speed {
224}; 224};
225 225
226struct pci_cap_saved_data { 226struct pci_cap_saved_data {
227 char cap_nr; 227 u16 cap_nr;
228 bool cap_extended;
228 unsigned int size; 229 unsigned int size;
229 u32 data[0]; 230 u32 data[0];
230}; 231};
@@ -351,7 +352,7 @@ struct pci_dev {
351 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 352 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
352#ifdef CONFIG_PCI_MSI 353#ifdef CONFIG_PCI_MSI
353 struct list_head msi_list; 354 struct list_head msi_list;
354 struct kset *msi_kset; 355 const struct attribute_group **msi_irq_groups;
355#endif 356#endif
356 struct pci_vpd *vpd; 357 struct pci_vpd *vpd;
357#ifdef CONFIG_PCI_ATS 358#ifdef CONFIG_PCI_ATS
@@ -634,8 +635,7 @@ struct pci_driver {
634 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table 635 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
635 * @_table: device table name 636 * @_table: device table name
636 * 637 *
637 * This macro is used to create a struct pci_device_id array (a device table) 638 * This macro is deprecated and should not be used in new code.
638 * in a generic manner.
639 */ 639 */
640#define DEFINE_PCI_DEVICE_TABLE(_table) \ 640#define DEFINE_PCI_DEVICE_TABLE(_table) \
641 const struct pci_device_id _table[] 641 const struct pci_device_id _table[]
@@ -938,6 +938,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
938void pci_msi_off(struct pci_dev *dev); 938void pci_msi_off(struct pci_dev *dev);
939int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 939int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
940int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 940int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
941int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
941int pci_wait_for_pending_transaction(struct pci_dev *dev); 942int pci_wait_for_pending_transaction(struct pci_dev *dev);
942int pcix_get_max_mmrbc(struct pci_dev *dev); 943int pcix_get_max_mmrbc(struct pci_dev *dev);
943int pcix_get_mmrbc(struct pci_dev *dev); 944int pcix_get_mmrbc(struct pci_dev *dev);
@@ -976,6 +977,12 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
976int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); 977int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state);
977int pci_load_and_free_saved_state(struct pci_dev *dev, 978int pci_load_and_free_saved_state(struct pci_dev *dev,
978 struct pci_saved_state **state); 979 struct pci_saved_state **state);
980struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
981struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
982 u16 cap);
983int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
984int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
985 u16 cap, unsigned int size);
979int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); 986int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
980int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 987int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
981pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 988pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -997,6 +1004,11 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
997 return __pci_enable_wake(dev, state, false, enable); 1004 return __pci_enable_wake(dev, state, false, enable);
998} 1005}
999 1006
1007/* PCI Virtual Channel */
1008int pci_save_vc_state(struct pci_dev *dev);
1009void pci_restore_vc_state(struct pci_dev *dev);
1010void pci_allocate_vc_save_buffers(struct pci_dev *dev);
1011
1000#define PCI_EXP_IDO_REQUEST (1<<0) 1012#define PCI_EXP_IDO_REQUEST (1<<0)
1001#define PCI_EXP_IDO_COMPLETION (1<<1) 1013#define PCI_EXP_IDO_COMPLETION (1<<1)
1002void pci_enable_ido(struct pci_dev *dev, unsigned long type); 1014void pci_enable_ido(struct pci_dev *dev, unsigned long type);
@@ -1162,15 +1174,14 @@ struct msix_entry {
1162 1174
1163 1175
1164#ifndef CONFIG_PCI_MSI 1176#ifndef CONFIG_PCI_MSI
1165static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) 1177static inline int pci_msi_vec_count(struct pci_dev *dev)
1166{ 1178{
1167 return -1; 1179 return -ENOSYS;
1168} 1180}
1169 1181
1170static inline int 1182static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
1171pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
1172{ 1183{
1173 return -1; 1184 return -ENOSYS;
1174} 1185}
1175 1186
1176static inline void pci_msi_shutdown(struct pci_dev *dev) 1187static inline void pci_msi_shutdown(struct pci_dev *dev)
@@ -1178,14 +1189,14 @@ static inline void pci_msi_shutdown(struct pci_dev *dev)
1178static inline void pci_disable_msi(struct pci_dev *dev) 1189static inline void pci_disable_msi(struct pci_dev *dev)
1179{ } 1190{ }
1180 1191
1181static inline int pci_msix_table_size(struct pci_dev *dev) 1192static inline int pci_msix_vec_count(struct pci_dev *dev)
1182{ 1193{
1183 return 0; 1194 return -ENOSYS;
1184} 1195}
1185static inline int pci_enable_msix(struct pci_dev *dev, 1196static inline int pci_enable_msix(struct pci_dev *dev,
1186 struct msix_entry *entries, int nvec) 1197 struct msix_entry *entries, int nvec)
1187{ 1198{
1188 return -1; 1199 return -ENOSYS;
1189} 1200}
1190 1201
1191static inline void pci_msix_shutdown(struct pci_dev *dev) 1202static inline void pci_msix_shutdown(struct pci_dev *dev)
@@ -1202,18 +1213,32 @@ static inline int pci_msi_enabled(void)
1202{ 1213{
1203 return 0; 1214 return 0;
1204} 1215}
1216
1217static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
1218 int maxvec)
1219{
1220 return -ENOSYS;
1221}
1222static inline int pci_enable_msix_range(struct pci_dev *dev,
1223 struct msix_entry *entries, int minvec, int maxvec)
1224{
1225 return -ENOSYS;
1226}
1205#else 1227#else
1206int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); 1228int pci_msi_vec_count(struct pci_dev *dev);
1207int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec); 1229int pci_enable_msi_block(struct pci_dev *dev, int nvec);
1208void pci_msi_shutdown(struct pci_dev *dev); 1230void pci_msi_shutdown(struct pci_dev *dev);
1209void pci_disable_msi(struct pci_dev *dev); 1231void pci_disable_msi(struct pci_dev *dev);
1210int pci_msix_table_size(struct pci_dev *dev); 1232int pci_msix_vec_count(struct pci_dev *dev);
1211int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); 1233int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
1212void pci_msix_shutdown(struct pci_dev *dev); 1234void pci_msix_shutdown(struct pci_dev *dev);
1213void pci_disable_msix(struct pci_dev *dev); 1235void pci_disable_msix(struct pci_dev *dev);
1214void msi_remove_pci_irq_vectors(struct pci_dev *dev); 1236void msi_remove_pci_irq_vectors(struct pci_dev *dev);
1215void pci_restore_msi_state(struct pci_dev *dev); 1237void pci_restore_msi_state(struct pci_dev *dev);
1216int pci_msi_enabled(void); 1238int pci_msi_enabled(void);
1239int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
1240int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1241 int minvec, int maxvec);
1217#endif 1242#endif
1218 1243
1219#ifdef CONFIG_PCIEPORTBUS 1244#ifdef CONFIG_PCIEPORTBUS
@@ -1571,65 +1596,65 @@ enum pci_fixup_pass {
1571/* Anonymous variables would be nice... */ 1596/* Anonymous variables would be nice... */
1572#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 1597#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1573 class_shift, hook) \ 1598 class_shift, hook) \
1574 static const struct pci_fixup __pci_fixup_##name __used \ 1599 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1575 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 1600 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1576 = { vendor, device, class, class_shift, hook }; 1601 = { vendor, device, class, class_shift, hook };
1577 1602
1578#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 1603#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1579 class_shift, hook) \ 1604 class_shift, hook) \
1580 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1605 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1581 vendor##device##hook, vendor, device, class, class_shift, hook) 1606 hook, vendor, device, class, class_shift, hook)
1582#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 1607#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1583 class_shift, hook) \ 1608 class_shift, hook) \
1584 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1609 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1585 vendor##device##hook, vendor, device, class, class_shift, hook) 1610 hook, vendor, device, class, class_shift, hook)
1586#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 1611#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1587 class_shift, hook) \ 1612 class_shift, hook) \
1588 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1613 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1589 vendor##device##hook, vendor, device, class, class_shift, hook) 1614 hook, vendor, device, class, class_shift, hook)
1590#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 1615#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1591 class_shift, hook) \ 1616 class_shift, hook) \
1592 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1593 vendor##device##hook, vendor, device, class, class_shift, hook) 1618 hook, vendor, device, class, class_shift, hook)
1594#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1619#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1595 class_shift, hook) \ 1620 class_shift, hook) \
1596 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1621 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1597 resume##vendor##device##hook, vendor, device, class, \ 1622 resume##hook, vendor, device, class, \
1598 class_shift, hook) 1623 class_shift, hook)
1599#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1624#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1600 class_shift, hook) \ 1625 class_shift, hook) \
1601 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1626 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1602 resume_early##vendor##device##hook, vendor, device, \ 1627 resume_early##hook, vendor, device, \
1603 class, class_shift, hook) 1628 class, class_shift, hook)
1604#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1629#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1605 class_shift, hook) \ 1630 class_shift, hook) \
1606 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1631 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1607 suspend##vendor##device##hook, vendor, device, class, \ 1632 suspend##hook, vendor, device, class, \
1608 class_shift, hook) 1633 class_shift, hook)
1609 1634
1610#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1635#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1611 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1636 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1612 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1637 hook, vendor, device, PCI_ANY_ID, 0, hook)
1613#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1638#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1614 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1639 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1615 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1640 hook, vendor, device, PCI_ANY_ID, 0, hook)
1616#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1641#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1617 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1642 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1618 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1643 hook, vendor, device, PCI_ANY_ID, 0, hook)
1619#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1644#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1645 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1621 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1646 hook, vendor, device, PCI_ANY_ID, 0, hook)
1622#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1647#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1623 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1648 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1624 resume##vendor##device##hook, vendor, device, \ 1649 resume##hook, vendor, device, \
1625 PCI_ANY_ID, 0, hook) 1650 PCI_ANY_ID, 0, hook)
1626#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1651#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1627 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1652 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1628 resume_early##vendor##device##hook, vendor, device, \ 1653 resume_early##hook, vendor, device, \
1629 PCI_ANY_ID, 0, hook) 1654 PCI_ANY_ID, 0, hook)
1630#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1655#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1631 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1656 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1632 suspend##vendor##device##hook, vendor, device, \ 1657 suspend##hook, vendor, device, \
1633 PCI_ANY_ID, 0, hook) 1658 PCI_ANY_ID, 0, hook)
1634 1659
1635#ifdef CONFIG_PCI_QUIRKS 1660#ifdef CONFIG_PCI_QUIRKS
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a98e85438a7..ab6b4e7f6657 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -518,8 +518,16 @@
518#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ 518#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
519#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ 519#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
520#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ 520#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
521#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
522#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
523#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
521#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ 524#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */
525#define PCI_EXP_SLTCTL_PWR_IND_ON 0x0100 /* Power Indicator on */
526#define PCI_EXP_SLTCTL_PWR_IND_BLINK 0x0200 /* Power Indicator blinking */
527#define PCI_EXP_SLTCTL_PWR_IND_OFF 0x0300 /* Power Indicator off */
522#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ 528#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */
529#define PCI_EXP_SLTCTL_PWR_ON 0x0000 /* Power On */
530#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
523#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ 531#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
524#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ 532#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
525#define PCI_EXP_SLTSTA 26 /* Slot Status */ 533#define PCI_EXP_SLTSTA 26 /* Slot Status */
@@ -677,17 +685,34 @@
677#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ 685#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
678 686
679/* Virtual Channel */ 687/* Virtual Channel */
680#define PCI_VC_PORT_REG1 4 688#define PCI_VC_PORT_CAP1 4
681#define PCI_VC_REG1_EVCC 0x7 /* extended VC count */ 689#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
682#define PCI_VC_PORT_REG2 8 690#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
683#define PCI_VC_REG2_32_PHASE 0x2 691#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
684#define PCI_VC_REG2_64_PHASE 0x4 692#define PCI_VC_PORT_CAP2 8
685#define PCI_VC_REG2_128_PHASE 0x8 693#define PCI_VC_CAP2_32_PHASE 0x00000002
694#define PCI_VC_CAP2_64_PHASE 0x00000004
695#define PCI_VC_CAP2_128_PHASE 0x00000008
696#define PCI_VC_CAP2_ARB_OFF 0xff000000
686#define PCI_VC_PORT_CTRL 12 697#define PCI_VC_PORT_CTRL 12
698#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
687#define PCI_VC_PORT_STATUS 14 699#define PCI_VC_PORT_STATUS 14
700#define PCI_VC_PORT_STATUS_TABLE 0x00000001
688#define PCI_VC_RES_CAP 16 701#define PCI_VC_RES_CAP 16
702#define PCI_VC_RES_CAP_32_PHASE 0x00000002
703#define PCI_VC_RES_CAP_64_PHASE 0x00000004
704#define PCI_VC_RES_CAP_128_PHASE 0x00000008
705#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
706#define PCI_VC_RES_CAP_256_PHASE 0x00000020
707#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
689#define PCI_VC_RES_CTRL 20 708#define PCI_VC_RES_CTRL 20
709#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
710#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
711#define PCI_VC_RES_CTRL_ID 0x07000000
712#define PCI_VC_RES_CTRL_ENABLE 0x80000000
690#define PCI_VC_RES_STATUS 26 713#define PCI_VC_RES_STATUS 26
714#define PCI_VC_RES_STATUS_TABLE 0x00000001
715#define PCI_VC_RES_STATUS_NEGO 0x00000002
691#define PCI_CAP_VC_BASE_SIZEOF 0x10 716#define PCI_CAP_VC_BASE_SIZEOF 0x10
692#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C 717#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
693 718
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..d0d8fca54065 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47size_t vmcoreinfo_size; 47size_t vmcoreinfo_size;
48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); 48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 49
50/* Flag to indicate we are going to kexec a new kernel */
51bool kexec_in_progress = false;
52
50/* Location of the reserved area for the crash kernel */ 53/* Location of the reserved area for the crash kernel */
51struct resource crashk_res = { 54struct resource crashk_res = {
52 .name = "Crash kernel", 55 .name = "Crash kernel",
@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
1675 } else 1678 } else
1676#endif 1679#endif
1677 { 1680 {
1681 kexec_in_progress = true;
1678 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1679 printk(KERN_EMERG "Starting new kernel\n"); 1683 printk(KERN_EMERG "Starting new kernel\n");
1680 machine_shutdown(); 1684 machine_shutdown();
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..5690b8eabfbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2840,19 +2840,6 @@ already_gone:
2840 return false; 2840 return false;
2841} 2841}
2842 2842
2843static bool __flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr;
2846
2847 if (start_flush_work(work, &barr)) {
2848 wait_for_completion(&barr.done);
2849 destroy_work_on_stack(&barr.work);
2850 return true;
2851 } else {
2852 return false;
2853 }
2854}
2855
2856/** 2843/**
2857 * flush_work - wait for a work to finish executing the last queueing instance 2844 * flush_work - wait for a work to finish executing the last queueing instance
2858 * @work: the work to flush 2845 * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
2866 */ 2853 */
2867bool flush_work(struct work_struct *work) 2854bool flush_work(struct work_struct *work)
2868{ 2855{
2856 struct wq_barrier barr;
2857
2869 lock_map_acquire(&work->lockdep_map); 2858 lock_map_acquire(&work->lockdep_map);
2870 lock_map_release(&work->lockdep_map); 2859 lock_map_release(&work->lockdep_map);
2871 2860
2872 return __flush_work(work); 2861 if (start_flush_work(work, &barr)) {
2862 wait_for_completion(&barr.done);
2863 destroy_work_on_stack(&barr.work);
2864 return true;
2865 } else {
2866 return false;
2867 }
2873} 2868}
2874EXPORT_SYMBOL_GPL(flush_work); 2869EXPORT_SYMBOL_GPL(flush_work);
2875 2870
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4814 4809
4815 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4810 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4816 schedule_work_on(cpu, &wfc.work); 4811 schedule_work_on(cpu, &wfc.work);
4817 4812 flush_work(&wfc.work);
4818 /*
4819 * The work item is on-stack and can't lead to deadlock through
4820 * flushing. Use __flush_work() to avoid spurious lockdep warnings
4821 * when work_on_cpu()s are nested.
4822 */
4823 __flush_work(&wfc.work);
4824
4825 return wfc.ret; 4813 return wfc.ret;
4826} 4814}
4827EXPORT_SYMBOL_GPL(work_on_cpu); 4815EXPORT_SYMBOL_GPL(work_on_cpu);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 9c9810030377..9fb30b15c9dc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2634,10 +2634,13 @@ sub process {
2634 $herecurr); 2634 $herecurr);
2635 } 2635 }
2636 2636
2637# check for declarations of struct pci_device_id 2637# check for uses of DEFINE_PCI_DEVICE_TABLE
2638 if ($line =~ /\bstruct\s+pci_device_id\s+\w+\s*\[\s*\]\s*\=\s*\{/) { 2638 if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
2639 WARN("DEFINE_PCI_DEVICE_TABLE", 2639 if (WARN("DEFINE_PCI_DEVICE_TABLE",
2640 "Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\n" . $herecurr); 2640 "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
2641 $fix) {
2642 $fixed[$linenr - 1] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
2643 }
2641 } 2644 }
2642 2645
2643# check for new typedefs, only function parameters and sparse annotations 2646# check for new typedefs, only function parameters and sparse annotations