author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 17:20:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 17:20:44 -0400
commit     0e59e7e7feb5a12938fbf9135147eeda3238c6c4
tree       dbe994369ca9cad6893f0fd710f75791bc84b816 /drivers/pci
parent     46b51ea2099fa2082342e52b8284aa828429b80b
parent     a513a99a7cebfb452839cc09c9c0586f72d96414
Merge branch 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci
* 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci:
PCI: Clean-up MPS debug output
pci: Clamp pcie_set_readrq() when using "performance" settings
PCI: enable MPS "performance" setting to properly handle bridge MPS
PCI: Workaround for Intel MPS errata
PCI: Add support for PASID capability
PCI: Add implementation for PRI capability
PCI: Export ATS functions to modules
PCI: Move ATS implementation into own file
PCI / PM: Remove unnecessary error variable from acpi_dev_run_wake()
PCI hotplug: acpiphp: Prevent deadlock on PCI-to-PCI bridge remove
PCI / PM: Extend PME polling to all PCI devices
PCI quirk: mmc: Always check for lower base frequency quirk for Ricoh 1180:e823
PCI: Make pci_setup_bridge() non-static for use by arch code
x86: constify PCI raw ops structures
PCI: Add quirk for known incorrect MPSS
PCI: Add Solarflare vendor ID and SFC4000 device IDs
Diffstat (limited to 'drivers/pci')
 drivers/pci/Kconfig                |  26
 drivers/pci/Makefile               |   1
 drivers/pci/ats.c                  | 438
 drivers/pci/hotplug/acpiphp_glue.c | 109
 drivers/pci/iov.c                  | 142
 drivers/pci/pci-acpi.c             |   6
 drivers/pci/pci.c                  |  59
 drivers/pci/pcie/pme.c             |   9
 drivers/pci/probe.c                |  68
 drivers/pci/quirks.c               | 111
 drivers/pci/setup-bus.c            |   2
 11 files changed, 737 insertions, 234 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 0fa466a91bf4..cec66064ee4b 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -71,9 +71,13 @@ config HT_IRQ
71 | 71 | ||
72 | If unsure say Y. | 72 | If unsure say Y. |
73 | 73 | ||
74 | config PCI_ATS | ||
75 | bool | ||
76 | |||
74 | config PCI_IOV | 77 | config PCI_IOV |
75 | bool "PCI IOV support" | 78 | bool "PCI IOV support" |
76 | depends on PCI | 79 | depends on PCI |
80 | select PCI_ATS | ||
77 | help | 81 | help |
78 | I/O Virtualization is a PCI feature supported by some devices | 82 | I/O Virtualization is a PCI feature supported by some devices |
79 | which allows them to create virtual devices which share their | 83 | which allows them to create virtual devices which share their |
@@ -81,6 +85,28 @@ config PCI_IOV | |||
81 | 85 | ||
82 | If unsure, say N. | 86 | If unsure, say N. |
83 | 87 | ||
88 | config PCI_PRI | ||
89 | bool "PCI PRI support" | ||
90 | select PCI_ATS | ||
91 | help | ||
92 | PRI is the PCI Page Request Interface. It allows PCI devices that are | ||
93 | behind an IOMMU to recover from page faults. | ||
94 | |||
95 | If unsure, say N. | ||
96 | |||
97 | config PCI_PASID | ||
98 | bool "PCI PASID support" | ||
99 | depends on PCI | ||
100 | select PCI_ATS | ||
101 | help | ||
102 | Process Address Space Identifiers (PASIDs) can be used by PCI devices | ||
103 | to access more than one IO address space at the same time. To make | ||
104 | use of this feature an IOMMU is required which also supports PASIDs. | ||
105 | Select this option if you have such an IOMMU and want to compile the | ||
106 | driver for it into your kernel. | ||
107 | |||
108 | If unsure, say N. | ||
109 | |||
84 | config PCI_IOAPIC | 110 | config PCI_IOAPIC |
85 | bool | 111 | bool |
86 | depends on PCI | 112 | depends on PCI |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6fadae3ad134..083a49fee56a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_PCI_MSI) += msi.o
29 | # Build the Hypertransport interrupt support | 29 | # Build the Hypertransport interrupt support |
30 | obj-$(CONFIG_HT_IRQ) += htirq.o | 30 | obj-$(CONFIG_HT_IRQ) += htirq.o |
31 | 31 | ||
32 | obj-$(CONFIG_PCI_ATS) += ats.o | ||
32 | obj-$(CONFIG_PCI_IOV) += iov.o | 33 | obj-$(CONFIG_PCI_IOV) += iov.o |
33 | 34 | ||
34 | # | 35 | # |
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
new file mode 100644
index 000000000000..f727a09eb72f
--- /dev/null
+++ b/drivers/pci/ats.c
@@ -0,0 +1,438 @@
1 | /* | ||
2 | * drivers/pci/ats.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> | ||
5 | * Copyright (C) 2011 Advanced Micro Devices, | ||
6 | * | ||
7 | * PCI Express I/O Virtualization (IOV) support. | ||
8 | * Address Translation Service 1.0 | ||
9 | * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com> | ||
10 | * PASID support added by Joerg Roedel <joerg.roedel@amd.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/pci-ats.h> | ||
14 | #include <linux/pci.h> | ||
15 | |||
16 | #include "pci.h" | ||
17 | |||
18 | static int ats_alloc_one(struct pci_dev *dev, int ps) | ||
19 | { | ||
20 | int pos; | ||
21 | u16 cap; | ||
22 | struct pci_ats *ats; | ||
23 | |||
24 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
25 | if (!pos) | ||
26 | return -ENODEV; | ||
27 | |||
28 | ats = kzalloc(sizeof(*ats), GFP_KERNEL); | ||
29 | if (!ats) | ||
30 | return -ENOMEM; | ||
31 | |||
32 | ats->pos = pos; | ||
33 | ats->stu = ps; | ||
34 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
35 | ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
36 | PCI_ATS_MAX_QDEP; | ||
37 | dev->ats = ats; | ||
38 | |||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static void ats_free_one(struct pci_dev *dev) | ||
43 | { | ||
44 | kfree(dev->ats); | ||
45 | dev->ats = NULL; | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * pci_enable_ats - enable the ATS capability | ||
50 | * @dev: the PCI device | ||
51 | * @ps: the IOMMU page shift | ||
52 | * | ||
53 | * Returns 0 on success, or negative on failure. | ||
54 | */ | ||
55 | int pci_enable_ats(struct pci_dev *dev, int ps) | ||
56 | { | ||
57 | int rc; | ||
58 | u16 ctrl; | ||
59 | |||
60 | BUG_ON(dev->ats && dev->ats->is_enabled); | ||
61 | |||
62 | if (ps < PCI_ATS_MIN_STU) | ||
63 | return -EINVAL; | ||
64 | |||
65 | if (dev->is_physfn || dev->is_virtfn) { | ||
66 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
67 | |||
68 | mutex_lock(&pdev->sriov->lock); | ||
69 | if (pdev->ats) | ||
70 | rc = pdev->ats->stu == ps ? 0 : -EINVAL; | ||
71 | else | ||
72 | rc = ats_alloc_one(pdev, ps); | ||
73 | |||
74 | if (!rc) | ||
75 | pdev->ats->ref_cnt++; | ||
76 | mutex_unlock(&pdev->sriov->lock); | ||
77 | if (rc) | ||
78 | return rc; | ||
79 | } | ||
80 | |||
81 | if (!dev->is_physfn) { | ||
82 | rc = ats_alloc_one(dev, ps); | ||
83 | if (rc) | ||
84 | return rc; | ||
85 | } | ||
86 | |||
87 | ctrl = PCI_ATS_CTRL_ENABLE; | ||
88 | if (!dev->is_virtfn) | ||
89 | ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); | ||
90 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
91 | |||
92 | dev->ats->is_enabled = 1; | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(pci_enable_ats); | ||
97 | |||
98 | /** | ||
99 | * pci_disable_ats - disable the ATS capability | ||
100 | * @dev: the PCI device | ||
101 | */ | ||
102 | void pci_disable_ats(struct pci_dev *dev) | ||
103 | { | ||
104 | u16 ctrl; | ||
105 | |||
106 | BUG_ON(!dev->ats || !dev->ats->is_enabled); | ||
107 | |||
108 | pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); | ||
109 | ctrl &= ~PCI_ATS_CTRL_ENABLE; | ||
110 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
111 | |||
112 | dev->ats->is_enabled = 0; | ||
113 | |||
114 | if (dev->is_physfn || dev->is_virtfn) { | ||
115 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
116 | |||
117 | mutex_lock(&pdev->sriov->lock); | ||
118 | pdev->ats->ref_cnt--; | ||
119 | if (!pdev->ats->ref_cnt) | ||
120 | ats_free_one(pdev); | ||
121 | mutex_unlock(&pdev->sriov->lock); | ||
122 | } | ||
123 | |||
124 | if (!dev->is_physfn) | ||
125 | ats_free_one(dev); | ||
126 | } | ||
127 | EXPORT_SYMBOL_GPL(pci_disable_ats); | ||
128 | |||
129 | /** | ||
130 | * pci_ats_queue_depth - query the ATS Invalidate Queue Depth | ||
131 | * @dev: the PCI device | ||
132 | * | ||
133 | * Returns the queue depth on success, or negative on failure. | ||
134 | * | ||
135 | * The ATS spec uses 0 in the Invalidate Queue Depth field to | ||
136 | * indicate that the function can accept 32 Invalidate Requests. | ||
137 | * But here we use the `real' values (i.e. 1~32) for the Queue | ||
138 | * Depth; and 0 indicates the function shares the Queue with | ||
139 | * other functions (doesn't exclusively own a Queue). | ||
140 | */ | ||
141 | int pci_ats_queue_depth(struct pci_dev *dev) | ||
142 | { | ||
143 | int pos; | ||
144 | u16 cap; | ||
145 | |||
146 | if (dev->is_virtfn) | ||
147 | return 0; | ||
148 | |||
149 | if (dev->ats) | ||
150 | return dev->ats->qdep; | ||
151 | |||
152 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
153 | if (!pos) | ||
154 | return -ENODEV; | ||
155 | |||
156 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
157 | |||
158 | return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
159 | PCI_ATS_MAX_QDEP; | ||
160 | } | ||
161 | EXPORT_SYMBOL_GPL(pci_ats_queue_depth); | ||
162 | |||
163 | #ifdef CONFIG_PCI_PRI | ||
164 | /** | ||
165 | * pci_enable_pri - Enable PRI capability | ||
166 | * @pdev: PCI device structure | ||
167 | * | ||
168 | * Returns 0 on success, negative value on error | ||
169 | */ | ||
170 | int pci_enable_pri(struct pci_dev *pdev, u32 reqs) | ||
171 | { | ||
172 | u16 control, status; | ||
173 | u32 max_requests; | ||
174 | int pos; | ||
175 | |||
176 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
177 | if (!pos) | ||
178 | return -EINVAL; | ||
179 | |||
180 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
181 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
182 | if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED)) | ||
183 | return -EBUSY; | ||
184 | |||
185 | pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests); | ||
186 | reqs = min(max_requests, reqs); | ||
187 | pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs); | ||
188 | |||
189 | control |= PCI_PRI_ENABLE; | ||
190 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(pci_enable_pri); | ||
195 | |||
196 | /** | ||
197 | * pci_disable_pri - Disable PRI capability | ||
198 | * @pdev: PCI device structure | ||
199 | * | ||
200 | * Only clears the enabled-bit, regardless of its former value | ||
201 | */ | ||
202 | void pci_disable_pri(struct pci_dev *pdev) | ||
203 | { | ||
204 | u16 control; | ||
205 | int pos; | ||
206 | |||
207 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
208 | if (!pos) | ||
209 | return; | ||
210 | |||
211 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
212 | control &= ~PCI_PRI_ENABLE; | ||
213 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(pci_disable_pri); | ||
216 | |||
217 | /** | ||
218 | * pci_pri_enabled - Checks if PRI capability is enabled | ||
219 | * @pdev: PCI device structure | ||
220 | * | ||
221 | * Returns true if PRI is enabled on the device, false otherwise | ||
222 | */ | ||
223 | bool pci_pri_enabled(struct pci_dev *pdev) | ||
224 | { | ||
225 | u16 control; | ||
226 | int pos; | ||
227 | |||
228 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
229 | if (!pos) | ||
230 | return false; | ||
231 | |||
232 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
233 | |||
234 | return (control & PCI_PRI_ENABLE) ? true : false; | ||
235 | } | ||
236 | EXPORT_SYMBOL_GPL(pci_pri_enabled); | ||
237 | |||
238 | /** | ||
239 | * pci_reset_pri - Resets device's PRI state | ||
240 | * @pdev: PCI device structure | ||
241 | * | ||
242 | * The PRI capability must be disabled before this function is called. | ||
243 | * Returns 0 on success, negative value on error. | ||
244 | */ | ||
245 | int pci_reset_pri(struct pci_dev *pdev) | ||
246 | { | ||
247 | u16 control; | ||
248 | int pos; | ||
249 | |||
250 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
251 | if (!pos) | ||
252 | return -EINVAL; | ||
253 | |||
254 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
255 | if (control & PCI_PRI_ENABLE) | ||
256 | return -EBUSY; | ||
257 | |||
258 | control |= PCI_PRI_RESET; | ||
259 | |||
260 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | EXPORT_SYMBOL_GPL(pci_reset_pri); | ||
265 | |||
266 | /** | ||
267 | * pci_pri_stopped - Checks whether the PRI capability is stopped | ||
268 | * @pdev: PCI device structure | ||
269 | * | ||
270 | * Returns true if the PRI capability on the device is disabled and the | ||
271 | * device has no outstanding PRI requests, false otherwise. The device | ||
272 | * indicates this via the STOPPED bit in the status register of the | ||
273 | * capability. | ||
274 | * The device internal state can be cleared by resetting the PRI state | ||
275 | * with pci_reset_pri(). This can force the capability into the STOPPED | ||
276 | * state. | ||
277 | */ | ||
278 | bool pci_pri_stopped(struct pci_dev *pdev) | ||
279 | { | ||
280 | u16 control, status; | ||
281 | int pos; | ||
282 | |||
283 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
284 | if (!pos) | ||
285 | return true; | ||
286 | |||
287 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
288 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
289 | |||
290 | if (control & PCI_PRI_ENABLE) | ||
291 | return false; | ||
292 | |||
293 | return (status & PCI_PRI_STATUS_STOPPED) ? true : false; | ||
294 | } | ||
295 | EXPORT_SYMBOL_GPL(pci_pri_stopped); | ||
296 | |||
297 | /** | ||
298 | * pci_pri_status - Request PRI status of a device | ||
299 | * @pdev: PCI device structure | ||
300 | * | ||
301 | * Returns negative value on failure, status on success. The status can | ||
302 | * be checked against status-bits. Supported bits are currently: | ||
303 | * PCI_PRI_STATUS_RF: Response failure | ||
304 | * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index | ||
305 | * PCI_PRI_STATUS_STOPPED: PRI has stopped | ||
306 | */ | ||
307 | int pci_pri_status(struct pci_dev *pdev) | ||
308 | { | ||
309 | u16 status, control; | ||
310 | int pos; | ||
311 | |||
312 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
313 | if (!pos) | ||
314 | return -EINVAL; | ||
315 | |||
316 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
317 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
318 | |||
319 | /* Stopped bit is undefined when enable == 1, so clear it */ | ||
320 | if (control & PCI_PRI_ENABLE) | ||
321 | status &= ~PCI_PRI_STATUS_STOPPED; | ||
322 | |||
323 | return status; | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(pci_pri_status); | ||
326 | #endif /* CONFIG_PCI_PRI */ | ||
327 | |||
328 | #ifdef CONFIG_PCI_PASID | ||
329 | /** | ||
330 | * pci_enable_pasid - Enable the PASID capability | ||
331 | * @pdev: PCI device structure | ||
332 | * @features: Features to enable | ||
333 | * | ||
334 | * Returns 0 on success, negative value on error. This function checks | ||
335 | * whether the features are actually supported by the device and returns | ||
336 | * an error if not. | ||
337 | */ | ||
338 | int pci_enable_pasid(struct pci_dev *pdev, int features) | ||
339 | { | ||
340 | u16 control, supported; | ||
341 | int pos; | ||
342 | |||
343 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
344 | if (!pos) | ||
345 | return -EINVAL; | ||
346 | |||
347 | pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control); | ||
348 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
349 | |||
350 | if (!(supported & PCI_PASID_ENABLE)) | ||
351 | return -EINVAL; | ||
352 | |||
353 | supported &= PCI_PASID_EXEC | PCI_PASID_PRIV; | ||
354 | |||
355 | /* User wants to enable anything unsupported? */ | ||
356 | if ((supported & features) != features) | ||
357 | return -EINVAL; | ||
358 | |||
359 | control = PCI_PASID_ENABLE | features; | ||
360 | |||
361 | pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(pci_enable_pasid); | ||
366 | |||
367 | /** | ||
368 | * pci_disable_pasid - Disable the PASID capability | ||
369 | * @pdev: PCI device structure | ||
370 | * | ||
371 | */ | ||
372 | void pci_disable_pasid(struct pci_dev *pdev) | ||
373 | { | ||
374 | u16 control = 0; | ||
375 | int pos; | ||
376 | |||
377 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
378 | if (!pos) | ||
379 | return; | ||
380 | |||
381 | pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(pci_disable_pasid); | ||
384 | |||
385 | /** | ||
386 | * pci_pasid_features - Check which PASID features are supported | ||
387 | * @pdev: PCI device structure | ||
388 | * | ||
389 | * Returns a negative value when no PASID capability is present. | ||
390 | * Otherwise it returns a bitmask with supported features. Current | ||
391 | * features reported are: | ||
392 | * PCI_PASID_ENABLE - PASID capability can be enabled | ||
393 | * PCI_PASID_EXEC - Execute permission supported | ||
394 | * PCI_PASID_PRIV - Privileged mode supported | ||
395 | */ | ||
396 | int pci_pasid_features(struct pci_dev *pdev) | ||
397 | { | ||
398 | u16 supported; | ||
399 | int pos; | ||
400 | |||
401 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
402 | if (!pos) | ||
403 | return -EINVAL; | ||
404 | |||
405 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
406 | |||
407 | supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV; | ||
408 | |||
409 | return supported; | ||
410 | } | ||
411 | EXPORT_SYMBOL_GPL(pci_pasid_features); | ||
412 | |||
413 | #define PASID_NUMBER_SHIFT 8 | ||
414 | #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) | ||
415 | /** | ||
416 | * pci_max_pasid - Get maximum number of PASIDs supported by device | ||
417 | * @pdev: PCI device structure | ||
418 | * | ||
419 | * Returns negative value when PASID capability is not present. | ||
420 | * Otherwise it returns the number of supported PASIDs. | ||
421 | */ | ||
422 | int pci_max_pasids(struct pci_dev *pdev) | ||
423 | { | ||
424 | u16 supported; | ||
425 | int pos; | ||
426 | |||
427 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
428 | if (!pos) | ||
429 | return -EINVAL; | ||
430 | |||
431 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
432 | |||
433 | supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT; | ||
434 | |||
435 | return (1 << supported); | ||
436 | } | ||
437 | EXPORT_SYMBOL_GPL(pci_max_pasids); | ||
438 | #endif /* CONFIG_PCI_PASID */ | ||
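
Note (not part of the patch): the sketch below shows one way an IOMMU driver might consume the ATS/PRI/PASID helpers exported by the new ats.c above. The calls and their signatures are the ones added in this file; the device pointer, the PAGE_SHIFT page-shift argument, the 32-request PRI allocation, and the helper name itself are assumptions made purely for illustration.

/*
 * Illustrative only: enable ATS, PASID and PRI on a device, undoing the
 * earlier steps if a later one fails.  Assumes PAGE_SHIFT >= PCI_ATS_MIN_STU.
 */
#include <linux/pci.h>
#include <linux/pci-ats.h>

static int example_enable_iommu_features(struct pci_dev *pdev)
{
	int ret, features;

	/* ATS: the page shift must be at least PCI_ATS_MIN_STU (4K pages). */
	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;

	/* Enable only the PASID features the device actually reports. */
	features = pci_pasid_features(pdev);
	if (features >= 0) {
		ret = pci_enable_pasid(pdev, features &
				       (PCI_PASID_EXEC | PCI_PASID_PRIV));
		if (ret)
			goto out_ats;
	}

	/* PRI: ask for credits for 32 outstanding page requests. */
	ret = pci_enable_pri(pdev, 32);
	if (ret)
		goto out_pasid;

	return 0;

out_pasid:
	if (features >= 0)
		pci_disable_pasid(pdev);
out_ats:
	pci_disable_ats(pdev);
	return ret;
}
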
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 220285760b68..596172b4ae95 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -48,6 +48,7 @@
48 | #include <linux/pci-acpi.h> | 48 | #include <linux/pci-acpi.h> |
49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
50 | #include <linux/slab.h> | 50 | #include <linux/slab.h> |
51 | #include <linux/acpi.h> | ||
51 | 52 | ||
52 | #include "../pci.h" | 53 | #include "../pci.h" |
53 | #include "acpiphp.h" | 54 | #include "acpiphp.h" |
@@ -1149,15 +1150,35 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
1149 | return AE_OK ; | 1150 | return AE_OK ; |
1150 | } | 1151 | } |
1151 | 1152 | ||
1152 | /** | 1153 | struct acpiphp_hp_work { |
1153 | * handle_hotplug_event_bridge - handle ACPI event on bridges | 1154 | struct work_struct work; |
1154 | * @handle: Notify()'ed acpi_handle | 1155 | acpi_handle handle; |
1155 | * @type: Notify code | 1156 | u32 type; |
1156 | * @context: pointer to acpiphp_bridge structure | 1157 | void *context; |
1157 | * | 1158 | }; |
1158 | * Handles ACPI event notification on {host,p2p} bridges. | 1159 | |
1159 | */ | 1160 | static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type, |
1160 | static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context) | 1161 | void *context, |
1162 | void (*func)(struct work_struct *work)) | ||
1163 | { | ||
1164 | struct acpiphp_hp_work *hp_work; | ||
1165 | int ret; | ||
1166 | |||
1167 | hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL); | ||
1168 | if (!hp_work) | ||
1169 | return; | ||
1170 | |||
1171 | hp_work->handle = handle; | ||
1172 | hp_work->type = type; | ||
1173 | hp_work->context = context; | ||
1174 | |||
1175 | INIT_WORK(&hp_work->work, func); | ||
1176 | ret = queue_work(kacpi_hotplug_wq, &hp_work->work); | ||
1177 | if (!ret) | ||
1178 | kfree(hp_work); | ||
1179 | } | ||
1180 | |||
1181 | static void _handle_hotplug_event_bridge(struct work_struct *work) | ||
1161 | { | 1182 | { |
1162 | struct acpiphp_bridge *bridge; | 1183 | struct acpiphp_bridge *bridge; |
1163 | char objname[64]; | 1184 | char objname[64]; |
@@ -1165,11 +1186,18 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1165 | .pointer = objname }; | 1186 | .pointer = objname }; |
1166 | struct acpi_device *device; | 1187 | struct acpi_device *device; |
1167 | int num_sub_bridges = 0; | 1188 | int num_sub_bridges = 0; |
1189 | struct acpiphp_hp_work *hp_work; | ||
1190 | acpi_handle handle; | ||
1191 | u32 type; | ||
1192 | |||
1193 | hp_work = container_of(work, struct acpiphp_hp_work, work); | ||
1194 | handle = hp_work->handle; | ||
1195 | type = hp_work->type; | ||
1168 | 1196 | ||
1169 | if (acpi_bus_get_device(handle, &device)) { | 1197 | if (acpi_bus_get_device(handle, &device)) { |
1170 | /* This bridge must have just been physically inserted */ | 1198 | /* This bridge must have just been physically inserted */ |
1171 | handle_bridge_insertion(handle, type); | 1199 | handle_bridge_insertion(handle, type); |
1172 | return; | 1200 | goto out; |
1173 | } | 1201 | } |
1174 | 1202 | ||
1175 | bridge = acpiphp_handle_to_bridge(handle); | 1203 | bridge = acpiphp_handle_to_bridge(handle); |
@@ -1180,7 +1208,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1180 | 1208 | ||
1181 | if (!bridge && !num_sub_bridges) { | 1209 | if (!bridge && !num_sub_bridges) { |
1182 | err("cannot get bridge info\n"); | 1210 | err("cannot get bridge info\n"); |
1183 | return; | 1211 | goto out; |
1184 | } | 1212 | } |
1185 | 1213 | ||
1186 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 1214 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
@@ -1241,22 +1269,49 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1241 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); | 1269 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); |
1242 | break; | 1270 | break; |
1243 | } | 1271 | } |
1272 | |||
1273 | out: | ||
1274 | kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ | ||
1244 | } | 1275 | } |
1245 | 1276 | ||
1246 | /** | 1277 | /** |
1247 | * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) | 1278 | * handle_hotplug_event_bridge - handle ACPI event on bridges |
1248 | * @handle: Notify()'ed acpi_handle | 1279 | * @handle: Notify()'ed acpi_handle |
1249 | * @type: Notify code | 1280 | * @type: Notify code |
1250 | * @context: pointer to acpiphp_func structure | 1281 | * @context: pointer to acpiphp_bridge structure |
1251 | * | 1282 | * |
1252 | * Handles ACPI event notification on slots. | 1283 | * Handles ACPI event notification on {host,p2p} bridges. |
1253 | */ | 1284 | */ |
1254 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context) | 1285 | static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, |
1286 | void *context) | ||
1287 | { | ||
1288 | /* | ||
1289 | * Currently the code adds all hotplug events to the kacpid_wq | ||
1290 | * queue when it should add hotplug events to the kacpi_hotplug_wq. | ||
1291 | * The proper way to fix this is to reorganize the code so that | ||
1292 | * drivers (dock, etc.) do not call acpi_os_execute(), etc. | ||
1293 | * For now just re-add this work to the kacpi_hotplug_wq so we | ||
1294 | * don't deadlock on hotplug actions. | ||
1295 | */ | ||
1296 | alloc_acpiphp_hp_work(handle, type, context, | ||
1297 | _handle_hotplug_event_bridge); | ||
1298 | } | ||
1299 | |||
1300 | static void _handle_hotplug_event_func(struct work_struct *work) | ||
1255 | { | 1301 | { |
1256 | struct acpiphp_func *func; | 1302 | struct acpiphp_func *func; |
1257 | char objname[64]; | 1303 | char objname[64]; |
1258 | struct acpi_buffer buffer = { .length = sizeof(objname), | 1304 | struct acpi_buffer buffer = { .length = sizeof(objname), |
1259 | .pointer = objname }; | 1305 | .pointer = objname }; |
1306 | struct acpiphp_hp_work *hp_work; | ||
1307 | acpi_handle handle; | ||
1308 | u32 type; | ||
1309 | void *context; | ||
1310 | |||
1311 | hp_work = container_of(work, struct acpiphp_hp_work, work); | ||
1312 | handle = hp_work->handle; | ||
1313 | type = hp_work->type; | ||
1314 | context = hp_work->context; | ||
1260 | 1315 | ||
1261 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 1316 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
1262 | 1317 | ||
@@ -1291,8 +1346,32 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex | |||
1291 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); | 1346 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); |
1292 | break; | 1347 | break; |
1293 | } | 1348 | } |
1349 | |||
1350 | kfree(hp_work); /* allocated in handle_hotplug_event_func */ | ||
1294 | } | 1351 | } |
1295 | 1352 | ||
1353 | /** | ||
1354 | * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) | ||
1355 | * @handle: Notify()'ed acpi_handle | ||
1356 | * @type: Notify code | ||
1357 | * @context: pointer to acpiphp_func structure | ||
1358 | * | ||
1359 | * Handles ACPI event notification on slots. | ||
1360 | */ | ||
1361 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, | ||
1362 | void *context) | ||
1363 | { | ||
1364 | /* | ||
1365 | * Currently the code adds all hotplug events to the kacpid_wq | ||
1366 | * queue when it should add hotplug events to the kacpi_hotplug_wq. | ||
1367 | * The proper way to fix this is to reorganize the code so that | ||
1368 | * drivers (dock, etc.) do not call acpi_os_execute(), etc. | ||
1369 | * For now just re-add this work to the kacpi_hotplug_wq so we | ||
1370 | * don't deadlock on hotplug actions. | ||
1371 | */ | ||
1372 | alloc_acpiphp_hp_work(handle, type, context, | ||
1373 | _handle_hotplug_event_func); | ||
1374 | } | ||
1296 | 1375 | ||
1297 | static acpi_status | 1376 | static acpi_status |
1298 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | 1377 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 42fae4776515..9b4e88c636f8 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -722,145 +722,3 @@ int pci_num_vf(struct pci_dev *dev)
722 | return dev->sriov->nr_virtfn; | 722 | return dev->sriov->nr_virtfn; |
723 | } | 723 | } |
724 | EXPORT_SYMBOL_GPL(pci_num_vf); | 724 | EXPORT_SYMBOL_GPL(pci_num_vf); |
725 | |||
726 | static int ats_alloc_one(struct pci_dev *dev, int ps) | ||
727 | { | ||
728 | int pos; | ||
729 | u16 cap; | ||
730 | struct pci_ats *ats; | ||
731 | |||
732 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
733 | if (!pos) | ||
734 | return -ENODEV; | ||
735 | |||
736 | ats = kzalloc(sizeof(*ats), GFP_KERNEL); | ||
737 | if (!ats) | ||
738 | return -ENOMEM; | ||
739 | |||
740 | ats->pos = pos; | ||
741 | ats->stu = ps; | ||
742 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
743 | ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
744 | PCI_ATS_MAX_QDEP; | ||
745 | dev->ats = ats; | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static void ats_free_one(struct pci_dev *dev) | ||
751 | { | ||
752 | kfree(dev->ats); | ||
753 | dev->ats = NULL; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * pci_enable_ats - enable the ATS capability | ||
758 | * @dev: the PCI device | ||
759 | * @ps: the IOMMU page shift | ||
760 | * | ||
761 | * Returns 0 on success, or negative on failure. | ||
762 | */ | ||
763 | int pci_enable_ats(struct pci_dev *dev, int ps) | ||
764 | { | ||
765 | int rc; | ||
766 | u16 ctrl; | ||
767 | |||
768 | BUG_ON(dev->ats && dev->ats->is_enabled); | ||
769 | |||
770 | if (ps < PCI_ATS_MIN_STU) | ||
771 | return -EINVAL; | ||
772 | |||
773 | if (dev->is_physfn || dev->is_virtfn) { | ||
774 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
775 | |||
776 | mutex_lock(&pdev->sriov->lock); | ||
777 | if (pdev->ats) | ||
778 | rc = pdev->ats->stu == ps ? 0 : -EINVAL; | ||
779 | else | ||
780 | rc = ats_alloc_one(pdev, ps); | ||
781 | |||
782 | if (!rc) | ||
783 | pdev->ats->ref_cnt++; | ||
784 | mutex_unlock(&pdev->sriov->lock); | ||
785 | if (rc) | ||
786 | return rc; | ||
787 | } | ||
788 | |||
789 | if (!dev->is_physfn) { | ||
790 | rc = ats_alloc_one(dev, ps); | ||
791 | if (rc) | ||
792 | return rc; | ||
793 | } | ||
794 | |||
795 | ctrl = PCI_ATS_CTRL_ENABLE; | ||
796 | if (!dev->is_virtfn) | ||
797 | ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); | ||
798 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
799 | |||
800 | dev->ats->is_enabled = 1; | ||
801 | |||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * pci_disable_ats - disable the ATS capability | ||
807 | * @dev: the PCI device | ||
808 | */ | ||
809 | void pci_disable_ats(struct pci_dev *dev) | ||
810 | { | ||
811 | u16 ctrl; | ||
812 | |||
813 | BUG_ON(!dev->ats || !dev->ats->is_enabled); | ||
814 | |||
815 | pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); | ||
816 | ctrl &= ~PCI_ATS_CTRL_ENABLE; | ||
817 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
818 | |||
819 | dev->ats->is_enabled = 0; | ||
820 | |||
821 | if (dev->is_physfn || dev->is_virtfn) { | ||
822 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
823 | |||
824 | mutex_lock(&pdev->sriov->lock); | ||
825 | pdev->ats->ref_cnt--; | ||
826 | if (!pdev->ats->ref_cnt) | ||
827 | ats_free_one(pdev); | ||
828 | mutex_unlock(&pdev->sriov->lock); | ||
829 | } | ||
830 | |||
831 | if (!dev->is_physfn) | ||
832 | ats_free_one(dev); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * pci_ats_queue_depth - query the ATS Invalidate Queue Depth | ||
837 | * @dev: the PCI device | ||
838 | * | ||
839 | * Returns the queue depth on success, or negative on failure. | ||
840 | * | ||
841 | * The ATS spec uses 0 in the Invalidate Queue Depth field to | ||
842 | * indicate that the function can accept 32 Invalidate Request. | ||
843 | * But here we use the `real' values (i.e. 1~32) for the Queue | ||
844 | * Depth; and 0 indicates the function shares the Queue with | ||
845 | * other functions (doesn't exclusively own a Queue). | ||
846 | */ | ||
847 | int pci_ats_queue_depth(struct pci_dev *dev) | ||
848 | { | ||
849 | int pos; | ||
850 | u16 cap; | ||
851 | |||
852 | if (dev->is_virtfn) | ||
853 | return 0; | ||
854 | |||
855 | if (dev->ats) | ||
856 | return dev->ats->qdep; | ||
857 | |||
858 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
859 | if (!pos) | ||
860 | return -ENODEV; | ||
861 | |||
862 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
863 | |||
864 | return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
865 | PCI_ATS_MAX_QDEP; | ||
866 | } | ||
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d36f41ea8cbf..4ecb6408b0d6 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,6 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
46 | struct pci_dev *pci_dev = context; | 46 | struct pci_dev *pci_dev = context; |
47 | 47 | ||
48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { | 48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { |
49 | if (pci_dev->pme_poll) | ||
50 | pci_dev->pme_poll = false; | ||
51 | |||
49 | pci_wakeup_event(pci_dev); | 52 | pci_wakeup_event(pci_dev); |
50 | pci_check_pme_status(pci_dev); | 53 | pci_check_pme_status(pci_dev); |
51 | pm_runtime_resume(&pci_dev->dev); | 54 | pm_runtime_resume(&pci_dev->dev); |
@@ -282,7 +285,6 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | |||
282 | { | 285 | { |
283 | struct acpi_device *dev; | 286 | struct acpi_device *dev; |
284 | acpi_handle handle; | 287 | acpi_handle handle; |
285 | int error = -ENODEV; | ||
286 | 288 | ||
287 | if (!device_run_wake(phys_dev)) | 289 | if (!device_run_wake(phys_dev)) |
288 | return -EINVAL; | 290 | return -EINVAL; |
@@ -302,7 +304,7 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | |||
302 | acpi_disable_wakeup_device_power(dev); | 304 | acpi_disable_wakeup_device_power(dev); |
303 | } | 305 | } |
304 | 306 | ||
305 | return error; | 307 | return 0; |
306 | } | 308 | } |
307 | 309 | ||
308 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | 310 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9651f0a8817..6f45a73c6e9f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1407,13 +1407,16 @@ bool pci_check_pme_status(struct pci_dev *dev)
1407 | /** | 1407 | /** |
1408 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | 1408 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. |
1409 | * @dev: Device to handle. | 1409 | * @dev: Device to handle. |
1410 | * @ign: Ignored. | 1410 | * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. |
1411 | * | 1411 | * |
1412 | * Check if @dev has generated PME and queue a resume request for it in that | 1412 | * Check if @dev has generated PME and queue a resume request for it in that |
1413 | * case. | 1413 | * case. |
1414 | */ | 1414 | */ |
1415 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | 1415 | static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) |
1416 | { | 1416 | { |
1417 | if (pme_poll_reset && dev->pme_poll) | ||
1418 | dev->pme_poll = false; | ||
1419 | |||
1417 | if (pci_check_pme_status(dev)) { | 1420 | if (pci_check_pme_status(dev)) { |
1418 | pci_wakeup_event(dev); | 1421 | pci_wakeup_event(dev); |
1419 | pm_request_resume(&dev->dev); | 1422 | pm_request_resume(&dev->dev); |
@@ -1428,7 +1431,7 @@ static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | |||
1428 | void pci_pme_wakeup_bus(struct pci_bus *bus) | 1431 | void pci_pme_wakeup_bus(struct pci_bus *bus) |
1429 | { | 1432 | { |
1430 | if (bus) | 1433 | if (bus) |
1431 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | 1434 | pci_walk_bus(bus, pci_pme_wakeup, (void *)true); |
1432 | } | 1435 | } |
1433 | 1436 | ||
1434 | /** | 1437 | /** |
@@ -1446,31 +1449,26 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) | |||
1446 | 1449 | ||
1447 | static void pci_pme_list_scan(struct work_struct *work) | 1450 | static void pci_pme_list_scan(struct work_struct *work) |
1448 | { | 1451 | { |
1449 | struct pci_pme_device *pme_dev; | 1452 | struct pci_pme_device *pme_dev, *n; |
1450 | 1453 | ||
1451 | mutex_lock(&pci_pme_list_mutex); | 1454 | mutex_lock(&pci_pme_list_mutex); |
1452 | if (!list_empty(&pci_pme_list)) { | 1455 | if (!list_empty(&pci_pme_list)) { |
1453 | list_for_each_entry(pme_dev, &pci_pme_list, list) | 1456 | list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { |
1454 | pci_pme_wakeup(pme_dev->dev, NULL); | 1457 | if (pme_dev->dev->pme_poll) { |
1455 | schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); | 1458 | pci_pme_wakeup(pme_dev->dev, NULL); |
1459 | } else { | ||
1460 | list_del(&pme_dev->list); | ||
1461 | kfree(pme_dev); | ||
1462 | } | ||
1463 | } | ||
1464 | if (!list_empty(&pci_pme_list)) | ||
1465 | schedule_delayed_work(&pci_pme_work, | ||
1466 | msecs_to_jiffies(PME_TIMEOUT)); | ||
1456 | } | 1467 | } |
1457 | mutex_unlock(&pci_pme_list_mutex); | 1468 | mutex_unlock(&pci_pme_list_mutex); |
1458 | } | 1469 | } |
1459 | 1470 | ||
1460 | /** | 1471 | /** |
1461 | * pci_external_pme - is a device an external PCI PME source? | ||
1462 | * @dev: PCI device to check | ||
1463 | * | ||
1464 | */ | ||
1465 | |||
1466 | static bool pci_external_pme(struct pci_dev *dev) | ||
1467 | { | ||
1468 | if (pci_is_pcie(dev) || dev->bus->number == 0) | ||
1469 | return false; | ||
1470 | return true; | ||
1471 | } | ||
1472 | |||
1473 | /** | ||
1474 | * pci_pme_active - enable or disable PCI device's PME# function | 1472 | * pci_pme_active - enable or disable PCI device's PME# function |
1475 | * @dev: PCI device to handle. | 1473 | * @dev: PCI device to handle. |
1476 | * @enable: 'true' to enable PME# generation; 'false' to disable it. | 1474 | * @enable: 'true' to enable PME# generation; 'false' to disable it. |
@@ -1503,7 +1501,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1503 | hit, and the power savings from the devices will still be a | 1501 | hit, and the power savings from the devices will still be a |
1504 | win. */ | 1502 | win. */ |
1505 | 1503 | ||
1506 | if (pci_external_pme(dev)) { | 1504 | if (dev->pme_poll) { |
1507 | struct pci_pme_device *pme_dev; | 1505 | struct pci_pme_device *pme_dev; |
1508 | if (enable) { | 1506 | if (enable) { |
1509 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | 1507 | pme_dev = kmalloc(sizeof(struct pci_pme_device), |
@@ -1821,6 +1819,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
1821 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", | 1819 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", |
1822 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); | 1820 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); |
1823 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; | 1821 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; |
1822 | dev->pme_poll = true; | ||
1824 | /* | 1823 | /* |
1825 | * Make device's PM flags reflect the wake-up capability, but | 1824 | * Make device's PM flags reflect the wake-up capability, but |
1826 | * let the user space enable it to wake up the system as needed. | 1825 | * let the user space enable it to wake up the system as needed. |
@@ -3203,8 +3202,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
3203 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) | 3202 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
3204 | goto out; | 3203 | goto out; |
3205 | 3204 | ||
3206 | v = (ffs(rq) - 8) << 12; | ||
3207 | |||
3208 | cap = pci_pcie_cap(dev); | 3205 | cap = pci_pcie_cap(dev); |
3209 | if (!cap) | 3206 | if (!cap) |
3210 | goto out; | 3207 | goto out; |
@@ -3212,6 +3209,22 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
3212 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | 3209 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); |
3213 | if (err) | 3210 | if (err) |
3214 | goto out; | 3211 | goto out; |
3212 | /* | ||
3213 | * If using the "performance" PCIe config, we clamp the | ||
3214 | * read rq size to the max packet size to prevent the | ||
3215 | * host bridge generating requests larger than we can | ||
3216 | * cope with | ||
3217 | */ | ||
3218 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
3219 | int mps = pcie_get_mps(dev); | ||
3220 | |||
3221 | if (mps < 0) | ||
3222 | return mps; | ||
3223 | if (mps < rq) | ||
3224 | rq = mps; | ||
3225 | } | ||
3226 | |||
3227 | v = (ffs(rq) - 8) << 12; | ||
3215 | 3228 | ||
3216 | if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { | 3229 | if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { |
3217 | ctl &= ~PCI_EXP_DEVCTL_READRQ; | 3230 | ctl &= ~PCI_EXP_DEVCTL_READRQ; |
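
Note (not part of the patch): the hunk above clamps the requested read request size to the device MPS under the "performance" setting before encoding it into the Device Control register. A minimal sketch of that encoding, assuming a power-of-two size already validated the way pcie_set_readrq() validates it:

#include <linux/bitops.h>	/* ffs() */

/* Hypothetical helper mirroring the encoding used by pcie_set_readrq():
 * PCI_EXP_DEVCTL_READRQ occupies bits 14:12 and encodes 128 << n, so
 * n = ffs(rq) - 8; e.g. rq = 512 gives (10 - 8) << 12 = 0x2000. */
static u16 example_readrq_field(unsigned int rq)
{
	return (ffs(rq) - 8) << 12;
}
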
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 0057344a3fcb..001f1b78f39c 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -84,6 +84,9 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
84 | list_for_each_entry(dev, &bus->devices, bus_list) { | 84 | list_for_each_entry(dev, &bus->devices, bus_list) { |
85 | /* Skip PCIe devices in case we started from a root port. */ | 85 | /* Skip PCIe devices in case we started from a root port. */ |
86 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { | 86 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { |
87 | if (dev->pme_poll) | ||
88 | dev->pme_poll = false; | ||
89 | |||
87 | pci_wakeup_event(dev); | 90 | pci_wakeup_event(dev); |
88 | pm_request_resume(&dev->dev); | 91 | pm_request_resume(&dev->dev); |
89 | ret = true; | 92 | ret = true; |
@@ -142,6 +145,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | |||
142 | 145 | ||
143 | /* First, check if the PME is from the root port itself. */ | 146 | /* First, check if the PME is from the root port itself. */ |
144 | if (port->devfn == devfn && port->bus->number == busnr) { | 147 | if (port->devfn == devfn && port->bus->number == busnr) { |
148 | if (port->pme_poll) | ||
149 | port->pme_poll = false; | ||
150 | |||
145 | if (pci_check_pme_status(port)) { | 151 | if (pci_check_pme_status(port)) { |
146 | pm_request_resume(&port->dev); | 152 | pm_request_resume(&port->dev); |
147 | found = true; | 153 | found = true; |
@@ -187,6 +193,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | |||
187 | /* The device is there, but we have to check its PME status. */ | 193 | /* The device is there, but we have to check its PME status. */ |
188 | found = pci_check_pme_status(dev); | 194 | found = pci_check_pme_status(dev); |
189 | if (found) { | 195 | if (found) { |
196 | if (dev->pme_poll) | ||
197 | dev->pme_poll = false; | ||
198 | |||
190 | pci_wakeup_event(dev); | 199 | pci_wakeup_event(dev); |
191 | pm_request_resume(&dev->dev); | 200 | pm_request_resume(&dev->dev); |
192 | } | 201 | } |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6ab6bd3df4b2..04e74f485714 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1363,31 +1363,25 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
1363 | 1363 | ||
1364 | static void pcie_write_mps(struct pci_dev *dev, int mps) | 1364 | static void pcie_write_mps(struct pci_dev *dev, int mps) |
1365 | { | 1365 | { |
1366 | int rc, dev_mpss; | 1366 | int rc; |
1367 | |||
1368 | dev_mpss = 128 << dev->pcie_mpss; | ||
1369 | 1367 | ||
1370 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | 1368 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { |
1371 | if (dev->bus->self) { | 1369 | mps = 128 << dev->pcie_mpss; |
1372 | dev_dbg(&dev->bus->dev, "Bus MPSS %d\n", | ||
1373 | 128 << dev->bus->self->pcie_mpss); | ||
1374 | 1370 | ||
1375 | /* For "MPS Force Max", the assumption is made that | 1371 | if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) |
1372 | /* For "Performance", the assumption is made that | ||
1376 | * downstream communication will never be larger than | 1373 | * downstream communication will never be larger than |
1377 | * the MRRS. So, the MPS only needs to be configured | 1374 | * the MRRS. So, the MPS only needs to be configured |
1378 | * for the upstream communication. This being the case, | 1375 | * for the upstream communication. This being the case, |
1379 | * walk from the top down and set the MPS of the child | 1376 | * walk from the top down and set the MPS of the child |
1380 | * to that of the parent bus. | 1377 | * to that of the parent bus. |
1378 | * | ||
1379 | * Configure the device MPS with the smaller of the | ||
1380 | * device MPSS or the bridge MPS (which is assumed to be | ||
1381 | * properly configured at this point to the largest | ||
1382 | * allowable MPS based on its parent bus). | ||
1381 | */ | 1383 | */ |
1382 | mps = 128 << dev->bus->self->pcie_mpss; | 1384 | mps = min(mps, pcie_get_mps(dev->bus->self)); |
1383 | if (mps > dev_mpss) | ||
1384 | dev_warn(&dev->dev, "MPS configured higher than" | ||
1385 | " maximum supported by the device. If" | ||
1386 | " a bus issue occurs, try running with" | ||
1387 | " pci=pcie_bus_safe.\n"); | ||
1388 | } | ||
1389 | |||
1390 | dev->pcie_mpss = ffs(mps) - 8; | ||
1391 | } | 1385 | } |
1392 | 1386 | ||
1393 | rc = pcie_set_mps(dev, mps); | 1387 | rc = pcie_set_mps(dev, mps); |
@@ -1395,25 +1389,22 @@ static void pcie_write_mps(struct pci_dev *dev, int mps) | |||
1395 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); | 1389 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); |
1396 | } | 1390 | } |
1397 | 1391 | ||
1398 | static void pcie_write_mrrs(struct pci_dev *dev, int mps) | 1392 | static void pcie_write_mrrs(struct pci_dev *dev) |
1399 | { | 1393 | { |
1400 | int rc, mrrs, dev_mpss; | 1394 | int rc, mrrs; |
1401 | 1395 | ||
1402 | /* In the "safe" case, do not configure the MRRS. There appear to be | 1396 | /* In the "safe" case, do not configure the MRRS. There appear to be |
1403 | * issues with setting MRRS to 0 on a number of devices. | 1397 | * issues with setting MRRS to 0 on a number of devices. |
1404 | */ | 1398 | */ |
1405 | |||
1406 | if (pcie_bus_config != PCIE_BUS_PERFORMANCE) | 1399 | if (pcie_bus_config != PCIE_BUS_PERFORMANCE) |
1407 | return; | 1400 | return; |
1408 | 1401 | ||
1409 | dev_mpss = 128 << dev->pcie_mpss; | ||
1410 | |||
1411 | /* For Max performance, the MRRS must be set to the largest supported | 1402 | /* For Max performance, the MRRS must be set to the largest supported |
1412 | * value. However, it cannot be configured larger than the MPS the | 1403 | * value. However, it cannot be configured larger than the MPS the |
1413 | * device or the bus can support. This assumes that the largest MRRS | 1404 | * device or the bus can support. This should already be properly |
1414 | * available on the device cannot be smaller than the device MPSS. | 1405 | * configured by a prior call to pcie_write_mps. |
1415 | */ | 1406 | */ |
1416 | mrrs = min(mps, dev_mpss); | 1407 | mrrs = pcie_get_mps(dev); |
1417 | 1408 | ||
1418 | /* MRRS is a R/W register. Invalid values can be written, but a | 1409 | /* MRRS is a R/W register. Invalid values can be written, but a |
1419 | * subsequent read will verify if the value is acceptable or not. | 1410 | * subsequent read will verify if the value is acceptable or not. |
@@ -1421,38 +1412,41 @@ static void pcie_write_mrrs(struct pci_dev *dev, int mps) | |||
1421 | * shrink the value until it is acceptable to the HW. | 1412 | * shrink the value until it is acceptable to the HW. |
1422 | */ | 1413 | */ |
1423 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { | 1414 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { |
1424 | dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" | ||
1425 | " to %d. If any issues are encountered, please try " | ||
1426 | "running with pci=pcie_bus_safe\n", mrrs); | ||
1427 | rc = pcie_set_readrq(dev, mrrs); | 1415 | rc = pcie_set_readrq(dev, mrrs); |
1428 | if (rc) | 1416 | if (!rc) |
1429 | dev_err(&dev->dev, | 1417 | break; |
1430 | "Failed attempting to set the MRRS\n"); | ||
1431 | 1418 | ||
1419 | dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); | ||
1432 | mrrs /= 2; | 1420 | mrrs /= 2; |
1433 | } | 1421 | } |
1422 | |||
1423 | if (mrrs < 128) | ||
1424 | dev_err(&dev->dev, "MRRS was unable to be configured with a " | ||
1425 | "safe value. If problems are experienced, try running " | ||
1426 | "with pci=pcie_bus_safe.\n"); | ||
1434 | } | 1427 | } |
1435 | 1428 | ||
1436 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | 1429 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) |
1437 | { | 1430 | { |
1438 | int mps = 128 << *(u8 *)data; | 1431 | int mps, orig_mps; |
1439 | 1432 | ||
1440 | if (!pci_is_pcie(dev)) | 1433 | if (!pci_is_pcie(dev)) |
1441 | return 0; | 1434 | return 0; |
1442 | 1435 | ||
1443 | dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | 1436 | mps = 128 << *(u8 *)data; |
1444 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | 1437 | orig_mps = pcie_get_mps(dev); |
1445 | 1438 | ||
1446 | pcie_write_mps(dev, mps); | 1439 | pcie_write_mps(dev, mps); |
1447 | pcie_write_mrrs(dev, mps); | 1440 | pcie_write_mrrs(dev); |
1448 | 1441 | ||
1449 | dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | 1442 | dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " |
1450 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | 1443 | "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, |
1444 | orig_mps, pcie_get_readrq(dev)); | ||
1451 | 1445 | ||
1452 | return 0; | 1446 | return 0; |
1453 | } | 1447 | } |
1454 | 1448 | ||
1455 | /* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down, | 1449 | /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, |
1456 | * parents then children fashion. If this changes, then this code will not | 1450 | * parents then children fashion. If this changes, then this code will not |
1457 | * work as designed. | 1451 | * work as designed. |
1458 | */ | 1452 | */ |
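
Note (not part of the patch): dev->pcie_mpss stores the Max Payload Size Supported encoding from the Device Capabilities register, where the payload in bytes is 128 << pcie_mpss (so the fixup_mpss_256() quirk later in this series, which sets pcie_mpss = 1, means 256 bytes). A minimal sketch of the choice the reworked pcie_write_mps() makes in the PCIE_BUS_PERFORMANCE case, with a hypothetical helper name:

#include <linux/kernel.h>	/* min() */

/* The device's own maximum (128 << pcie_mpss), clamped to the MPS
 * already programmed on the upstream bridge. */
static int example_performance_mps(u8 pcie_mpss, int parent_mps)
{
	return min(128 << pcie_mpss, parent_mps);
}
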
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b23856aaf6eb..7285145ac1c9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2745,20 +2745,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2745 | /* disable must be done via function #0 */ | 2745 | /* disable must be done via function #0 */ |
2746 | if (PCI_FUNC(dev->devfn)) | 2746 | if (PCI_FUNC(dev->devfn)) |
2747 | return; | 2747 | return; |
2748 | |||
2749 | pci_read_config_byte(dev, 0xCB, &disable); | ||
2750 | |||
2751 | if (disable & 0x02) | ||
2752 | return; | ||
2753 | |||
2754 | pci_read_config_byte(dev, 0xCA, &write_enable); | ||
2755 | pci_write_config_byte(dev, 0xCA, 0x57); | ||
2756 | pci_write_config_byte(dev, 0xCB, disable | 0x02); | ||
2757 | pci_write_config_byte(dev, 0xCA, write_enable); | ||
2758 | |||
2759 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | ||
2760 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2761 | |||
2762 | /* | 2748 | /* |
2763 | * RICOH 0xe823 SD/MMC card reader fails to recognize | 2749 | * RICOH 0xe823 SD/MMC card reader fails to recognize |
2764 | * certain types of SD/MMC cards. Lowering the SD base | 2750 | * certain types of SD/MMC cards. Lowering the SD base |
@@ -2781,6 +2767,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | |||
2781 | 2767 | ||
2782 | dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); | 2768 | dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); |
2783 | } | 2769 | } |
2770 | |||
2771 | pci_read_config_byte(dev, 0xCB, &disable); | ||
2772 | |||
2773 | if (disable & 0x02) | ||
2774 | return; | ||
2775 | |||
2776 | pci_read_config_byte(dev, 0xCA, &write_enable); | ||
2777 | pci_write_config_byte(dev, 0xCA, 0x57); | ||
2778 | pci_write_config_byte(dev, 0xCB, disable | 0x02); | ||
2779 | pci_write_config_byte(dev, 0xCA, write_enable); | ||
2780 | |||
2781 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | ||
2782 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2783 | |||
2784 | } | 2784 | } |
2785 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2785 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
2786 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2786 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
@@ -2822,6 +2822,89 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev) | |||
2822 | } | 2822 | } |
2823 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); | 2823 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); |
2824 | 2824 | ||
2825 | /* Some PCIe devices do not work reliably with the claimed maximum | ||
2826 | * payload size supported. | ||
2827 | */ | ||
2828 | static void __devinit fixup_mpss_256(struct pci_dev *dev) | ||
2829 | { | ||
2830 | dev->pcie_mpss = 1; /* 256 bytes */ | ||
2831 | } | ||
2832 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2833 | PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); | ||
2834 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2835 | PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); | ||
2836 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2837 | PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); | ||
2838 | |||
2839 | /* Intel 5000 and 5100 Memory controllers have an errata with read completion | ||
2840 | * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. | ||
2841 | * Since there is no way of knowing what the PCIE MPS on each fabric will be | ||
2842 | * until all of the devices are discovered and buses walked, read completion | ||
2843 | * coalescing must be disabled. Unfortunately, it cannot be re-enabled because | ||
2844 | * it is possible to hotplug a device with MPS of 256B. | ||
2845 | */ | ||
2846 | static void __devinit quirk_intel_mc_errata(struct pci_dev *dev) | ||
2847 | { | ||
2848 | int err; | ||
2849 | u16 rcc; | ||
2850 | |||
2851 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
2852 | return; | ||
2853 | |||
2854 | /* Intel errata specifies bits to change but does not say what they are. | ||
2855 | * Keeping them magical until such time as the registers and values can | ||
2856 | * be explained. | ||
2857 | */ | ||
2858 | err = pci_read_config_word(dev, 0x48, &rcc); | ||
2859 | if (err) { | ||
2860 | dev_err(&dev->dev, "Error attempting to read the read " | ||
2861 | "completion coalescing register.\n"); | ||
2862 | return; | ||
2863 | } | ||
2864 | |||
2865 | if (!(rcc & (1 << 10))) | ||
2866 | return; | ||
2867 | |||
2868 | rcc &= ~(1 << 10); | ||
2869 | |||
2870 | err = pci_write_config_word(dev, 0x48, rcc); | ||
2871 | if (err) { | ||
2872 | dev_err(&dev->dev, "Error attempting to write the read " | ||
2873 | "completion coalescing register.\n"); | ||
2874 | return; | ||
2875 | } | ||
2876 | |||
2877 | pr_info_once("Read completion coalescing disabled due to hardware " | ||
2878 | "errata relating to 256B MPS.\n"); | ||
2879 | } | ||
2880 | /* Intel 5000 series memory controllers and ports 2-7 */ | ||
2881 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); | ||
2882 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata); | ||
2883 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata); | ||
2884 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata); | ||
2885 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata); | ||
2886 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata); | ||
2887 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata); | ||
2888 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata); | ||
2889 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata); | ||
2890 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata); | ||
2891 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata); | ||
2892 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata); | ||
2893 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata); | ||
2894 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata); | ||
2895 | /* Intel 5100 series memory controllers and ports 2-7 */ | ||
2896 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata); | ||
2897 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata); | ||
2898 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata); | ||
2899 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata); | ||
2900 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata); | ||
2901 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata); | ||
2902 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata); | ||
2903 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata); | ||
2904 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata); | ||
2905 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); | ||
2906 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); | ||
2907 | |||
2825 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, | 2908 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
2826 | struct pci_fixup *end) | 2909 | struct pci_fixup *end) |
2827 | { | 2910 | { |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 784da9d36029..86b69f85f900 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -426,7 +426,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
426 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 426 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
427 | } | 427 | } |
428 | 428 | ||
429 | static void pci_setup_bridge(struct pci_bus *bus) | 429 | void pci_setup_bridge(struct pci_bus *bus) |
430 | { | 430 | { |
431 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | 431 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | |
432 | IORESOURCE_PREFETCH; | 432 | IORESOURCE_PREFETCH; |