diff options
Diffstat (limited to 'drivers/pci')
-rw-r--r-- | drivers/pci/Kconfig | 26 | ||||
-rw-r--r-- | drivers/pci/Makefile | 1 | ||||
-rw-r--r-- | drivers/pci/ats.c | 438 | ||||
-rw-r--r-- | drivers/pci/hotplug/acpiphp_glue.c | 109 | ||||
-rw-r--r-- | drivers/pci/hotplug/pcihp_slot.c | 47 | ||||
-rw-r--r-- | drivers/pci/iov.c | 142 | ||||
-rw-r--r-- | drivers/pci/of.c | 2 | ||||
-rw-r--r-- | drivers/pci/pci-acpi.c | 6 | ||||
-rw-r--r-- | drivers/pci/pci.c | 130 | ||||
-rw-r--r-- | drivers/pci/pci.h | 4 | ||||
-rw-r--r-- | drivers/pci/pcie/pme.c | 9 | ||||
-rw-r--r-- | drivers/pci/probe.c | 153 | ||||
-rw-r--r-- | drivers/pci/quirks.c | 113 | ||||
-rw-r--r-- | drivers/pci/setup-bus.c | 168 | ||||
-rw-r--r-- | drivers/pci/setup-res.c | 152 | ||||
-rw-r--r-- | drivers/pci/xen-pcifront.c | 5 |
16 files changed, 1152 insertions, 353 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b42798fe44a9..b6f9749b4fa7 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -60,9 +60,13 @@ config HT_IRQ | |||
60 | 60 | ||
61 | If unsure say Y. | 61 | If unsure say Y. |
62 | 62 | ||
63 | config PCI_ATS | ||
64 | bool | ||
65 | |||
63 | config PCI_IOV | 66 | config PCI_IOV |
64 | bool "PCI IOV support" | 67 | bool "PCI IOV support" |
65 | depends on PCI | 68 | depends on PCI |
69 | select PCI_ATS | ||
66 | help | 70 | help |
67 | I/O Virtualization is a PCI feature supported by some devices | 71 | I/O Virtualization is a PCI feature supported by some devices |
68 | which allows them to create virtual devices which share their | 72 | which allows them to create virtual devices which share their |
@@ -70,6 +74,28 @@ config PCI_IOV | |||
70 | 74 | ||
71 | If unsure, say N. | 75 | If unsure, say N. |
72 | 76 | ||
77 | config PCI_PRI | ||
78 | bool "PCI PRI support" | ||
79 | select PCI_ATS | ||
80 | help | ||
81 | PRI is the PCI Page Request Interface. It allows PCI devices that are | ||
82 | behind an IOMMU to recover from page faults. | ||
83 | |||
84 | If unsure, say N. | ||
85 | |||
86 | config PCI_PASID | ||
87 | bool "PCI PASID support" | ||
88 | depends on PCI | ||
89 | select PCI_ATS | ||
90 | help | ||
91 | Process Address Space Identifiers (PASIDs) can be used by PCI devices | ||
92 | to access more than one IO address space at the same time. To make | ||
93 | use of this feature an IOMMU is required which also supports PASIDs. | ||
94 | Select this option if you have such an IOMMU and want to compile the | ||
95 | driver for it into your kernel. | ||
96 | |||
97 | If unsure, say N. | ||
98 | |||
73 | config PCI_IOAPIC | 99 | config PCI_IOAPIC |
74 | bool | 100 | bool |
75 | depends on PCI | 101 | depends on PCI |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 6fadae3ad134..083a49fee56a 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -29,6 +29,7 @@ obj-$(CONFIG_PCI_MSI) += msi.o | |||
29 | # Build the Hypertransport interrupt support | 29 | # Build the Hypertransport interrupt support |
30 | obj-$(CONFIG_HT_IRQ) += htirq.o | 30 | obj-$(CONFIG_HT_IRQ) += htirq.o |
31 | 31 | ||
32 | obj-$(CONFIG_PCI_ATS) += ats.o | ||
32 | obj-$(CONFIG_PCI_IOV) += iov.o | 33 | obj-$(CONFIG_PCI_IOV) += iov.o |
33 | 34 | ||
34 | # | 35 | # |
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c new file mode 100644 index 000000000000..f727a09eb72f --- /dev/null +++ b/drivers/pci/ats.c | |||
@@ -0,0 +1,438 @@ | |||
1 | /* | ||
2 | * drivers/pci/ats.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> | ||
5 | * Copyright (C) 2011 Advanced Micro Devices, | ||
6 | * | ||
7 | * PCI Express I/O Virtualization (IOV) support. | ||
8 | * Address Translation Service 1.0 | ||
9 | * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com> | ||
10 | * PASID support added by Joerg Roedel <joerg.roedel@amd.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/pci-ats.h> | ||
14 | #include <linux/pci.h> | ||
15 | |||
16 | #include "pci.h" | ||
17 | |||
18 | static int ats_alloc_one(struct pci_dev *dev, int ps) | ||
19 | { | ||
20 | int pos; | ||
21 | u16 cap; | ||
22 | struct pci_ats *ats; | ||
23 | |||
24 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
25 | if (!pos) | ||
26 | return -ENODEV; | ||
27 | |||
28 | ats = kzalloc(sizeof(*ats), GFP_KERNEL); | ||
29 | if (!ats) | ||
30 | return -ENOMEM; | ||
31 | |||
32 | ats->pos = pos; | ||
33 | ats->stu = ps; | ||
34 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
35 | ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
36 | PCI_ATS_MAX_QDEP; | ||
37 | dev->ats = ats; | ||
38 | |||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static void ats_free_one(struct pci_dev *dev) | ||
43 | { | ||
44 | kfree(dev->ats); | ||
45 | dev->ats = NULL; | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * pci_enable_ats - enable the ATS capability | ||
50 | * @dev: the PCI device | ||
51 | * @ps: the IOMMU page shift | ||
52 | * | ||
53 | * Returns 0 on success, or negative on failure. | ||
54 | */ | ||
55 | int pci_enable_ats(struct pci_dev *dev, int ps) | ||
56 | { | ||
57 | int rc; | ||
58 | u16 ctrl; | ||
59 | |||
60 | BUG_ON(dev->ats && dev->ats->is_enabled); | ||
61 | |||
62 | if (ps < PCI_ATS_MIN_STU) | ||
63 | return -EINVAL; | ||
64 | |||
65 | if (dev->is_physfn || dev->is_virtfn) { | ||
66 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
67 | |||
68 | mutex_lock(&pdev->sriov->lock); | ||
69 | if (pdev->ats) | ||
70 | rc = pdev->ats->stu == ps ? 0 : -EINVAL; | ||
71 | else | ||
72 | rc = ats_alloc_one(pdev, ps); | ||
73 | |||
74 | if (!rc) | ||
75 | pdev->ats->ref_cnt++; | ||
76 | mutex_unlock(&pdev->sriov->lock); | ||
77 | if (rc) | ||
78 | return rc; | ||
79 | } | ||
80 | |||
81 | if (!dev->is_physfn) { | ||
82 | rc = ats_alloc_one(dev, ps); | ||
83 | if (rc) | ||
84 | return rc; | ||
85 | } | ||
86 | |||
87 | ctrl = PCI_ATS_CTRL_ENABLE; | ||
88 | if (!dev->is_virtfn) | ||
89 | ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); | ||
90 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
91 | |||
92 | dev->ats->is_enabled = 1; | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(pci_enable_ats); | ||
97 | |||
98 | /** | ||
99 | * pci_disable_ats - disable the ATS capability | ||
100 | * @dev: the PCI device | ||
101 | */ | ||
102 | void pci_disable_ats(struct pci_dev *dev) | ||
103 | { | ||
104 | u16 ctrl; | ||
105 | |||
106 | BUG_ON(!dev->ats || !dev->ats->is_enabled); | ||
107 | |||
108 | pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); | ||
109 | ctrl &= ~PCI_ATS_CTRL_ENABLE; | ||
110 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
111 | |||
112 | dev->ats->is_enabled = 0; | ||
113 | |||
114 | if (dev->is_physfn || dev->is_virtfn) { | ||
115 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
116 | |||
117 | mutex_lock(&pdev->sriov->lock); | ||
118 | pdev->ats->ref_cnt--; | ||
119 | if (!pdev->ats->ref_cnt) | ||
120 | ats_free_one(pdev); | ||
121 | mutex_unlock(&pdev->sriov->lock); | ||
122 | } | ||
123 | |||
124 | if (!dev->is_physfn) | ||
125 | ats_free_one(dev); | ||
126 | } | ||
127 | EXPORT_SYMBOL_GPL(pci_disable_ats); | ||
128 | |||
129 | /** | ||
130 | * pci_ats_queue_depth - query the ATS Invalidate Queue Depth | ||
131 | * @dev: the PCI device | ||
132 | * | ||
133 | * Returns the queue depth on success, or negative on failure. | ||
134 | * | ||
135 | * The ATS spec uses 0 in the Invalidate Queue Depth field to | ||
136 | * indicate that the function can accept 32 Invalidate Requests. | ||
137 | * But here we use the `real' values (i.e. 1~32) for the Queue | ||
138 | * Depth; and 0 indicates the function shares the Queue with | ||
139 | * other functions (doesn't exclusively own a Queue). | ||
140 | */ | ||
141 | int pci_ats_queue_depth(struct pci_dev *dev) | ||
142 | { | ||
143 | int pos; | ||
144 | u16 cap; | ||
145 | |||
146 | if (dev->is_virtfn) | ||
147 | return 0; | ||
148 | |||
149 | if (dev->ats) | ||
150 | return dev->ats->qdep; | ||
151 | |||
152 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
153 | if (!pos) | ||
154 | return -ENODEV; | ||
155 | |||
156 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
157 | |||
158 | return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
159 | PCI_ATS_MAX_QDEP; | ||
160 | } | ||
161 | EXPORT_SYMBOL_GPL(pci_ats_queue_depth); | ||
162 | |||
163 | #ifdef CONFIG_PCI_PRI | ||
164 | /** | ||
165 | * pci_enable_pri - Enable PRI capability | ||
166 | * @pdev: PCI device structure | ||
167 | * | ||
168 | * Returns 0 on success, negative value on error | ||
169 | */ | ||
170 | int pci_enable_pri(struct pci_dev *pdev, u32 reqs) | ||
171 | { | ||
172 | u16 control, status; | ||
173 | u32 max_requests; | ||
174 | int pos; | ||
175 | |||
176 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
177 | if (!pos) | ||
178 | return -EINVAL; | ||
179 | |||
180 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
181 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
182 | if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED)) | ||
183 | return -EBUSY; | ||
184 | |||
185 | pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests); | ||
186 | reqs = min(max_requests, reqs); | ||
187 | pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs); | ||
188 | |||
189 | control |= PCI_PRI_ENABLE; | ||
190 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(pci_enable_pri); | ||
195 | |||
196 | /** | ||
197 | * pci_disable_pri - Disable PRI capability | ||
198 | * @pdev: PCI device structure | ||
199 | * | ||
200 | * Only clears the enabled-bit, regardless of its former value | ||
201 | */ | ||
202 | void pci_disable_pri(struct pci_dev *pdev) | ||
203 | { | ||
204 | u16 control; | ||
205 | int pos; | ||
206 | |||
207 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
208 | if (!pos) | ||
209 | return; | ||
210 | |||
211 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
212 | control &= ~PCI_PRI_ENABLE; | ||
213 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(pci_disable_pri); | ||
216 | |||
217 | /** | ||
218 | * pci_pri_enabled - Checks if PRI capability is enabled | ||
219 | * @pdev: PCI device structure | ||
220 | * | ||
221 | * Returns true if PRI is enabled on the device, false otherwise | ||
222 | */ | ||
223 | bool pci_pri_enabled(struct pci_dev *pdev) | ||
224 | { | ||
225 | u16 control; | ||
226 | int pos; | ||
227 | |||
228 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
229 | if (!pos) | ||
230 | return false; | ||
231 | |||
232 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
233 | |||
234 | return (control & PCI_PRI_ENABLE) ? true : false; | ||
235 | } | ||
236 | EXPORT_SYMBOL_GPL(pci_pri_enabled); | ||
237 | |||
238 | /** | ||
239 | * pci_reset_pri - Resets device's PRI state | ||
240 | * @pdev: PCI device structure | ||
241 | * | ||
242 | * The PRI capability must be disabled before this function is called. | ||
243 | * Returns 0 on success, negative value on error. | ||
244 | */ | ||
245 | int pci_reset_pri(struct pci_dev *pdev) | ||
246 | { | ||
247 | u16 control; | ||
248 | int pos; | ||
249 | |||
250 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
251 | if (!pos) | ||
252 | return -EINVAL; | ||
253 | |||
254 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
255 | if (control & PCI_PRI_ENABLE) | ||
256 | return -EBUSY; | ||
257 | |||
258 | control |= PCI_PRI_RESET; | ||
259 | |||
260 | pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | EXPORT_SYMBOL_GPL(pci_reset_pri); | ||
265 | |||
266 | /** | ||
267 | * pci_pri_stopped - Checks whether the PRI capability is stopped | ||
268 | * @pdev: PCI device structure | ||
269 | * | ||
270 | * Returns true if the PRI capability on the device is disabled and the | ||
271 | * device has no outstanding PRI requests, false otherwise. The device | ||
272 | * indicates this via the STOPPED bit in the status register of the | ||
273 | * capability. | ||
274 | * The device internal state can be cleared by resetting the PRI state | ||
275 | * with pci_reset_pri(). This can force the capability into the STOPPED | ||
276 | * state. | ||
277 | */ | ||
278 | bool pci_pri_stopped(struct pci_dev *pdev) | ||
279 | { | ||
280 | u16 control, status; | ||
281 | int pos; | ||
282 | |||
283 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
284 | if (!pos) | ||
285 | return true; | ||
286 | |||
287 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
288 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
289 | |||
290 | if (control & PCI_PRI_ENABLE) | ||
291 | return false; | ||
292 | |||
293 | return (status & PCI_PRI_STATUS_STOPPED) ? true : false; | ||
294 | } | ||
295 | EXPORT_SYMBOL_GPL(pci_pri_stopped); | ||
296 | |||
297 | /** | ||
298 | * pci_pri_status - Request PRI status of a device | ||
299 | * @pdev: PCI device structure | ||
300 | * | ||
301 | * Returns negative value on failure, status on success. The status can | ||
302 | * be checked against status-bits. Supported bits are currently: | ||
303 | * PCI_PRI_STATUS_RF: Response failure | ||
304 | * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index | ||
305 | * PCI_PRI_STATUS_STOPPED: PRI has stopped | ||
306 | */ | ||
307 | int pci_pri_status(struct pci_dev *pdev) | ||
308 | { | ||
309 | u16 status, control; | ||
310 | int pos; | ||
311 | |||
312 | pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); | ||
313 | if (!pos) | ||
314 | return -EINVAL; | ||
315 | |||
316 | pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); | ||
317 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); | ||
318 | |||
319 | /* Stopped bit is undefined when enable == 1, so clear it */ | ||
320 | if (control & PCI_PRI_ENABLE) | ||
321 | status &= ~PCI_PRI_STATUS_STOPPED; | ||
322 | |||
323 | return status; | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(pci_pri_status); | ||
326 | #endif /* CONFIG_PCI_PRI */ | ||
327 | |||
328 | #ifdef CONFIG_PCI_PASID | ||
329 | /** | ||
330 | * pci_enable_pasid - Enable the PASID capability | ||
331 | * @pdev: PCI device structure | ||
332 | * @features: Features to enable | ||
333 | * | ||
334 | * Returns 0 on success, negative value on error. This function checks | ||
335 | * whether the features are actually supported by the device and returns | ||
336 | * an error if not. | ||
337 | */ | ||
338 | int pci_enable_pasid(struct pci_dev *pdev, int features) | ||
339 | { | ||
340 | u16 control, supported; | ||
341 | int pos; | ||
342 | |||
343 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
344 | if (!pos) | ||
345 | return -EINVAL; | ||
346 | |||
347 | pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control); | ||
348 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
349 | |||
350 | if (!(supported & PCI_PASID_ENABLE)) | ||
351 | return -EINVAL; | ||
352 | |||
353 | supported &= PCI_PASID_EXEC | PCI_PASID_PRIV; | ||
354 | |||
355 | /* User wants to enable anything unsupported? */ | ||
356 | if ((supported & features) != features) | ||
357 | return -EINVAL; | ||
358 | |||
359 | control = PCI_PASID_ENABLE | features; | ||
360 | |||
361 | pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(pci_enable_pasid); | ||
366 | |||
367 | /** | ||
368 | * pci_disable_pasid - Disable the PASID capability | ||
369 | * @pdev: PCI device structure | ||
370 | * | ||
371 | */ | ||
372 | void pci_disable_pasid(struct pci_dev *pdev) | ||
373 | { | ||
374 | u16 control = 0; | ||
375 | int pos; | ||
376 | |||
377 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
378 | if (!pos) | ||
379 | return; | ||
380 | |||
381 | pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(pci_disable_pasid); | ||
384 | |||
385 | /** | ||
386 | * pci_pasid_features - Check which PASID features are supported | ||
387 | * @pdev: PCI device structure | ||
388 | * | ||
389 | * Returns a negative value when no PASID capability is present. | ||
390 | * Otherwise it returns a bitmask with supported features. Current | ||
391 | * features reported are: | ||
392 | * PCI_PASID_ENABLE - PASID capability can be enabled | ||
393 | * PCI_PASID_EXEC - Execute permission supported | ||
394 | * PCI_PASID_PRIV - Privileged mode supported | ||
395 | */ | ||
396 | int pci_pasid_features(struct pci_dev *pdev) | ||
397 | { | ||
398 | u16 supported; | ||
399 | int pos; | ||
400 | |||
401 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
402 | if (!pos) | ||
403 | return -EINVAL; | ||
404 | |||
405 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
406 | |||
407 | supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV; | ||
408 | |||
409 | return supported; | ||
410 | } | ||
411 | EXPORT_SYMBOL_GPL(pci_pasid_features); | ||
412 | |||
413 | #define PASID_NUMBER_SHIFT 8 | ||
414 | #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) | ||
415 | /** | ||
416 | * pci_max_pasid - Get maximum number of PASIDs supported by device | ||
417 | * @pdev: PCI device structure | ||
418 | * | ||
419 | * Returns negative value when PASID capability is not present. | ||
420 | * Otherwise it returns the number of supported PASIDs. | ||
421 | */ | ||
422 | int pci_max_pasids(struct pci_dev *pdev) | ||
423 | { | ||
424 | u16 supported; | ||
425 | int pos; | ||
426 | |||
427 | pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); | ||
428 | if (!pos) | ||
429 | return -EINVAL; | ||
430 | |||
431 | pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); | ||
432 | |||
433 | supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT; | ||
434 | |||
435 | return (1 << supported); | ||
436 | } | ||
437 | EXPORT_SYMBOL_GPL(pci_max_pasids); | ||
438 | #endif /* CONFIG_PCI_PASID */ | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 220285760b68..596172b4ae95 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/pci-acpi.h> | 48 | #include <linux/pci-acpi.h> |
49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
50 | #include <linux/slab.h> | 50 | #include <linux/slab.h> |
51 | #include <linux/acpi.h> | ||
51 | 52 | ||
52 | #include "../pci.h" | 53 | #include "../pci.h" |
53 | #include "acpiphp.h" | 54 | #include "acpiphp.h" |
@@ -1149,15 +1150,35 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
1149 | return AE_OK ; | 1150 | return AE_OK ; |
1150 | } | 1151 | } |
1151 | 1152 | ||
1152 | /** | 1153 | struct acpiphp_hp_work { |
1153 | * handle_hotplug_event_bridge - handle ACPI event on bridges | 1154 | struct work_struct work; |
1154 | * @handle: Notify()'ed acpi_handle | 1155 | acpi_handle handle; |
1155 | * @type: Notify code | 1156 | u32 type; |
1156 | * @context: pointer to acpiphp_bridge structure | 1157 | void *context; |
1157 | * | 1158 | }; |
1158 | * Handles ACPI event notification on {host,p2p} bridges. | 1159 | |
1159 | */ | 1160 | static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type, |
1160 | static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context) | 1161 | void *context, |
1162 | void (*func)(struct work_struct *work)) | ||
1163 | { | ||
1164 | struct acpiphp_hp_work *hp_work; | ||
1165 | int ret; | ||
1166 | |||
1167 | hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL); | ||
1168 | if (!hp_work) | ||
1169 | return; | ||
1170 | |||
1171 | hp_work->handle = handle; | ||
1172 | hp_work->type = type; | ||
1173 | hp_work->context = context; | ||
1174 | |||
1175 | INIT_WORK(&hp_work->work, func); | ||
1176 | ret = queue_work(kacpi_hotplug_wq, &hp_work->work); | ||
1177 | if (!ret) | ||
1178 | kfree(hp_work); | ||
1179 | } | ||
1180 | |||
1181 | static void _handle_hotplug_event_bridge(struct work_struct *work) | ||
1161 | { | 1182 | { |
1162 | struct acpiphp_bridge *bridge; | 1183 | struct acpiphp_bridge *bridge; |
1163 | char objname[64]; | 1184 | char objname[64]; |
@@ -1165,11 +1186,18 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1165 | .pointer = objname }; | 1186 | .pointer = objname }; |
1166 | struct acpi_device *device; | 1187 | struct acpi_device *device; |
1167 | int num_sub_bridges = 0; | 1188 | int num_sub_bridges = 0; |
1189 | struct acpiphp_hp_work *hp_work; | ||
1190 | acpi_handle handle; | ||
1191 | u32 type; | ||
1192 | |||
1193 | hp_work = container_of(work, struct acpiphp_hp_work, work); | ||
1194 | handle = hp_work->handle; | ||
1195 | type = hp_work->type; | ||
1168 | 1196 | ||
1169 | if (acpi_bus_get_device(handle, &device)) { | 1197 | if (acpi_bus_get_device(handle, &device)) { |
1170 | /* This bridge must have just been physically inserted */ | 1198 | /* This bridge must have just been physically inserted */ |
1171 | handle_bridge_insertion(handle, type); | 1199 | handle_bridge_insertion(handle, type); |
1172 | return; | 1200 | goto out; |
1173 | } | 1201 | } |
1174 | 1202 | ||
1175 | bridge = acpiphp_handle_to_bridge(handle); | 1203 | bridge = acpiphp_handle_to_bridge(handle); |
@@ -1180,7 +1208,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1180 | 1208 | ||
1181 | if (!bridge && !num_sub_bridges) { | 1209 | if (!bridge && !num_sub_bridges) { |
1182 | err("cannot get bridge info\n"); | 1210 | err("cannot get bridge info\n"); |
1183 | return; | 1211 | goto out; |
1184 | } | 1212 | } |
1185 | 1213 | ||
1186 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 1214 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
@@ -1241,22 +1269,49 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
1241 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); | 1269 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); |
1242 | break; | 1270 | break; |
1243 | } | 1271 | } |
1272 | |||
1273 | out: | ||
1274 | kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ | ||
1244 | } | 1275 | } |
1245 | 1276 | ||
1246 | /** | 1277 | /** |
1247 | * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) | 1278 | * handle_hotplug_event_bridge - handle ACPI event on bridges |
1248 | * @handle: Notify()'ed acpi_handle | 1279 | * @handle: Notify()'ed acpi_handle |
1249 | * @type: Notify code | 1280 | * @type: Notify code |
1250 | * @context: pointer to acpiphp_func structure | 1281 | * @context: pointer to acpiphp_bridge structure |
1251 | * | 1282 | * |
1252 | * Handles ACPI event notification on slots. | 1283 | * Handles ACPI event notification on {host,p2p} bridges. |
1253 | */ | 1284 | */ |
1254 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context) | 1285 | static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, |
1286 | void *context) | ||
1287 | { | ||
1288 | /* | ||
1289 | * Currently the code adds all hotplug events to the kacpid_wq | ||
1290 | * queue when it should add hotplug events to the kacpi_hotplug_wq. | ||
1291 | * The proper way to fix this is to reorganize the code so that | ||
1292 | * drivers (dock, etc.) do not call acpi_os_execute(), etc. | ||
1293 | * For now just re-add this work to the kacpi_hotplug_wq so we | ||
1294 | * don't deadlock on hotplug actions. | ||
1295 | */ | ||
1296 | alloc_acpiphp_hp_work(handle, type, context, | ||
1297 | _handle_hotplug_event_bridge); | ||
1298 | } | ||
1299 | |||
1300 | static void _handle_hotplug_event_func(struct work_struct *work) | ||
1255 | { | 1301 | { |
1256 | struct acpiphp_func *func; | 1302 | struct acpiphp_func *func; |
1257 | char objname[64]; | 1303 | char objname[64]; |
1258 | struct acpi_buffer buffer = { .length = sizeof(objname), | 1304 | struct acpi_buffer buffer = { .length = sizeof(objname), |
1259 | .pointer = objname }; | 1305 | .pointer = objname }; |
1306 | struct acpiphp_hp_work *hp_work; | ||
1307 | acpi_handle handle; | ||
1308 | u32 type; | ||
1309 | void *context; | ||
1310 | |||
1311 | hp_work = container_of(work, struct acpiphp_hp_work, work); | ||
1312 | handle = hp_work->handle; | ||
1313 | type = hp_work->type; | ||
1314 | context = hp_work->context; | ||
1260 | 1315 | ||
1261 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 1316 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
1262 | 1317 | ||
@@ -1291,8 +1346,32 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex | |||
1291 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); | 1346 | warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); |
1292 | break; | 1347 | break; |
1293 | } | 1348 | } |
1349 | |||
1350 | kfree(hp_work); /* allocated in handle_hotplug_event_func */ | ||
1294 | } | 1351 | } |
1295 | 1352 | ||
1353 | /** | ||
1354 | * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) | ||
1355 | * @handle: Notify()'ed acpi_handle | ||
1356 | * @type: Notify code | ||
1357 | * @context: pointer to acpiphp_func structure | ||
1358 | * | ||
1359 | * Handles ACPI event notification on slots. | ||
1360 | */ | ||
1361 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, | ||
1362 | void *context) | ||
1363 | { | ||
1364 | /* | ||
1365 | * Currently the code adds all hotplug events to the kacpid_wq | ||
1366 | * queue when it should add hotplug events to the kacpi_hotplug_wq. | ||
1367 | * The proper way to fix this is to reorganize the code so that | ||
1368 | * drivers (dock, etc.) do not call acpi_os_execute(), etc. | ||
1369 | * For now just re-add this work to the kacpi_hotplug_wq so we | ||
1370 | * don't deadlock on hotplug actions. | ||
1371 | */ | ||
1372 | alloc_acpiphp_hp_work(handle, type, context, | ||
1373 | _handle_hotplug_event_func); | ||
1374 | } | ||
1296 | 1375 | ||
1297 | static acpi_status | 1376 | static acpi_status |
1298 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | 1377 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) |
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 749fdf070319..3ffd9c1acc0a 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c | |||
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
158 | */ | 158 | */ |
159 | } | 159 | } |
160 | 160 | ||
161 | /* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */ | ||
162 | static int pci_set_payload(struct pci_dev *dev) | ||
163 | { | ||
164 | int pos, ppos; | ||
165 | u16 pctl, psz; | ||
166 | u16 dctl, dsz, dcap, dmax; | ||
167 | struct pci_dev *parent; | ||
168 | |||
169 | parent = dev->bus->self; | ||
170 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
171 | if (!pos) | ||
172 | return 0; | ||
173 | |||
174 | /* Read Device MaxPayload capability and setting */ | ||
175 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl); | ||
176 | pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap); | ||
177 | dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
178 | dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD); | ||
179 | |||
180 | /* Read Parent MaxPayload setting */ | ||
181 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | ||
182 | if (!ppos) | ||
183 | return 0; | ||
184 | pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); | ||
185 | psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
186 | |||
187 | /* If parent payload > device max payload -> error | ||
188 | * If parent payload > device payload -> set speed | ||
189 | * If parent payload <= device payload -> do nothing | ||
190 | */ | ||
191 | if (psz > dmax) | ||
192 | return -1; | ||
193 | else if (psz > dsz) { | ||
194 | dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz); | ||
195 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | ||
196 | (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) + | ||
197 | (psz << 5)); | ||
198 | } | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | void pci_configure_slot(struct pci_dev *dev) | 161 | void pci_configure_slot(struct pci_dev *dev) |
203 | { | 162 | { |
204 | struct pci_dev *cdev; | 163 | struct pci_dev *cdev; |
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev) | |||
210 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | 169 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) |
211 | return; | 170 | return; |
212 | 171 | ||
213 | ret = pci_set_payload(dev); | 172 | if (dev->bus && dev->bus->self) |
214 | if (ret) | 173 | pcie_bus_configure_settings(dev->bus, |
215 | dev_warn(&dev->dev, "could not set device max payload\n"); | 174 | dev->bus->self->pcie_mpss); |
216 | 175 | ||
217 | memset(&hpp, 0, sizeof(hpp)); | 176 | memset(&hpp, 0, sizeof(hpp)); |
218 | ret = pci_get_hp_params(dev, &hpp); | 177 | ret = pci_get_hp_params(dev, &hpp); |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 42fae4776515..9b4e88c636f8 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -722,145 +722,3 @@ int pci_num_vf(struct pci_dev *dev) | |||
722 | return dev->sriov->nr_virtfn; | 722 | return dev->sriov->nr_virtfn; |
723 | } | 723 | } |
724 | EXPORT_SYMBOL_GPL(pci_num_vf); | 724 | EXPORT_SYMBOL_GPL(pci_num_vf); |
725 | |||
726 | static int ats_alloc_one(struct pci_dev *dev, int ps) | ||
727 | { | ||
728 | int pos; | ||
729 | u16 cap; | ||
730 | struct pci_ats *ats; | ||
731 | |||
732 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
733 | if (!pos) | ||
734 | return -ENODEV; | ||
735 | |||
736 | ats = kzalloc(sizeof(*ats), GFP_KERNEL); | ||
737 | if (!ats) | ||
738 | return -ENOMEM; | ||
739 | |||
740 | ats->pos = pos; | ||
741 | ats->stu = ps; | ||
742 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
743 | ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
744 | PCI_ATS_MAX_QDEP; | ||
745 | dev->ats = ats; | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static void ats_free_one(struct pci_dev *dev) | ||
751 | { | ||
752 | kfree(dev->ats); | ||
753 | dev->ats = NULL; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * pci_enable_ats - enable the ATS capability | ||
758 | * @dev: the PCI device | ||
759 | * @ps: the IOMMU page shift | ||
760 | * | ||
761 | * Returns 0 on success, or negative on failure. | ||
762 | */ | ||
763 | int pci_enable_ats(struct pci_dev *dev, int ps) | ||
764 | { | ||
765 | int rc; | ||
766 | u16 ctrl; | ||
767 | |||
768 | BUG_ON(dev->ats && dev->ats->is_enabled); | ||
769 | |||
770 | if (ps < PCI_ATS_MIN_STU) | ||
771 | return -EINVAL; | ||
772 | |||
773 | if (dev->is_physfn || dev->is_virtfn) { | ||
774 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
775 | |||
776 | mutex_lock(&pdev->sriov->lock); | ||
777 | if (pdev->ats) | ||
778 | rc = pdev->ats->stu == ps ? 0 : -EINVAL; | ||
779 | else | ||
780 | rc = ats_alloc_one(pdev, ps); | ||
781 | |||
782 | if (!rc) | ||
783 | pdev->ats->ref_cnt++; | ||
784 | mutex_unlock(&pdev->sriov->lock); | ||
785 | if (rc) | ||
786 | return rc; | ||
787 | } | ||
788 | |||
789 | if (!dev->is_physfn) { | ||
790 | rc = ats_alloc_one(dev, ps); | ||
791 | if (rc) | ||
792 | return rc; | ||
793 | } | ||
794 | |||
795 | ctrl = PCI_ATS_CTRL_ENABLE; | ||
796 | if (!dev->is_virtfn) | ||
797 | ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); | ||
798 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
799 | |||
800 | dev->ats->is_enabled = 1; | ||
801 | |||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * pci_disable_ats - disable the ATS capability | ||
807 | * @dev: the PCI device | ||
808 | */ | ||
809 | void pci_disable_ats(struct pci_dev *dev) | ||
810 | { | ||
811 | u16 ctrl; | ||
812 | |||
813 | BUG_ON(!dev->ats || !dev->ats->is_enabled); | ||
814 | |||
815 | pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); | ||
816 | ctrl &= ~PCI_ATS_CTRL_ENABLE; | ||
817 | pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); | ||
818 | |||
819 | dev->ats->is_enabled = 0; | ||
820 | |||
821 | if (dev->is_physfn || dev->is_virtfn) { | ||
822 | struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; | ||
823 | |||
824 | mutex_lock(&pdev->sriov->lock); | ||
825 | pdev->ats->ref_cnt--; | ||
826 | if (!pdev->ats->ref_cnt) | ||
827 | ats_free_one(pdev); | ||
828 | mutex_unlock(&pdev->sriov->lock); | ||
829 | } | ||
830 | |||
831 | if (!dev->is_physfn) | ||
832 | ats_free_one(dev); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * pci_ats_queue_depth - query the ATS Invalidate Queue Depth | ||
837 | * @dev: the PCI device | ||
838 | * | ||
839 | * Returns the queue depth on success, or negative on failure. | ||
840 | * | ||
841 | * The ATS spec uses 0 in the Invalidate Queue Depth field to | ||
842 | * indicate that the function can accept 32 Invalidate Request. | ||
843 | * But here we use the `real' values (i.e. 1~32) for the Queue | ||
844 | * Depth; and 0 indicates the function shares the Queue with | ||
845 | * other functions (doesn't exclusively own a Queue). | ||
846 | */ | ||
847 | int pci_ats_queue_depth(struct pci_dev *dev) | ||
848 | { | ||
849 | int pos; | ||
850 | u16 cap; | ||
851 | |||
852 | if (dev->is_virtfn) | ||
853 | return 0; | ||
854 | |||
855 | if (dev->ats) | ||
856 | return dev->ats->qdep; | ||
857 | |||
858 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); | ||
859 | if (!pos) | ||
860 | return -ENODEV; | ||
861 | |||
862 | pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); | ||
863 | |||
864 | return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : | ||
865 | PCI_ATS_MAX_QDEP; | ||
866 | } | ||
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index c94d37ec55c8..f0929934bb7a 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) | |||
55 | */ | 55 | */ |
56 | if (bus->bridge->of_node) | 56 | if (bus->bridge->of_node) |
57 | return of_node_get(bus->bridge->of_node); | 57 | return of_node_get(bus->bridge->of_node); |
58 | if (bus->bridge->parent->of_node) | 58 | if (bus->bridge->parent && bus->bridge->parent->of_node) |
59 | return of_node_get(bus->bridge->parent->of_node); | 59 | return of_node_get(bus->bridge->parent->of_node); |
60 | return NULL; | 60 | return NULL; |
61 | } | 61 | } |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index d36f41ea8cbf..4ecb6408b0d6 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -46,6 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
46 | struct pci_dev *pci_dev = context; | 46 | struct pci_dev *pci_dev = context; |
47 | 47 | ||
48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { | 48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { |
49 | if (pci_dev->pme_poll) | ||
50 | pci_dev->pme_poll = false; | ||
51 | |||
49 | pci_wakeup_event(pci_dev); | 52 | pci_wakeup_event(pci_dev); |
50 | pci_check_pme_status(pci_dev); | 53 | pci_check_pme_status(pci_dev); |
51 | pm_runtime_resume(&pci_dev->dev); | 54 | pm_runtime_resume(&pci_dev->dev); |
@@ -282,7 +285,6 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | |||
282 | { | 285 | { |
283 | struct acpi_device *dev; | 286 | struct acpi_device *dev; |
284 | acpi_handle handle; | 287 | acpi_handle handle; |
285 | int error = -ENODEV; | ||
286 | 288 | ||
287 | if (!device_run_wake(phys_dev)) | 289 | if (!device_run_wake(phys_dev)) |
288 | return -EINVAL; | 290 | return -EINVAL; |
@@ -302,7 +304,7 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | |||
302 | acpi_disable_wakeup_device_power(dev); | 304 | acpi_disable_wakeup_device_power(dev); |
303 | } | 305 | } |
304 | 306 | ||
305 | return error; | 307 | return 0; |
306 | } | 308 | } |
307 | 309 | ||
308 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | 310 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 08a95b369d85..6f45a73c6e9f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
81 | |||
80 | /* | 82 | /* |
81 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
82 | * all pci devices agree on the same value. Arch can override either | 84 | * all pci devices agree on the same value. Arch can override either |
@@ -1405,13 +1407,16 @@ bool pci_check_pme_status(struct pci_dev *dev) | |||
1405 | /** | 1407 | /** |
1406 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | 1408 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. |
1407 | * @dev: Device to handle. | 1409 | * @dev: Device to handle. |
1408 | * @ign: Ignored. | 1410 | * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. |
1409 | * | 1411 | * |
1410 | * Check if @dev has generated PME and queue a resume request for it in that | 1412 | * Check if @dev has generated PME and queue a resume request for it in that |
1411 | * case. | 1413 | * case. |
1412 | */ | 1414 | */ |
1413 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | 1415 | static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) |
1414 | { | 1416 | { |
1417 | if (pme_poll_reset && dev->pme_poll) | ||
1418 | dev->pme_poll = false; | ||
1419 | |||
1415 | if (pci_check_pme_status(dev)) { | 1420 | if (pci_check_pme_status(dev)) { |
1416 | pci_wakeup_event(dev); | 1421 | pci_wakeup_event(dev); |
1417 | pm_request_resume(&dev->dev); | 1422 | pm_request_resume(&dev->dev); |
@@ -1426,7 +1431,7 @@ static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | |||
1426 | void pci_pme_wakeup_bus(struct pci_bus *bus) | 1431 | void pci_pme_wakeup_bus(struct pci_bus *bus) |
1427 | { | 1432 | { |
1428 | if (bus) | 1433 | if (bus) |
1429 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | 1434 | pci_walk_bus(bus, pci_pme_wakeup, (void *)true); |
1430 | } | 1435 | } |
1431 | 1436 | ||
1432 | /** | 1437 | /** |
@@ -1444,31 +1449,26 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) | |||
1444 | 1449 | ||
1445 | static void pci_pme_list_scan(struct work_struct *work) | 1450 | static void pci_pme_list_scan(struct work_struct *work) |
1446 | { | 1451 | { |
1447 | struct pci_pme_device *pme_dev; | 1452 | struct pci_pme_device *pme_dev, *n; |
1448 | 1453 | ||
1449 | mutex_lock(&pci_pme_list_mutex); | 1454 | mutex_lock(&pci_pme_list_mutex); |
1450 | if (!list_empty(&pci_pme_list)) { | 1455 | if (!list_empty(&pci_pme_list)) { |
1451 | list_for_each_entry(pme_dev, &pci_pme_list, list) | 1456 | list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { |
1452 | pci_pme_wakeup(pme_dev->dev, NULL); | 1457 | if (pme_dev->dev->pme_poll) { |
1453 | schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); | 1458 | pci_pme_wakeup(pme_dev->dev, NULL); |
1459 | } else { | ||
1460 | list_del(&pme_dev->list); | ||
1461 | kfree(pme_dev); | ||
1462 | } | ||
1463 | } | ||
1464 | if (!list_empty(&pci_pme_list)) | ||
1465 | schedule_delayed_work(&pci_pme_work, | ||
1466 | msecs_to_jiffies(PME_TIMEOUT)); | ||
1454 | } | 1467 | } |
1455 | mutex_unlock(&pci_pme_list_mutex); | 1468 | mutex_unlock(&pci_pme_list_mutex); |
1456 | } | 1469 | } |
1457 | 1470 | ||
1458 | /** | 1471 | /** |
1459 | * pci_external_pme - is a device an external PCI PME source? | ||
1460 | * @dev: PCI device to check | ||
1461 | * | ||
1462 | */ | ||
1463 | |||
1464 | static bool pci_external_pme(struct pci_dev *dev) | ||
1465 | { | ||
1466 | if (pci_is_pcie(dev) || dev->bus->number == 0) | ||
1467 | return false; | ||
1468 | return true; | ||
1469 | } | ||
1470 | |||
1471 | /** | ||
1472 | * pci_pme_active - enable or disable PCI device's PME# function | 1472 | * pci_pme_active - enable or disable PCI device's PME# function |
1473 | * @dev: PCI device to handle. | 1473 | * @dev: PCI device to handle. |
1474 | * @enable: 'true' to enable PME# generation; 'false' to disable it. | 1474 | * @enable: 'true' to enable PME# generation; 'false' to disable it. |
@@ -1501,7 +1501,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1501 | hit, and the power savings from the devices will still be a | 1501 | hit, and the power savings from the devices will still be a |
1502 | win. */ | 1502 | win. */ |
1503 | 1503 | ||
1504 | if (pci_external_pme(dev)) { | 1504 | if (dev->pme_poll) { |
1505 | struct pci_pme_device *pme_dev; | 1505 | struct pci_pme_device *pme_dev; |
1506 | if (enable) { | 1506 | if (enable) { |
1507 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | 1507 | pme_dev = kmalloc(sizeof(struct pci_pme_device), |
@@ -1819,6 +1819,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
1819 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", | 1819 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", |
1820 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); | 1820 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); |
1821 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; | 1821 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; |
1822 | dev->pme_poll = true; | ||
1822 | /* | 1823 | /* |
1823 | * Make device's PM flags reflect the wake-up capability, but | 1824 | * Make device's PM flags reflect the wake-up capability, but |
1824 | * let the user space enable it to wake up the system as needed. | 1825 | * let the user space enable it to wake up the system as needed. |
@@ -3201,8 +3202,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
3201 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) | 3202 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
3202 | goto out; | 3203 | goto out; |
3203 | 3204 | ||
3204 | v = (ffs(rq) - 8) << 12; | ||
3205 | |||
3206 | cap = pci_pcie_cap(dev); | 3205 | cap = pci_pcie_cap(dev); |
3207 | if (!cap) | 3206 | if (!cap) |
3208 | goto out; | 3207 | goto out; |
@@ -3210,6 +3209,22 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
3210 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | 3209 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); |
3211 | if (err) | 3210 | if (err) |
3212 | goto out; | 3211 | goto out; |
3212 | /* | ||
3213 | * If using the "performance" PCIe config, we clamp the | ||
3214 | * read rq size to the max packet size to prevent the | ||
3215 | * host bridge generating requests larger than we can | ||
3216 | * cope with | ||
3217 | */ | ||
3218 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
3219 | int mps = pcie_get_mps(dev); | ||
3220 | |||
3221 | if (mps < 0) | ||
3222 | return mps; | ||
3223 | if (mps < rq) | ||
3224 | rq = mps; | ||
3225 | } | ||
3226 | |||
3227 | v = (ffs(rq) - 8) << 12; | ||
3213 | 3228 | ||
3214 | if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { | 3229 | if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { |
3215 | ctl &= ~PCI_EXP_DEVCTL_READRQ; | 3230 | ctl &= ~PCI_EXP_DEVCTL_READRQ; |
@@ -3223,6 +3238,67 @@ out: | |||
3223 | EXPORT_SYMBOL(pcie_set_readrq); | 3238 | EXPORT_SYMBOL(pcie_set_readrq); |
3224 | 3239 | ||
3225 | /** | 3240 | /** |
3241 | * pcie_get_mps - get PCI Express maximum payload size | ||
3242 | * @dev: PCI device to query | ||
3243 | * | ||
3244 | * Returns maximum payload size in bytes | ||
3245 | * or appropriate error value. | ||
3246 | */ | ||
3247 | int pcie_get_mps(struct pci_dev *dev) | ||
3248 | { | ||
3249 | int ret, cap; | ||
3250 | u16 ctl; | ||
3251 | |||
3252 | cap = pci_pcie_cap(dev); | ||
3253 | if (!cap) | ||
3254 | return -EINVAL; | ||
3255 | |||
3256 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3257 | if (!ret) | ||
3258 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | ||
3259 | |||
3260 | return ret; | ||
3261 | } | ||
3262 | |||
3263 | /** | ||
3264 | * pcie_set_mps - set PCI Express maximum payload size | ||
3265 | * @dev: PCI device to query | ||
3266 | * @mps: maximum payload size in bytes | ||
3267 | * valid values are 128, 256, 512, 1024, 2048, 4096 | ||
3268 | * | ||
3269 | * If possible sets maximum payload size | ||
3270 | */ | ||
3271 | int pcie_set_mps(struct pci_dev *dev, int mps) | ||
3272 | { | ||
3273 | int cap, err = -EINVAL; | ||
3274 | u16 ctl, v; | ||
3275 | |||
3276 | if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) | ||
3277 | goto out; | ||
3278 | |||
3279 | v = ffs(mps) - 8; | ||
3280 | if (v > dev->pcie_mpss) | ||
3281 | goto out; | ||
3282 | v <<= 5; | ||
3283 | |||
3284 | cap = pci_pcie_cap(dev); | ||
3285 | if (!cap) | ||
3286 | goto out; | ||
3287 | |||
3288 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3289 | if (err) | ||
3290 | goto out; | ||
3291 | |||
3292 | if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { | ||
3293 | ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; | ||
3294 | ctl |= v; | ||
3295 | err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); | ||
3296 | } | ||
3297 | out: | ||
3298 | return err; | ||
3299 | } | ||
3300 | |||
3301 | /** | ||
3226 | * pci_select_bars - Make BAR mask from the type of resource | 3302 | * pci_select_bars - Make BAR mask from the type of resource |
3227 | * @dev: the PCI device for which BAR mask is made | 3303 | * @dev: the PCI device for which BAR mask is made |
3228 | * @flags: resource type mask to be selected | 3304 | * @flags: resource type mask to be selected |
@@ -3505,6 +3581,14 @@ static int __init pci_setup(char *str) | |||
3505 | pci_hotplug_io_size = memparse(str + 9, &str); | 3581 | pci_hotplug_io_size = memparse(str + 9, &str); |
3506 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3582 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3507 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3583 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3584 | } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { | ||
3585 | pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
3586 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | ||
3587 | pcie_bus_config = PCIE_BUS_SAFE; | ||
3588 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | ||
3589 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | ||
3590 | } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { | ||
3591 | pcie_bus_config = PCIE_BUS_PEER2PEER; | ||
3508 | } else { | 3592 | } else { |
3509 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3593 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3510 | str); | 3594 | str); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c8cee764b0de..b74084e9ca12 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) | |||
283 | 283 | ||
284 | #endif /* CONFIG_PCI_IOV */ | 284 | #endif /* CONFIG_PCI_IOV */ |
285 | 285 | ||
286 | extern unsigned long pci_cardbus_resource_alignment(struct resource *); | ||
287 | |||
286 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | 288 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
287 | struct resource *res) | 289 | struct resource *res) |
288 | { | 290 | { |
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | |||
292 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | 294 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) |
293 | return pci_sriov_resource_alignment(dev, resno); | 295 | return pci_sriov_resource_alignment(dev, resno); |
294 | #endif | 296 | #endif |
297 | if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) | ||
298 | return pci_cardbus_resource_alignment(res); | ||
295 | return resource_alignment(res); | 299 | return resource_alignment(res); |
296 | } | 300 | } |
297 | 301 | ||
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 0057344a3fcb..001f1b78f39c 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c | |||
@@ -84,6 +84,9 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus) | |||
84 | list_for_each_entry(dev, &bus->devices, bus_list) { | 84 | list_for_each_entry(dev, &bus->devices, bus_list) { |
85 | /* Skip PCIe devices in case we started from a root port. */ | 85 | /* Skip PCIe devices in case we started from a root port. */ |
86 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { | 86 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { |
87 | if (dev->pme_poll) | ||
88 | dev->pme_poll = false; | ||
89 | |||
87 | pci_wakeup_event(dev); | 90 | pci_wakeup_event(dev); |
88 | pm_request_resume(&dev->dev); | 91 | pm_request_resume(&dev->dev); |
89 | ret = true; | 92 | ret = true; |
@@ -142,6 +145,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | |||
142 | 145 | ||
143 | /* First, check if the PME is from the root port itself. */ | 146 | /* First, check if the PME is from the root port itself. */ |
144 | if (port->devfn == devfn && port->bus->number == busnr) { | 147 | if (port->devfn == devfn && port->bus->number == busnr) { |
148 | if (port->pme_poll) | ||
149 | port->pme_poll = false; | ||
150 | |||
145 | if (pci_check_pme_status(port)) { | 151 | if (pci_check_pme_status(port)) { |
146 | pm_request_resume(&port->dev); | 152 | pm_request_resume(&port->dev); |
147 | found = true; | 153 | found = true; |
@@ -187,6 +193,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | |||
187 | /* The device is there, but we have to check its PME status. */ | 193 | /* The device is there, but we have to check its PME status. */ |
188 | found = pci_check_pme_status(dev); | 194 | found = pci_check_pme_status(dev); |
189 | if (found) { | 195 | if (found) { |
196 | if (dev->pme_poll) | ||
197 | dev->pme_poll = false; | ||
198 | |||
190 | pci_wakeup_event(dev); | 199 | pci_wakeup_event(dev); |
191 | pm_request_resume(&dev->dev); | 200 | pm_request_resume(&dev->dev); |
192 | } | 201 | } |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 795c9026d55f..04e74f485714 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev) | |||
856 | pdev->pcie_cap = pos; | 856 | pdev->pcie_cap = pos; |
857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
859 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); | ||
860 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; | ||
859 | } | 861 | } |
860 | 862 | ||
861 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 863 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
@@ -1326,6 +1328,157 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1326 | return nr; | 1328 | return nr; |
1327 | } | 1329 | } |
1328 | 1330 | ||
1331 | static int pcie_find_smpss(struct pci_dev *dev, void *data) | ||
1332 | { | ||
1333 | u8 *smpss = data; | ||
1334 | |||
1335 | if (!pci_is_pcie(dev)) | ||
1336 | return 0; | ||
1337 | |||
1338 | /* For PCIE hotplug enabled slots not connected directly to a | ||
1339 | * PCI-E root port, there can be problems when hotplugging | ||
1340 | * devices. This is due to the possibility of hotplugging a | ||
1341 | * device into the fabric with a smaller MPS that the devices | ||
1342 | * currently running have configured. Modifying the MPS on the | ||
1343 | * running devices could cause a fatal bus error due to an | ||
1344 | * incoming frame being larger than the newly configured MPS. | ||
1345 | * To work around this, the MPS for the entire fabric must be | ||
1346 | * set to the minimum size. Any devices hotplugged into this | ||
1347 | * fabric will have the minimum MPS set. If the PCI hotplug | ||
1348 | * slot is directly connected to the root port and there are not | ||
1349 | * other devices on the fabric (which seems to be the most | ||
1350 | * common case), then this is not an issue and MPS discovery | ||
1351 | * will occur as normal. | ||
1352 | */ | ||
1353 | if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || | ||
1354 | (dev->bus->self && | ||
1355 | dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) | ||
1356 | *smpss = 0; | ||
1357 | |||
1358 | if (*smpss > dev->pcie_mpss) | ||
1359 | *smpss = dev->pcie_mpss; | ||
1360 | |||
1361 | return 0; | ||
1362 | } | ||
1363 | |||
1364 | static void pcie_write_mps(struct pci_dev *dev, int mps) | ||
1365 | { | ||
1366 | int rc; | ||
1367 | |||
1368 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
1369 | mps = 128 << dev->pcie_mpss; | ||
1370 | |||
1371 | if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) | ||
1372 | /* For "Performance", the assumption is made that | ||
1373 | * downstream communication will never be larger than | ||
1374 | * the MRRS. So, the MPS only needs to be configured | ||
1375 | * for the upstream communication. This being the case, | ||
1376 | * walk from the top down and set the MPS of the child | ||
1377 | * to that of the parent bus. | ||
1378 | * | ||
1379 | * Configure the device MPS with the smaller of the | ||
1380 | * device MPSS or the bridge MPS (which is assumed to be | ||
1381 | * properly configured at this point to the largest | ||
1382 | * allowable MPS based on its parent bus). | ||
1383 | */ | ||
1384 | mps = min(mps, pcie_get_mps(dev->bus->self)); | ||
1385 | } | ||
1386 | |||
1387 | rc = pcie_set_mps(dev, mps); | ||
1388 | if (rc) | ||
1389 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); | ||
1390 | } | ||
1391 | |||
1392 | static void pcie_write_mrrs(struct pci_dev *dev) | ||
1393 | { | ||
1394 | int rc, mrrs; | ||
1395 | |||
1396 | /* In the "safe" case, do not configure the MRRS. There appear to be | ||
1397 | * issues with setting MRRS to 0 on a number of devices. | ||
1398 | */ | ||
1399 | if (pcie_bus_config != PCIE_BUS_PERFORMANCE) | ||
1400 | return; | ||
1401 | |||
1402 | /* For Max performance, the MRRS must be set to the largest supported | ||
1403 | * value. However, it cannot be configured larger than the MPS the | ||
1404 | * device or the bus can support. This should already be properly | ||
1405 | * configured by a prior call to pcie_write_mps. | ||
1406 | */ | ||
1407 | mrrs = pcie_get_mps(dev); | ||
1408 | |||
1409 | /* MRRS is a R/W register. Invalid values can be written, but a | ||
1410 | * subsequent read will verify if the value is acceptable or not. | ||
1411 | * If the MRRS value provided is not acceptable (e.g., too large), | ||
1412 | * shrink the value until it is acceptable to the HW. | ||
1413 | */ | ||
1414 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { | ||
1415 | rc = pcie_set_readrq(dev, mrrs); | ||
1416 | if (!rc) | ||
1417 | break; | ||
1418 | |||
1419 | dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); | ||
1420 | mrrs /= 2; | ||
1421 | } | ||
1422 | |||
1423 | if (mrrs < 128) | ||
1424 | dev_err(&dev->dev, "MRRS was unable to be configured with a " | ||
1425 | "safe value. If problems are experienced, try running " | ||
1426 | "with pci=pcie_bus_safe.\n"); | ||
1427 | } | ||
1428 | |||
1429 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | ||
1430 | { | ||
1431 | int mps, orig_mps; | ||
1432 | |||
1433 | if (!pci_is_pcie(dev)) | ||
1434 | return 0; | ||
1435 | |||
1436 | mps = 128 << *(u8 *)data; | ||
1437 | orig_mps = pcie_get_mps(dev); | ||
1438 | |||
1439 | pcie_write_mps(dev, mps); | ||
1440 | pcie_write_mrrs(dev); | ||
1441 | |||
1442 | dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " | ||
1443 | "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, | ||
1444 | orig_mps, pcie_get_readrq(dev)); | ||
1445 | |||
1446 | return 0; | ||
1447 | } | ||
1448 | |||
1449 | /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, | ||
1450 | * parents then children fashion. If this changes, then this code will not | ||
1451 | * work as designed. | ||
1452 | */ | ||
1453 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | ||
1454 | { | ||
1455 | u8 smpss; | ||
1456 | |||
1457 | if (!pci_is_pcie(bus->self)) | ||
1458 | return; | ||
1459 | |||
1460 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
1461 | return; | ||
1462 | |||
1463 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | ||
1464 | * to be aware to the MPS of the destination. To work around this, | ||
1465 | * simply force the MPS of the entire system to the smallest possible. | ||
1466 | */ | ||
1467 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | ||
1468 | smpss = 0; | ||
1469 | |||
1470 | if (pcie_bus_config == PCIE_BUS_SAFE) { | ||
1471 | smpss = mpss; | ||
1472 | |||
1473 | pcie_find_smpss(bus->self, &smpss); | ||
1474 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | ||
1475 | } | ||
1476 | |||
1477 | pcie_bus_configure_set(bus->self, &smpss); | ||
1478 | pci_walk_bus(bus, pcie_bus_configure_set, &smpss); | ||
1479 | } | ||
1480 | EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); | ||
1481 | |||
1329 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | 1482 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1330 | { | 1483 | { |
1331 | unsigned int devfn, pass, max = bus->secondary; | 1484 | unsigned int devfn, pass, max = bus->secondary; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1196f61a4ab6..7285145ac1c9 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2745,20 +2745,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | |||
2745 | /* disable must be done via function #0 */ | 2745 | /* disable must be done via function #0 */ |
2746 | if (PCI_FUNC(dev->devfn)) | 2746 | if (PCI_FUNC(dev->devfn)) |
2747 | return; | 2747 | return; |
2748 | |||
2749 | pci_read_config_byte(dev, 0xCB, &disable); | ||
2750 | |||
2751 | if (disable & 0x02) | ||
2752 | return; | ||
2753 | |||
2754 | pci_read_config_byte(dev, 0xCA, &write_enable); | ||
2755 | pci_write_config_byte(dev, 0xCA, 0x57); | ||
2756 | pci_write_config_byte(dev, 0xCB, disable | 0x02); | ||
2757 | pci_write_config_byte(dev, 0xCA, write_enable); | ||
2758 | |||
2759 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | ||
2760 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2761 | |||
2762 | /* | 2748 | /* |
2763 | * RICOH 0xe823 SD/MMC card reader fails to recognize | 2749 | * RICOH 0xe823 SD/MMC card reader fails to recognize |
2764 | * certain types of SD/MMC cards. Lowering the SD base | 2750 | * certain types of SD/MMC cards. Lowering the SD base |
@@ -2781,6 +2767,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | |||
2781 | 2767 | ||
2782 | dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); | 2768 | dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); |
2783 | } | 2769 | } |
2770 | |||
2771 | pci_read_config_byte(dev, 0xCB, &disable); | ||
2772 | |||
2773 | if (disable & 0x02) | ||
2774 | return; | ||
2775 | |||
2776 | pci_read_config_byte(dev, 0xCA, &write_enable); | ||
2777 | pci_write_config_byte(dev, 0xCA, 0x57); | ||
2778 | pci_write_config_byte(dev, 0xCB, disable | 0x02); | ||
2779 | pci_write_config_byte(dev, 0xCA, write_enable); | ||
2780 | |||
2781 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | ||
2782 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2783 | |||
2784 | } | 2784 | } |
2785 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2785 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
2786 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2786 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
@@ -2788,7 +2788,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_ | |||
2788 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); | 2788 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); |
2789 | #endif /*CONFIG_MMC_RICOH_MMC*/ | 2789 | #endif /*CONFIG_MMC_RICOH_MMC*/ |
2790 | 2790 | ||
2791 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | 2791 | #ifdef CONFIG_DMAR_TABLE |
2792 | #define VTUNCERRMSK_REG 0x1ac | 2792 | #define VTUNCERRMSK_REG 0x1ac |
2793 | #define VTD_MSK_SPEC_ERRORS (1 << 31) | 2793 | #define VTD_MSK_SPEC_ERRORS (1 << 31) |
2794 | /* | 2794 | /* |
@@ -2822,6 +2822,89 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev) | |||
2822 | } | 2822 | } |
2823 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); | 2823 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); |
2824 | 2824 | ||
2825 | /* Some PCIe devices do not work reliably with the claimed maximum | ||
2826 | * payload size supported. | ||
2827 | */ | ||
2828 | static void __devinit fixup_mpss_256(struct pci_dev *dev) | ||
2829 | { | ||
2830 | dev->pcie_mpss = 1; /* 256 bytes */ | ||
2831 | } | ||
2832 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2833 | PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); | ||
2834 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2835 | PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); | ||
2836 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, | ||
2837 | PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); | ||
2838 | |||
2839 | /* Intel 5000 and 5100 Memory controllers have an errata with read completion | ||
2840 | * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. | ||
2841 | * Since there is no way of knowing what the PCIE MPS on each fabric will be | ||
2842 | * until all of the devices are discovered and buses walked, read completion | ||
2843 | * coalescing must be disabled. Unfortunately, it cannot be re-enabled because | ||
2844 | * it is possible to hotplug a device with MPS of 256B. | ||
2845 | */ | ||
2846 | static void __devinit quirk_intel_mc_errata(struct pci_dev *dev) | ||
2847 | { | ||
2848 | int err; | ||
2849 | u16 rcc; | ||
2850 | |||
2851 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
2852 | return; | ||
2853 | |||
2854 | /* Intel errata specifies bits to change but does not say what they are. | ||
2855 | * Keeping them magical until such time as the registers and values can | ||
2856 | * be explained. | ||
2857 | */ | ||
2858 | err = pci_read_config_word(dev, 0x48, &rcc); | ||
2859 | if (err) { | ||
2860 | dev_err(&dev->dev, "Error attempting to read the read " | ||
2861 | "completion coalescing register.\n"); | ||
2862 | return; | ||
2863 | } | ||
2864 | |||
2865 | if (!(rcc & (1 << 10))) | ||
2866 | return; | ||
2867 | |||
2868 | rcc &= ~(1 << 10); | ||
2869 | |||
2870 | err = pci_write_config_word(dev, 0x48, rcc); | ||
2871 | if (err) { | ||
2872 | dev_err(&dev->dev, "Error attempting to write the read " | ||
2873 | "completion coalescing register.\n"); | ||
2874 | return; | ||
2875 | } | ||
2876 | |||
2877 | pr_info_once("Read completion coalescing disabled due to hardware " | ||
2878 | "errata relating to 256B MPS.\n"); | ||
2879 | } | ||
2880 | /* Intel 5000 series memory controllers and ports 2-7 */ | ||
2881 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); | ||
2882 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata); | ||
2883 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata); | ||
2884 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata); | ||
2885 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata); | ||
2886 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata); | ||
2887 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata); | ||
2888 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata); | ||
2889 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata); | ||
2890 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata); | ||
2891 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata); | ||
2892 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata); | ||
2893 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata); | ||
2894 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata); | ||
2895 | /* Intel 5100 series memory controllers and ports 2-7 */ | ||
2896 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata); | ||
2897 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata); | ||
2898 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata); | ||
2899 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata); | ||
2900 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata); | ||
2901 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata); | ||
2902 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata); | ||
2903 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata); | ||
2904 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata); | ||
2905 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); | ||
2906 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); | ||
2907 | |||
2825 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, | 2908 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
2826 | struct pci_fixup *end) | 2909 | struct pci_fixup *end) |
2827 | { | 2910 | { |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8a1d3c7863a8..86b69f85f900 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -34,6 +34,7 @@ struct resource_list_x { | |||
34 | resource_size_t start; | 34 | resource_size_t start; |
35 | resource_size_t end; | 35 | resource_size_t end; |
36 | resource_size_t add_size; | 36 | resource_size_t add_size; |
37 | resource_size_t min_align; | ||
37 | unsigned long flags; | 38 | unsigned long flags; |
38 | }; | 39 | }; |
39 | 40 | ||
@@ -65,7 +66,7 @@ void pci_realloc(void) | |||
65 | */ | 66 | */ |
66 | static void add_to_list(struct resource_list_x *head, | 67 | static void add_to_list(struct resource_list_x *head, |
67 | struct pci_dev *dev, struct resource *res, | 68 | struct pci_dev *dev, struct resource *res, |
68 | resource_size_t add_size) | 69 | resource_size_t add_size, resource_size_t min_align) |
69 | { | 70 | { |
70 | struct resource_list_x *list = head; | 71 | struct resource_list_x *list = head; |
71 | struct resource_list_x *ln = list->next; | 72 | struct resource_list_x *ln = list->next; |
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head, | |||
84 | tmp->end = res->end; | 85 | tmp->end = res->end; |
85 | tmp->flags = res->flags; | 86 | tmp->flags = res->flags; |
86 | tmp->add_size = add_size; | 87 | tmp->add_size = add_size; |
88 | tmp->min_align = min_align; | ||
87 | list->next = tmp; | 89 | list->next = tmp; |
88 | } | 90 | } |
89 | 91 | ||
90 | static void add_to_failed_list(struct resource_list_x *head, | 92 | static void add_to_failed_list(struct resource_list_x *head, |
91 | struct pci_dev *dev, struct resource *res) | 93 | struct pci_dev *dev, struct resource *res) |
92 | { | 94 | { |
93 | add_to_list(head, dev, res, 0); | 95 | add_to_list(head, dev, res, |
96 | 0 /* dont care */, | ||
97 | 0 /* dont care */); | ||
94 | } | 98 | } |
95 | 99 | ||
96 | static void __dev_sort_resources(struct pci_dev *dev, | 100 | static void __dev_sort_resources(struct pci_dev *dev, |
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res) | |||
121 | } | 125 | } |
122 | 126 | ||
123 | /** | 127 | /** |
124 | * adjust_resources_sorted() - satisfy any additional resource requests | 128 | * reassign_resources_sorted() - satisfy any additional resource requests |
125 | * | 129 | * |
126 | * @add_head : head of the list tracking requests requiring additional | 130 | * @realloc_head : head of the list tracking requests requiring additional |
127 | * resources | 131 | * resources |
128 | * @head : head of the list tracking requests with allocated | 132 | * @head : head of the list tracking requests with allocated |
129 | * resources | 133 | * resources |
130 | * | 134 | * |
131 | * Walk through each element of the add_head and try to procure | 135 | * Walk through each element of the realloc_head and try to procure |
132 | * additional resources for the element, provided the element | 136 | * additional resources for the element, provided the element |
133 | * is in the head list. | 137 | * is in the head list. |
134 | */ | 138 | */ |
135 | static void adjust_resources_sorted(struct resource_list_x *add_head, | 139 | static void reassign_resources_sorted(struct resource_list_x *realloc_head, |
136 | struct resource_list *head) | 140 | struct resource_list *head) |
137 | { | 141 | { |
138 | struct resource *res; | 142 | struct resource *res; |
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
141 | resource_size_t add_size; | 145 | resource_size_t add_size; |
142 | int idx; | 146 | int idx; |
143 | 147 | ||
144 | prev = add_head; | 148 | prev = realloc_head; |
145 | for (list = add_head->next; list;) { | 149 | for (list = realloc_head->next; list;) { |
146 | res = list->res; | 150 | res = list->res; |
147 | /* skip resource that has been reset */ | 151 | /* skip resource that has been reset */ |
148 | if (!res->flags) | 152 | if (!res->flags) |
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
159 | 163 | ||
160 | idx = res - &list->dev->resource[0]; | 164 | idx = res - &list->dev->resource[0]; |
161 | add_size=list->add_size; | 165 | add_size=list->add_size; |
162 | if (!resource_size(res) && add_size) { | 166 | if (!resource_size(res)) { |
163 | res->end = res->start + add_size - 1; | 167 | res->start = list->start; |
164 | if(pci_assign_resource(list->dev, idx)) | 168 | res->end = res->start + add_size - 1; |
169 | if(pci_assign_resource(list->dev, idx)) | ||
165 | reset_resource(res); | 170 | reset_resource(res); |
166 | } else if (add_size) { | 171 | } else { |
167 | adjust_resource(res, res->start, | 172 | resource_size_t align = list->min_align; |
168 | resource_size(res) + add_size); | 173 | res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); |
174 | if (pci_reassign_resource(list->dev, idx, add_size, align)) | ||
175 | dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n", | ||
176 | res); | ||
169 | } | 177 | } |
170 | out: | 178 | out: |
171 | tmp = list; | 179 | tmp = list; |
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head, | |||
210 | } | 218 | } |
211 | 219 | ||
212 | static void __assign_resources_sorted(struct resource_list *head, | 220 | static void __assign_resources_sorted(struct resource_list *head, |
213 | struct resource_list_x *add_head, | 221 | struct resource_list_x *realloc_head, |
214 | struct resource_list_x *fail_head) | 222 | struct resource_list_x *fail_head) |
215 | { | 223 | { |
216 | /* Satisfy the must-have resource requests */ | 224 | /* Satisfy the must-have resource requests */ |
217 | assign_requested_resources_sorted(head, fail_head); | 225 | assign_requested_resources_sorted(head, fail_head); |
218 | 226 | ||
219 | /* Try to satisfy any additional nice-to-have resource | 227 | /* Try to satisfy any additional optional resource |
220 | requests */ | 228 | requests */ |
221 | if (add_head) | 229 | if (realloc_head) |
222 | adjust_resources_sorted(add_head, head); | 230 | reassign_resources_sorted(realloc_head, head); |
223 | free_list(resource_list, head); | 231 | free_list(resource_list, head); |
224 | } | 232 | } |
225 | 233 | ||
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev, | |||
235 | } | 243 | } |
236 | 244 | ||
237 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | 245 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, |
238 | struct resource_list_x *add_head, | 246 | struct resource_list_x *realloc_head, |
239 | struct resource_list_x *fail_head) | 247 | struct resource_list_x *fail_head) |
240 | { | 248 | { |
241 | struct pci_dev *dev; | 249 | struct pci_dev *dev; |
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus, | |||
245 | list_for_each_entry(dev, &bus->devices, bus_list) | 253 | list_for_each_entry(dev, &bus->devices, bus_list) |
246 | __dev_sort_resources(dev, &head); | 254 | __dev_sort_resources(dev, &head); |
247 | 255 | ||
248 | __assign_resources_sorted(&head, add_head, fail_head); | 256 | __assign_resources_sorted(&head, realloc_head, fail_head); |
249 | } | 257 | } |
250 | 258 | ||
251 | void pci_setup_cardbus(struct pci_bus *bus) | 259 | void pci_setup_cardbus(struct pci_bus *bus) |
@@ -418,7 +426,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | |||
418 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 426 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
419 | } | 427 | } |
420 | 428 | ||
421 | static void pci_setup_bridge(struct pci_bus *bus) | 429 | void pci_setup_bridge(struct pci_bus *bus) |
422 | { | 430 | { |
423 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | 431 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | |
424 | IORESOURCE_PREFETCH; | 432 | IORESOURCE_PREFETCH; |
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
540 | return size; | 548 | return size; |
541 | } | 549 | } |
542 | 550 | ||
551 | static resource_size_t get_res_add_size(struct resource_list_x *realloc_head, | ||
552 | struct resource *res) | ||
553 | { | ||
554 | struct resource_list_x *list; | ||
555 | |||
556 | /* check if it is in realloc_head list */ | ||
557 | for (list = realloc_head->next; list && list->res != res; | ||
558 | list = list->next); | ||
559 | if (list) | ||
560 | return list->add_size; | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
543 | /** | 565 | /** |
544 | * pbus_size_io() - size the io window of a given bus | 566 | * pbus_size_io() - size the io window of a given bus |
545 | * | 567 | * |
546 | * @bus : the bus | 568 | * @bus : the bus |
547 | * @min_size : the minimum io window that must to be allocated | 569 | * @min_size : the minimum io window that must to be allocated |
548 | * @add_size : additional optional io window | 570 | * @add_size : additional optional io window |
549 | * @add_head : track the additional io window on this list | 571 | * @realloc_head : track the additional io window on this list |
550 | * | 572 | * |
551 | * Sizing the IO windows of the PCI-PCI bridge is trivial, | 573 | * Sizing the IO windows of the PCI-PCI bridge is trivial, |
552 | * since these windows have 4K granularity and the IO ranges | 574 | * since these windows have 4K granularity and the IO ranges |
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
554 | * We must be careful with the ISA aliasing though. | 576 | * We must be careful with the ISA aliasing though. |
555 | */ | 577 | */ |
556 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | 578 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
557 | resource_size_t add_size, struct resource_list_x *add_head) | 579 | resource_size_t add_size, struct resource_list_x *realloc_head) |
558 | { | 580 | { |
559 | struct pci_dev *dev; | 581 | struct pci_dev *dev; |
560 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 582 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
561 | unsigned long size = 0, size0 = 0, size1 = 0; | 583 | unsigned long size = 0, size0 = 0, size1 = 0; |
584 | resource_size_t children_add_size = 0; | ||
562 | 585 | ||
563 | if (!b_res) | 586 | if (!b_res) |
564 | return; | 587 | return; |
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
579 | size += r_size; | 602 | size += r_size; |
580 | else | 603 | else |
581 | size1 += r_size; | 604 | size1 += r_size; |
605 | |||
606 | if (realloc_head) | ||
607 | children_add_size += get_res_add_size(realloc_head, r); | ||
582 | } | 608 | } |
583 | } | 609 | } |
584 | size0 = calculate_iosize(size, min_size, size1, | 610 | size0 = calculate_iosize(size, min_size, size1, |
585 | resource_size(b_res), 4096); | 611 | resource_size(b_res), 4096); |
586 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 612 | if (children_add_size > add_size) |
613 | add_size = children_add_size; | ||
614 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
587 | calculate_iosize(size, min_size+add_size, size1, | 615 | calculate_iosize(size, min_size+add_size, size1, |
588 | resource_size(b_res), 4096); | 616 | resource_size(b_res), 4096); |
589 | if (!size0 && !size1) { | 617 | if (!size0 && !size1) { |
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
598 | b_res->start = 4096; | 626 | b_res->start = 4096; |
599 | b_res->end = b_res->start + size0 - 1; | 627 | b_res->end = b_res->start + size0 - 1; |
600 | b_res->flags |= IORESOURCE_STARTALIGN; | 628 | b_res->flags |= IORESOURCE_STARTALIGN; |
601 | if (size1 > size0 && add_head) | 629 | if (size1 > size0 && realloc_head) |
602 | add_to_list(add_head, bus->self, b_res, size1-size0); | 630 | add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096); |
603 | } | 631 | } |
604 | 632 | ||
605 | /** | 633 | /** |
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
608 | * @bus : the bus | 636 | * @bus : the bus |
609 | * @min_size : the minimum memory window that must to be allocated | 637 | * @min_size : the minimum memory window that must to be allocated |
610 | * @add_size : additional optional memory window | 638 | * @add_size : additional optional memory window |
611 | * @add_head : track the additional memory window on this list | 639 | * @realloc_head : track the additional memory window on this list |
612 | * | 640 | * |
613 | * Calculate the size of the bus and minimal alignment which | 641 | * Calculate the size of the bus and minimal alignment which |
614 | * guarantees that all child resources fit in this size. | 642 | * guarantees that all child resources fit in this size. |
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
616 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | 644 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
617 | unsigned long type, resource_size_t min_size, | 645 | unsigned long type, resource_size_t min_size, |
618 | resource_size_t add_size, | 646 | resource_size_t add_size, |
619 | struct resource_list_x *add_head) | 647 | struct resource_list_x *realloc_head) |
620 | { | 648 | { |
621 | struct pci_dev *dev; | 649 | struct pci_dev *dev; |
622 | resource_size_t min_align, align, size, size0, size1; | 650 | resource_size_t min_align, align, size, size0, size1; |
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
624 | int order, max_order; | 652 | int order, max_order; |
625 | struct resource *b_res = find_free_bus_resource(bus, type); | 653 | struct resource *b_res = find_free_bus_resource(bus, type); |
626 | unsigned int mem64_mask = 0; | 654 | unsigned int mem64_mask = 0; |
655 | resource_size_t children_add_size = 0; | ||
627 | 656 | ||
628 | if (!b_res) | 657 | if (!b_res) |
629 | return 0; | 658 | return 0; |
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
645 | if (r->parent || (r->flags & mask) != type) | 674 | if (r->parent || (r->flags & mask) != type) |
646 | continue; | 675 | continue; |
647 | r_size = resource_size(r); | 676 | r_size = resource_size(r); |
677 | #ifdef CONFIG_PCI_IOV | ||
678 | /* put SRIOV requested res to the optional list */ | ||
679 | if (realloc_head && i >= PCI_IOV_RESOURCES && | ||
680 | i <= PCI_IOV_RESOURCE_END) { | ||
681 | r->end = r->start - 1; | ||
682 | add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); | ||
683 | children_add_size += r_size; | ||
684 | continue; | ||
685 | } | ||
686 | #endif | ||
648 | /* For bridges size != alignment */ | 687 | /* For bridges size != alignment */ |
649 | align = pci_resource_alignment(dev, r); | 688 | align = pci_resource_alignment(dev, r); |
650 | order = __ffs(align) - 20; | 689 | order = __ffs(align) - 20; |
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
665 | if (order > max_order) | 704 | if (order > max_order) |
666 | max_order = order; | 705 | max_order = order; |
667 | mem64_mask &= r->flags & IORESOURCE_MEM_64; | 706 | mem64_mask &= r->flags & IORESOURCE_MEM_64; |
707 | |||
708 | if (realloc_head) | ||
709 | children_add_size += get_res_add_size(realloc_head, r); | ||
668 | } | 710 | } |
669 | } | 711 | } |
670 | align = 0; | 712 | align = 0; |
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
681 | align += aligns[order]; | 723 | align += aligns[order]; |
682 | } | 724 | } |
683 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 725 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
684 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 726 | if (children_add_size > add_size) |
727 | add_size = children_add_size; | ||
728 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
685 | calculate_memsize(size, min_size+add_size, 0, | 729 | calculate_memsize(size, min_size+add_size, 0, |
686 | resource_size(b_res), min_align); | 730 | resource_size(b_res), min_align); |
687 | if (!size0 && !size1) { | 731 | if (!size0 && !size1) { |
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
695 | b_res->start = min_align; | 739 | b_res->start = min_align; |
696 | b_res->end = size0 + min_align - 1; | 740 | b_res->end = size0 + min_align - 1; |
697 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; | 741 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; |
698 | if (size1 > size0 && add_head) | 742 | if (size1 > size0 && realloc_head) |
699 | add_to_list(add_head, bus->self, b_res, size1-size0); | 743 | add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); |
700 | return 1; | 744 | return 1; |
701 | } | 745 | } |
702 | 746 | ||
703 | static void pci_bus_size_cardbus(struct pci_bus *bus) | 747 | unsigned long pci_cardbus_resource_alignment(struct resource *res) |
748 | { | ||
749 | if (res->flags & IORESOURCE_IO) | ||
750 | return pci_cardbus_io_size; | ||
751 | if (res->flags & IORESOURCE_MEM) | ||
752 | return pci_cardbus_mem_size; | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void pci_bus_size_cardbus(struct pci_bus *bus, | ||
757 | struct resource_list_x *realloc_head) | ||
704 | { | 758 | { |
705 | struct pci_dev *bridge = bus->self; | 759 | struct pci_dev *bridge = bus->self; |
706 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; | 760 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; |
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
711 | * a fixed amount of bus space for CardBus bridges. | 765 | * a fixed amount of bus space for CardBus bridges. |
712 | */ | 766 | */ |
713 | b_res[0].start = 0; | 767 | b_res[0].start = 0; |
714 | b_res[0].end = pci_cardbus_io_size - 1; | ||
715 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 768 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
769 | if (realloc_head) | ||
770 | add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */); | ||
716 | 771 | ||
717 | b_res[1].start = 0; | 772 | b_res[1].start = 0; |
718 | b_res[1].end = pci_cardbus_io_size - 1; | ||
719 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 773 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
774 | if (realloc_head) | ||
775 | add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */); | ||
720 | 776 | ||
721 | /* | 777 | /* |
722 | * Check whether prefetchable memory is supported | 778 | * Check whether prefetchable memory is supported |
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
736 | */ | 792 | */ |
737 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { | 793 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { |
738 | b_res[2].start = 0; | 794 | b_res[2].start = 0; |
739 | b_res[2].end = pci_cardbus_mem_size - 1; | ||
740 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; | 795 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; |
796 | if (realloc_head) | ||
797 | add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */); | ||
741 | 798 | ||
742 | b_res[3].start = 0; | 799 | b_res[3].start = 0; |
743 | b_res[3].end = pci_cardbus_mem_size - 1; | ||
744 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 800 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
801 | if (realloc_head) | ||
802 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */); | ||
745 | } else { | 803 | } else { |
746 | b_res[3].start = 0; | 804 | b_res[3].start = 0; |
747 | b_res[3].end = pci_cardbus_mem_size * 2 - 1; | ||
748 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 805 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
806 | if (realloc_head) | ||
807 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */); | ||
749 | } | 808 | } |
809 | |||
810 | /* set the size of the resource to zero, so that the resource does not | ||
811 | * get assigned during required-resource allocation cycle but gets assigned | ||
812 | * during the optional-resource allocation cycle. | ||
813 | */ | ||
814 | b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1; | ||
815 | b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0; | ||
750 | } | 816 | } |
751 | 817 | ||
752 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, | 818 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, |
753 | struct resource_list_x *add_head) | 819 | struct resource_list_x *realloc_head) |
754 | { | 820 | { |
755 | struct pci_dev *dev; | 821 | struct pci_dev *dev; |
756 | unsigned long mask, prefmask; | 822 | unsigned long mask, prefmask; |
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
763 | 829 | ||
764 | switch (dev->class >> 8) { | 830 | switch (dev->class >> 8) { |
765 | case PCI_CLASS_BRIDGE_CARDBUS: | 831 | case PCI_CLASS_BRIDGE_CARDBUS: |
766 | pci_bus_size_cardbus(b); | 832 | pci_bus_size_cardbus(b, realloc_head); |
767 | break; | 833 | break; |
768 | 834 | ||
769 | case PCI_CLASS_BRIDGE_PCI: | 835 | case PCI_CLASS_BRIDGE_PCI: |
770 | default: | 836 | default: |
771 | __pci_bus_size_bridges(b, add_head); | 837 | __pci_bus_size_bridges(b, realloc_head); |
772 | break; | 838 | break; |
773 | } | 839 | } |
774 | } | 840 | } |
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
792 | * Follow thru | 858 | * Follow thru |
793 | */ | 859 | */ |
794 | default: | 860 | default: |
795 | pbus_size_io(bus, 0, additional_io_size, add_head); | 861 | pbus_size_io(bus, 0, additional_io_size, realloc_head); |
796 | /* If the bridge supports prefetchable range, size it | 862 | /* If the bridge supports prefetchable range, size it |
797 | separately. If it doesn't, or its prefetchable window | 863 | separately. If it doesn't, or its prefetchable window |
798 | has already been allocated by arch code, try | 864 | has already been allocated by arch code, try |
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
800 | resources. */ | 866 | resources. */ |
801 | mask = IORESOURCE_MEM; | 867 | mask = IORESOURCE_MEM; |
802 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; | 868 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; |
803 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) | 869 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head)) |
804 | mask = prefmask; /* Success, size non-prefetch only. */ | 870 | mask = prefmask; /* Success, size non-prefetch only. */ |
805 | else | 871 | else |
806 | additional_mem_size += additional_mem_size; | 872 | additional_mem_size += additional_mem_size; |
807 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); | 873 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head); |
808 | break; | 874 | break; |
809 | } | 875 | } |
810 | } | 876 | } |
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
816 | EXPORT_SYMBOL(pci_bus_size_bridges); | 882 | EXPORT_SYMBOL(pci_bus_size_bridges); |
817 | 883 | ||
818 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, | 884 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
819 | struct resource_list_x *add_head, | 885 | struct resource_list_x *realloc_head, |
820 | struct resource_list_x *fail_head) | 886 | struct resource_list_x *fail_head) |
821 | { | 887 | { |
822 | struct pci_bus *b; | 888 | struct pci_bus *b; |
823 | struct pci_dev *dev; | 889 | struct pci_dev *dev; |
824 | 890 | ||
825 | pbus_assign_resources_sorted(bus, add_head, fail_head); | 891 | pbus_assign_resources_sorted(bus, realloc_head, fail_head); |
826 | 892 | ||
827 | list_for_each_entry(dev, &bus->devices, bus_list) { | 893 | list_for_each_entry(dev, &bus->devices, bus_list) { |
828 | b = dev->subordinate; | 894 | b = dev->subordinate; |
829 | if (!b) | 895 | if (!b) |
830 | continue; | 896 | continue; |
831 | 897 | ||
832 | __pci_bus_assign_resources(b, add_head, fail_head); | 898 | __pci_bus_assign_resources(b, realloc_head, fail_head); |
833 | 899 | ||
834 | switch (dev->class >> 8) { | 900 | switch (dev->class >> 8) { |
835 | case PCI_CLASS_BRIDGE_PCI: | 901 | case PCI_CLASS_BRIDGE_PCI: |
@@ -1039,7 +1105,7 @@ void __init | |||
1039 | pci_assign_unassigned_resources(void) | 1105 | pci_assign_unassigned_resources(void) |
1040 | { | 1106 | { |
1041 | struct pci_bus *bus; | 1107 | struct pci_bus *bus; |
1042 | struct resource_list_x add_list; /* list of resources that | 1108 | struct resource_list_x realloc_list; /* list of resources that |
1043 | want additional resources */ | 1109 | want additional resources */ |
1044 | int tried_times = 0; | 1110 | int tried_times = 0; |
1045 | enum release_type rel_type = leaf_only; | 1111 | enum release_type rel_type = leaf_only; |
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void) | |||
1052 | 1118 | ||
1053 | 1119 | ||
1054 | head.next = NULL; | 1120 | head.next = NULL; |
1055 | add_list.next = NULL; | 1121 | realloc_list.next = NULL; |
1056 | 1122 | ||
1057 | pci_try_num = max_depth + 1; | 1123 | pci_try_num = max_depth + 1; |
1058 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | 1124 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", |
@@ -1062,12 +1128,12 @@ again: | |||
1062 | /* Depth first, calculate sizes and alignments of all | 1128 | /* Depth first, calculate sizes and alignments of all |
1063 | subordinate buses. */ | 1129 | subordinate buses. */ |
1064 | list_for_each_entry(bus, &pci_root_buses, node) | 1130 | list_for_each_entry(bus, &pci_root_buses, node) |
1065 | __pci_bus_size_bridges(bus, &add_list); | 1131 | __pci_bus_size_bridges(bus, &realloc_list); |
1066 | 1132 | ||
1067 | /* Depth last, allocate resources and update the hardware. */ | 1133 | /* Depth last, allocate resources and update the hardware. */ |
1068 | list_for_each_entry(bus, &pci_root_buses, node) | 1134 | list_for_each_entry(bus, &pci_root_buses, node) |
1069 | __pci_bus_assign_resources(bus, &add_list, &head); | 1135 | __pci_bus_assign_resources(bus, &realloc_list, &head); |
1070 | BUG_ON(add_list.next); | 1136 | BUG_ON(realloc_list.next); |
1071 | tried_times++; | 1137 | tried_times++; |
1072 | 1138 | ||
1073 | /* any device complain? */ | 1139 | /* any device complain? */ |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 319f359906e8..51a9095c7da4 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev) | |||
128 | } | 128 | } |
129 | #endif /* CONFIG_PCI_QUIRKS */ | 129 | #endif /* CONFIG_PCI_QUIRKS */ |
130 | 130 | ||
131 | |||
132 | |||
131 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | 133 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, |
132 | int resno) | 134 | int resno, resource_size_t size, resource_size_t align) |
133 | { | 135 | { |
134 | struct resource *res = dev->resource + resno; | 136 | struct resource *res = dev->resource + resno; |
135 | resource_size_t size, min, align; | 137 | resource_size_t min; |
136 | int ret; | 138 | int ret; |
137 | 139 | ||
138 | size = resource_size(res); | ||
139 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 140 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
140 | align = pci_resource_alignment(dev, res); | ||
141 | 141 | ||
142 | /* First, try exact prefetching match.. */ | 142 | /* First, try exact prefetching match.. */ |
143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, | 143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, |
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, | 154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, |
155 | pcibios_align_resource, dev); | 155 | pcibios_align_resource, dev); |
156 | } | 156 | } |
157 | return ret; | ||
158 | } | ||
157 | 159 | ||
158 | if (ret < 0 && dev->fw_addr[resno]) { | 160 | static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, |
159 | struct resource *root, *conflict; | 161 | int resno, resource_size_t size) |
160 | resource_size_t start, end; | 162 | { |
163 | struct resource *root, *conflict; | ||
164 | resource_size_t start, end; | ||
165 | int ret = 0; | ||
161 | 166 | ||
162 | /* | 167 | if (res->flags & IORESOURCE_IO) |
163 | * If we failed to assign anything, let's try the address | 168 | root = &ioport_resource; |
164 | * where firmware left it. That at least has a chance of | 169 | else |
165 | * working, which is better than just leaving it disabled. | 170 | root = &iomem_resource; |
166 | */ | 171 | |
172 | start = res->start; | ||
173 | end = res->end; | ||
174 | res->start = dev->fw_addr[resno]; | ||
175 | res->end = res->start + size - 1; | ||
176 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
177 | resno, res); | ||
178 | conflict = request_resource_conflict(root, res); | ||
179 | if (conflict) { | ||
180 | dev_info(&dev->dev, | ||
181 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
182 | res, conflict->name, conflict); | ||
183 | res->start = start; | ||
184 | res->end = end; | ||
185 | ret = 1; | ||
186 | } | ||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align) | ||
191 | { | ||
192 | struct resource *res = dev->resource + resno; | ||
193 | struct pci_bus *bus; | ||
194 | int ret; | ||
195 | char *type; | ||
167 | 196 | ||
168 | if (res->flags & IORESOURCE_IO) | 197 | bus = dev->bus; |
169 | root = &ioport_resource; | 198 | while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { |
199 | if (!bus->parent || !bus->self->transparent) | ||
200 | break; | ||
201 | bus = bus->parent; | ||
202 | } | ||
203 | |||
204 | if (ret) { | ||
205 | if (res->flags & IORESOURCE_MEM) | ||
206 | if (res->flags & IORESOURCE_PREFETCH) | ||
207 | type = "mem pref"; | ||
208 | else | ||
209 | type = "mem"; | ||
210 | else if (res->flags & IORESOURCE_IO) | ||
211 | type = "io"; | ||
170 | else | 212 | else |
171 | root = &iomem_resource; | 213 | type = "unknown"; |
172 | 214 | dev_info(&dev->dev, | |
173 | start = res->start; | 215 | "BAR %d: can't assign %s (size %#llx)\n", |
174 | end = res->end; | 216 | resno, type, (unsigned long long) resource_size(res)); |
175 | res->start = dev->fw_addr[resno]; | ||
176 | res->end = res->start + size - 1; | ||
177 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
178 | resno, res); | ||
179 | conflict = request_resource_conflict(root, res); | ||
180 | if (conflict) { | ||
181 | dev_info(&dev->dev, | ||
182 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
183 | res, conflict->name, conflict); | ||
184 | res->start = start; | ||
185 | res->end = end; | ||
186 | } else | ||
187 | ret = 0; | ||
188 | } | 217 | } |
189 | 218 | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, | ||
223 | resource_size_t min_align) | ||
224 | { | ||
225 | struct resource *res = dev->resource + resno; | ||
226 | resource_size_t new_size; | ||
227 | int ret; | ||
228 | |||
229 | if (!res->parent) { | ||
230 | dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR " | ||
231 | "\n", resno, res); | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | new_size = resource_size(res) + addsize + min_align; | ||
236 | ret = _pci_assign_resource(dev, resno, new_size, min_align); | ||
190 | if (!ret) { | 237 | if (!ret) { |
191 | res->flags &= ~IORESOURCE_STARTALIGN; | 238 | res->flags &= ~IORESOURCE_STARTALIGN; |
192 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | 239 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); |
193 | if (resno < PCI_BRIDGE_RESOURCES) | 240 | if (resno < PCI_BRIDGE_RESOURCES) |
194 | pci_update_resource(dev, resno); | 241 | pci_update_resource(dev, resno); |
195 | } | 242 | } |
196 | |||
197 | return ret; | 243 | return ret; |
198 | } | 244 | } |
199 | 245 | ||
200 | int pci_assign_resource(struct pci_dev *dev, int resno) | 246 | int pci_assign_resource(struct pci_dev *dev, int resno) |
201 | { | 247 | { |
202 | struct resource *res = dev->resource + resno; | 248 | struct resource *res = dev->resource + resno; |
203 | resource_size_t align; | 249 | resource_size_t align, size; |
204 | struct pci_bus *bus; | 250 | struct pci_bus *bus; |
205 | int ret; | 251 | int ret; |
206 | char *type; | ||
207 | 252 | ||
208 | align = pci_resource_alignment(dev, res); | 253 | align = pci_resource_alignment(dev, res); |
209 | if (!align) { | 254 | if (!align) { |
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
213 | } | 258 | } |
214 | 259 | ||
215 | bus = dev->bus; | 260 | bus = dev->bus; |
216 | while ((ret = __pci_assign_resource(bus, dev, resno))) { | 261 | size = resource_size(res); |
217 | if (bus->parent && bus->self->transparent) | 262 | ret = _pci_assign_resource(dev, resno, size, align); |
218 | bus = bus->parent; | ||
219 | else | ||
220 | bus = NULL; | ||
221 | if (bus) | ||
222 | continue; | ||
223 | break; | ||
224 | } | ||
225 | 263 | ||
226 | if (ret) { | 264 | /* |
227 | if (res->flags & IORESOURCE_MEM) | 265 | * If we failed to assign anything, let's try the address |
228 | if (res->flags & IORESOURCE_PREFETCH) | 266 | * where firmware left it. That at least has a chance of |
229 | type = "mem pref"; | 267 | * working, which is better than just leaving it disabled. |
230 | else | 268 | */ |
231 | type = "mem"; | 269 | if (ret < 0 && dev->fw_addr[resno]) |
232 | else if (res->flags & IORESOURCE_IO) | 270 | ret = pci_revert_fw_address(res, dev, resno, size); |
233 | type = "io"; | ||
234 | else | ||
235 | type = "unknown"; | ||
236 | dev_info(&dev->dev, | ||
237 | "BAR %d: can't assign %s (size %#llx)\n", | ||
238 | resno, type, (unsigned long long) resource_size(res)); | ||
239 | } | ||
240 | 271 | ||
272 | if (!ret) { | ||
273 | res->flags &= ~IORESOURCE_STARTALIGN; | ||
274 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | ||
275 | if (resno < PCI_BRIDGE_RESOURCES) | ||
276 | pci_update_resource(dev, resno); | ||
277 | } | ||
241 | return ret; | 278 | return ret; |
242 | } | 279 | } |
243 | 280 | ||
281 | |||
244 | /* Sort resources by alignment */ | 282 | /* Sort resources by alignment */ |
245 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | 283 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) |
246 | { | 284 | { |
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 6fa215a38615..90832a955991 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
@@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data) | |||
400 | dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", | 400 | dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", |
401 | pci_name(dev), i); | 401 | pci_name(dev), i); |
402 | if (pci_claim_resource(dev, i)) { | 402 | if (pci_claim_resource(dev, i)) { |
403 | dev_err(&pdev->xdev->dev, "Could not claim " | 403 | dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! " |
404 | "resource %s/%d! Device offline. Try " | 404 | "Device offline. Try using e820_host=1 in the guest config.\n", |
405 | "giving less than 4GB to domain.\n", | ||
406 | pci_name(dev), i); | 405 | pci_name(dev), i); |
407 | } | 406 | } |
408 | } | 407 | } |