Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Makefile                     |   3
-rw-r--r--  drivers/pci/dmar.c                       |  48
-rw-r--r--  drivers/pci/hotplug/Makefile             |   2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c         | 117
-rw-r--r--  drivers/pci/hotplug/acpiphp.h            |   3
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c       | 187
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c        |  11
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c   |   3
-rw-r--r--  drivers/pci/hotplug/pciehp.h             | 116
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c        |  24
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c        | 136
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c        | 114
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c         | 119
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c         | 160
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c         | 187
-rw-r--r--  drivers/pci/hotplug/shpchp.h             |   9
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c         |  62
-rw-r--r--  drivers/pci/intel-iommu.c                | 340
-rw-r--r--  drivers/pci/intr_remapping.c             |  22
-rw-r--r--  drivers/pci/iova.c                       |  16
-rw-r--r--  drivers/pci/legacy.c                     |  34
-rw-r--r--  drivers/pci/msi.c                        | 283
-rw-r--r--  drivers/pci/pci-acpi.c                   |  29
-rw-r--r--  drivers/pci/pci-driver.c                 | 148
-rw-r--r--  drivers/pci/pci-stub.c                   |  45
-rw-r--r--  drivers/pci/pci-sysfs.c                  |  37
-rw-r--r--  drivers/pci/pci.c                        | 106
-rw-r--r--  drivers/pci/pci.h                        |   2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c        |  25
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c            |  24
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h            |  34
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c       | 107
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c   | 190
-rw-r--r--  drivers/pci/pcie/aspm.c                  | 492
-rw-r--r--  drivers/pci/pcie/portdrv_core.c          |   6
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c           |   1
-rw-r--r--  drivers/pci/probe.c                      |  33
-rw-r--r--  drivers/pci/quirks.c                     |  40
-rw-r--r--  drivers/pci/search.c                     |  31
-rw-r--r--  drivers/pci/setup-bus.c                  |  22
-rw-r--r--  drivers/pci/setup-res.c                  |   1
41 files changed, 1654 insertions, 1715 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1ebd6b4c743b..4a7f11d8f432 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
 
+obj-$(CONFIG_PCI_LEGACY) += legacy.o
+CFLAGS_legacy.o += -Wno-deprecated-declarations
+
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
 
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 7b287cb38b7a..14bbaa17e2ca 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -33,9 +33,10 @@
 #include <linux/timer.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
 
-#undef PREFIX
-#define PREFIX "DMAR:"
+#define PREFIX "DMAR: "
 
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
@@ -413,6 +414,12 @@ parse_dmar_table(void)
 	 */
 	dmar_table_detect();
 
+	/*
+	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
+	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
+	 */
+	dmar_tbl = tboot_get_dmar_table(dmar_tbl);
+
 	dmar = (struct acpi_table_dmar *)dmar_tbl;
 	if (!dmar)
 		return -ENODEV;
@@ -570,9 +577,6 @@ int __init dmar_table_init(void)
 	printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
 	return 0;
 }
 
@@ -632,20 +636,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;
@@ -665,7 +680,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,
@@ -675,7 +690,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	drhd->iommu = iommu;
 	return 0;
-error:
+
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
@@ -1212,7 +1230,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);
 
 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
@@ -1305,3 +1323,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
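
Two of the dmar.c changes above reward a closer look. dmar_ir_support() simply reports bit 0 of the DMAR table's flags word, the interrupt-remapping support flag. The fault_index change is an off-by-one fix: cap_num_fault_regs() returns the number of fault recording registers, so valid indices run 0..n-1 and the wrap must fire at n, hence '>='. A minimal standalone sketch of the corrected ring walk follows; NUM_FAULT_REGS is a stand-in for cap_num_fault_regs(iommu->cap) and is not taken from the patch:

#include <stdio.h>

#define NUM_FAULT_REGS 8	/* stand-in for cap_num_fault_regs(iommu->cap) */

int main(void)
{
	int fault_index = 0;
	int i;

	/* walk the fault recording registers as a ring buffer */
	for (i = 0; i < 2 * NUM_FAULT_REGS; i++) {
		printf("servicing fault register %d\n", fault_index);
		fault_index++;
		if (fault_index >= NUM_FAULT_REGS)	/* old '>' touched index 8 */
			fault_index = 0;
	}
	return 0;
}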
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 2aa117c8cd87..3625b094bf7e 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
 # Link this last so it doesn't claim devices that have a real hotplug driver
 obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
 
-pci_hotplug-objs := pci_hotplug_core.o
+pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o
 
 ifdef CONFIG_HOTPLUG_PCI_CPCI
 pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index eb159587d0bf..a73028ec52e5 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -41,7 +41,6 @@
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
 
 #define METHOD_NAME__SUN	"_SUN"
-#define METHOD_NAME__HPP	"_HPP"
 #define METHOD_NAME_OSHP	"OSHP"
 
 static int debug_acpi;
@@ -215,80 +214,41 @@ acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
 static acpi_status
 acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
 {
 	acpi_status status;
-	u8 nui[4];
-	struct acpi_buffer ret_buf = { 0, NULL};
-	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *ext_obj, *package;
-	int i, len = 0;
-
-	acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *package, *fields;
+	int i;
 
-	/* Clear the return buffer with zeros */
 	memset(hpp, 0, sizeof(struct hotplug_params));
 
-	/* get _hpp */
-	status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf);
-	switch (status) {
-	case AE_BUFFER_OVERFLOW:
-		ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL);
-		if (!ret_buf.pointer) {
-			printk(KERN_ERR "%s:%s alloc for _HPP fail\n",
-				__func__, (char *)string.pointer);
-			kfree(string.pointer);
-			return AE_NO_MEMORY;
-		}
-		status = acpi_evaluate_object(handle, METHOD_NAME__HPP,
-				NULL, &ret_buf);
-		if (ACPI_SUCCESS(status))
-			break;
-	default:
-		if (ACPI_FAILURE(status)) {
-			pr_debug("%s:%s _HPP fail=0x%x\n", __func__,
-				(char *)string.pointer, status);
-			kfree(string.pointer);
-			return status;
-		}
-	}
+	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return status;
 
-	ext_obj = (union acpi_object *) ret_buf.pointer;
-	if (ext_obj->type != ACPI_TYPE_PACKAGE) {
-		printk(KERN_ERR "%s:%s _HPP obj not a package\n", __func__,
-				(char *)string.pointer);
+	package = (union acpi_object *) buffer.pointer;
+	if (package->type != ACPI_TYPE_PACKAGE ||
+	    package->package.count != 4) {
 		status = AE_ERROR;
-		goto free_and_return;
+		goto exit;
 	}
 
-	len = ext_obj->package.count;
-	package = (union acpi_object *) ret_buf.pointer;
-	for ( i = 0; (i < len) || (i < 4); i++) {
-		ext_obj = (union acpi_object *) &package->package.elements[i];
-		switch (ext_obj->type) {
-		case ACPI_TYPE_INTEGER:
-			nui[i] = (u8)ext_obj->integer.value;
-			break;
-		default:
-			printk(KERN_ERR "%s:%s _HPP obj type incorrect\n",
-				__func__, (char *)string.pointer);
+	fields = package->package.elements;
+	for (i = 0; i < 4; i++) {
+		if (fields[i].type != ACPI_TYPE_INTEGER) {
 			status = AE_ERROR;
-			goto free_and_return;
+			goto exit;
 		}
 	}
 
 	hpp->t0 = &hpp->type0_data;
-	hpp->t0->cache_line_size = nui[0];
-	hpp->t0->latency_timer = nui[1];
-	hpp->t0->enable_serr = nui[2];
-	hpp->t0->enable_perr = nui[3];
-
-	pr_debug("  _HPP: cache_line_size=0x%x\n", hpp->t0->cache_line_size);
-	pr_debug("  _HPP: latency timer  =0x%x\n", hpp->t0->latency_timer);
-	pr_debug("  _HPP: enable SERR    =0x%x\n", hpp->t0->enable_serr);
-	pr_debug("  _HPP: enable PERR    =0x%x\n", hpp->t0->enable_perr);
+	hpp->t0->revision        = 1;
+	hpp->t0->cache_line_size = fields[0].integer.value;
+	hpp->t0->latency_timer   = fields[1].integer.value;
+	hpp->t0->enable_serr     = fields[2].integer.value;
+	hpp->t0->enable_perr     = fields[3].integer.value;
 
-free_and_return:
-	kfree(string.pointer);
-	kfree(ret_buf.pointer);
+exit:
+	kfree(buffer.pointer);
 	return status;
 }
 
@@ -322,20 +282,19 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
 	return status;
 }
 
-/* acpi_get_hp_params_from_firmware
+/* pci_get_hp_params
  *
- * @bus - the pci_bus of the bus on which the device is newly added
+ * @dev - the pci_dev for which we want parameters
  * @hpp - allocated by the caller
  */
-acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
-		struct hotplug_params *hpp)
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
 {
-	acpi_status status = AE_NOT_FOUND;
+	acpi_status status;
 	acpi_handle handle, phandle;
 	struct pci_bus *pbus;
 
 	handle = NULL;
-	for (pbus = bus; pbus; pbus = pbus->parent) {
+	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
 		handle = acpi_pci_get_bridge_handle(pbus);
 		if (handle)
 			break;
@@ -345,15 +304,15 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 	 * _HPP settings apply to all child buses, until another _HPP is
 	 * encountered. If we don't find an _HPP for the input pci dev,
 	 * look for it in the parent device scope since that would apply to
-	 * this pci dev. If we don't find any _HPP, use hardcoded defaults
+	 * this pci dev.
 	 */
 	while (handle) {
 		status = acpi_run_hpx(handle, hpp);
 		if (ACPI_SUCCESS(status))
-			break;
+			return 0;
 		status = acpi_run_hpp(handle, hpp);
 		if (ACPI_SUCCESS(status))
-			break;
+			return 0;
 		if (acpi_is_root_bridge(handle))
 			break;
 		status = acpi_get_parent(handle, &phandle);
@@ -361,9 +320,9 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 			break;
 		handle = phandle;
 	}
-	return status;
+	return -ENODEV;
 }
-EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
 
 /**
  * acpi_get_hp_hw_control_from_firmware
@@ -500,18 +459,18 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
 
 /**
  * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
- * @pbus - PCI bus to scan
+ * @handle - handle of the PCI bus to scan
  *
  * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
  */
-int acpi_pci_detect_ejectable(struct pci_bus *pbus)
+int acpi_pci_detect_ejectable(acpi_handle handle)
 {
-	acpi_handle handle;
 	int found = 0;
 
-	if (!(handle = acpi_pci_get_bridge_handle(pbus)))
-		return 0;
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+	if (!handle)
+		return found;
+
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
 			    check_hotplug, (void *)&found, NULL);
 	return found;
 }
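
The interface change above means callers of pci_get_hp_params() see a plain 0 or -ENODEV rather than an acpi_status, and the hardcoded-defaults fallback moves out of this file (the in-tree consumer of these parameters is pci_configure_slot() in the new pcihp_slot.c). A hedged sketch of how a hotplug driver might consume the new call; example_program_hpp and its fallback behaviour are illustrative, not part of the patch:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Illustrative caller only, under the assumption that no firmware
 * parameters means "leave the device alone". */
static void example_program_hpp(struct pci_dev *dev)
{
	struct hotplug_params hpp;

	if (pci_get_hp_params(dev, &hpp))
		return;		/* no _HPX/_HPP anywhere up the hierarchy */

	if (hpp.t0 && hpp.t0->revision <= 1)
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      hpp.t0->cache_line_size);
}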
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e68d5f20ffb3..7d938df79206 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -91,9 +91,6 @@ struct acpiphp_bridge {
 	/* PCI-to-PCI bridge device */
 	struct pci_dev *pci_dev;
 
-	/* ACPI 2.0 _HPP parameters */
-	struct hotplug_params hpp;
-
 	spinlock_t res_lock;
 };
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0cb0f830a993..58d25a163a8b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(ioapic_list_lock);
 
 static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
+static void acpiphp_set_hpp_values(struct pci_bus *bus);
 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
 
 /* callback routine to check for the existence of a pci dock device */
@@ -261,51 +261,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
 
 /* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(struct pci_bus *pbus)
+static int detect_ejectable_slots(acpi_handle handle)
 {
-	int found = acpi_pci_detect_ejectable(pbus);
+	int found = acpi_pci_detect_ejectable(handle);
 	if (!found) {
-		acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
-		if (!bridge_handle)
-			return 0;
-		acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
 				    is_pci_dock_device, (void *)&found, NULL);
 	}
 	return found;
 }
 
-
-/* decode ACPI 2.0 _HPP hot plug parameters */
-static void decode_hpp(struct acpiphp_bridge *bridge)
-{
-	acpi_status status;
-
-	status = acpi_get_hp_params_from_firmware(bridge->pci_bus, &bridge->hpp);
-	if (ACPI_FAILURE(status) ||
-	    !bridge->hpp.t0 || (bridge->hpp.t0->revision > 1)) {
-		/* use default numbers */
-		printk(KERN_WARNING
-		       "%s: Could not get hotplug parameters. Use defaults\n",
-		       __func__);
-		bridge->hpp.t0 = &bridge->hpp.type0_data;
-		bridge->hpp.t0->revision = 0;
-		bridge->hpp.t0->cache_line_size = 0x10;
-		bridge->hpp.t0->latency_timer = 0x40;
-		bridge->hpp.t0->enable_serr = 0;
-		bridge->hpp.t0->enable_perr = 0;
-	}
-}
-
-
-
 /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
 static void init_bridge_misc(struct acpiphp_bridge *bridge)
 {
 	acpi_status status;
 
-	/* decode ACPI 2.0 _HPP (hot plug parameters) */
-	decode_hpp(bridge);
-
 	/* must be added to the list prior to calling register_slot */
 	list_add(&bridge->list, &bridge_list);
 
@@ -399,9 +369,10 @@ static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
 
 
 /* allocate and initialize host bridge data structure */
-static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
+static void add_host_bridge(acpi_handle *handle)
 {
 	struct acpiphp_bridge *bridge;
+	struct acpi_pci_root *root = acpi_pci_find_root(handle);
 
 	bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
 	if (bridge == NULL)
@@ -410,7 +381,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
 	bridge->type = BRIDGE_TYPE_HOST;
 	bridge->handle = handle;
 
-	bridge->pci_bus = pci_bus;
+	bridge->pci_bus = root->bus;
 
 	spin_lock_init(&bridge->res_lock);
 
@@ -419,7 +390,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
 
 
 /* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
+static void add_p2p_bridge(acpi_handle *handle)
 {
 	struct acpiphp_bridge *bridge;
 
@@ -433,8 +404,8 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
 	bridge->handle = handle;
 	config_p2p_bridge_flags(bridge);
 
-	bridge->pci_dev = pci_dev_get(pci_dev);
-	bridge->pci_bus = pci_dev->subordinate;
+	bridge->pci_dev = acpi_get_pci_dev(handle);
+	bridge->pci_bus = bridge->pci_dev->subordinate;
 	if (!bridge->pci_bus) {
 		err("This is not a PCI-to-PCI bridge!\n");
 		goto err;
@@ -451,7 +422,7 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
 	init_bridge_misc(bridge);
 	return;
  err:
-	pci_dev_put(pci_dev);
+	pci_dev_put(bridge->pci_dev);
 	kfree(bridge);
 	return;
 }
@@ -462,39 +433,21 @@ static acpi_status
 find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
 	acpi_status status;
-	acpi_handle dummy_handle;
-	unsigned long long tmp;
-	int device, function;
 	struct pci_dev *dev;
-	struct pci_bus *pci_bus = context;
-
-	status = acpi_get_handle(handle, "_ADR", &dummy_handle);
-	if (ACPI_FAILURE(status))
-		return AE_OK;		/* continue */
-
-	status = acpi_evaluate_integer(handle, "_ADR", NULL, &tmp);
-	if (ACPI_FAILURE(status)) {
-		dbg("%s: _ADR evaluation failure\n", __func__);
-		return AE_OK;
-	}
-
-	device = (tmp >> 16) & 0xffff;
-	function = tmp & 0xffff;
-
-	dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
 
+	dev = acpi_get_pci_dev(handle);
 	if (!dev || !dev->subordinate)
 		goto out;
 
 	/* check if this bridge has ejectable slots */
-	if ((detect_ejectable_slots(dev->subordinate) > 0)) {
+	if ((detect_ejectable_slots(handle) > 0)) {
 		dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
-		add_p2p_bridge(handle, dev);
+		add_p2p_bridge(handle);
 	}
 
 	/* search P2P bridges under this p2p bridge */
 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
-				     find_p2p_bridge, dev->subordinate, NULL);
+				     find_p2p_bridge, NULL, NULL);
 	if (ACPI_FAILURE(status))
 		warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
 
@@ -509,9 +462,7 @@ static int add_bridge(acpi_handle handle)
 {
 	acpi_status status;
 	unsigned long long tmp;
-	int seg, bus;
 	acpi_handle dummy_handle;
-	struct pci_bus *pci_bus;
 
 	/* if the bridge doesn't have _STA, we assume it is always there */
 	status = acpi_get_handle(handle, "_STA", &dummy_handle);
@@ -526,36 +477,15 @@ static int add_bridge(acpi_handle handle)
 		return 0;
 	}
 
-	/* get PCI segment number */
-	status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
-
-	seg = ACPI_SUCCESS(status) ? tmp : 0;
-
-	/* get PCI bus number */
-	status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
-
-	if (ACPI_SUCCESS(status)) {
-		bus = tmp;
-	} else {
-		warn("can't get bus number, assuming 0\n");
-		bus = 0;
-	}
-
-	pci_bus = pci_find_bus(seg, bus);
-	if (!pci_bus) {
-		err("Can't find bus %04x:%02x\n", seg, bus);
-		return 0;
-	}
-
 	/* check if this bridge has ejectable slots */
-	if (detect_ejectable_slots(pci_bus) > 0) {
+	if (detect_ejectable_slots(handle) > 0) {
 		dbg("found PCI host-bus bridge with hot-pluggable slots\n");
-		add_host_bridge(handle, pci_bus);
+		add_host_bridge(handle);
 	}
 
 	/* search P2P bridges under this host bridge */
 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
-				     find_p2p_bridge, pci_bus, NULL);
+				     find_p2p_bridge, NULL, NULL);
 
 	if (ACPI_FAILURE(status))
 		warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
@@ -1083,7 +1013,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
 
 	pci_bus_assign_resources(bus);
 	acpiphp_sanitize_bus(bus);
-	acpiphp_set_hpp_values(slot->bridge->handle, bus);
+	acpiphp_set_hpp_values(bus);
 	list_for_each_entry(func, &slot->funcs, sibling)
 		acpiphp_configure_ioapics(func->handle);
 	pci_enable_bridges(bus);
@@ -1294,70 +1224,12 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
 	return retval;
 }
 
-static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge)
+static void acpiphp_set_hpp_values(struct pci_bus *bus)
 {
-	u16 pci_cmd, pci_bctl;
-	struct pci_dev *cdev;
-
-	/* Program hpp values for this device */
-	if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
-	      (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
-	       (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
-		return;
-
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
-		return;
-
-	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
-			bridge->hpp.t0->cache_line_size);
-	pci_write_config_byte(dev, PCI_LATENCY_TIMER,
-			bridge->hpp.t0->latency_timer);
-	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
-	if (bridge->hpp.t0->enable_serr)
-		pci_cmd |= PCI_COMMAND_SERR;
-	else
-		pci_cmd &= ~PCI_COMMAND_SERR;
-	if (bridge->hpp.t0->enable_perr)
-		pci_cmd |= PCI_COMMAND_PARITY;
-	else
-		pci_cmd &= ~PCI_COMMAND_PARITY;
-	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
-	/* Program bridge control value and child devices */
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
-				bridge->hpp.t0->latency_timer);
-		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
-		if (bridge->hpp.t0->enable_serr)
-			pci_bctl |= PCI_BRIDGE_CTL_SERR;
-		else
-			pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
-		if (bridge->hpp.t0->enable_perr)
-			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
-		else
-			pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
-		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
-		if (dev->subordinate) {
-			list_for_each_entry(cdev, &dev->subordinate->devices,
-					bus_list)
-				program_hpp(cdev, bridge);
-		}
-	}
-}
-
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus)
-{
-	struct acpiphp_bridge bridge;
 	struct pci_dev *dev;
 
-	memset(&bridge, 0, sizeof(bridge));
-	bridge.handle = handle;
-	bridge.pci_bus = bus;
-	bridge.pci_dev = bus->self;
-	decode_hpp(&bridge);
 	list_for_each_entry(dev, &bus->devices, bus_list)
-		program_hpp(dev, &bridge);
-
+		pci_configure_slot(dev);
 }
 
 /*
@@ -1387,24 +1259,23 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
 /* Program resources in newly inserted bridge */
 static int acpiphp_configure_bridge (acpi_handle handle)
 {
-	struct pci_dev *dev;
 	struct pci_bus *bus;
 
-	dev = acpi_get_pci_dev(handle);
-	if (!dev) {
-		err("cannot get PCI domain and bus number for bridge\n");
-		return -EINVAL;
+	if (acpi_is_root_bridge(handle)) {
+		struct acpi_pci_root *root = acpi_pci_find_root(handle);
+		bus = root->bus;
+	} else {
+		struct pci_dev *pdev = acpi_get_pci_dev(handle);
+		bus = pdev->subordinate;
+		pci_dev_put(pdev);
 	}
 
-	bus = dev->bus;
-
 	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
 	acpiphp_sanitize_bus(bus);
-	acpiphp_set_hpp_values(handle, bus);
+	acpiphp_set_hpp_values(bus);
 	pci_enable_bridges(bus);
 	acpiphp_configure_ioapics(handle);
-	pci_dev_put(dev);
 	return 0;
 }
 
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 5befa7e379b7..e7be66dbac21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -398,23 +398,20 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
 	acpi_handle *phandle = (acpi_handle *)context;
 	acpi_status status;
 	struct acpi_device_info *info;
-	struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	int retval = 0;
 
-	status = acpi_get_object_info(handle, &info_buffer);
+	status = acpi_get_object_info(handle, &info);
 	if (ACPI_FAILURE(status)) {
 		err("%s: Failed to get device information status=0x%x\n",
 			__func__, status);
 		return retval;
 	}
-	info = info_buffer.pointer;
-	info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0';
 
 	if (info->current_status && (info->valid & ACPI_VALID_HID) &&
-	    (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) ||
-	     !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) {
+	    (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
+	     !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
 		dbg("found hardware: %s, handle: %p\n",
-			info->hardware_id.value, handle);
+			info->hardware_id.string, handle);
 		*phandle = handle;
 		/* returning non-zero causes the search to stop
 		 * and returns this value to the caller of
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5c5043f239cf..0325d989bb46 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = {
86 "66 MHz PCIX 533", /* 0x11 */ 86 "66 MHz PCIX 533", /* 0x11 */
87 "100 MHz PCIX 533", /* 0x12 */ 87 "100 MHz PCIX 533", /* 0x12 */
88 "133 MHz PCIX 533", /* 0x13 */ 88 "133 MHz PCIX 533", /* 0x13 */
89 "25 GBps PCI-E", /* 0x14 */ 89 "2.5 GT/s PCI-E", /* 0x14 */
90 "5.0 GT/s PCI-E", /* 0x15 */
90}; 91};
91 92
92#ifdef CONFIG_HOTPLUG_PCI_CPCI 93#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e6cf096498be..3070f77eb56a 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,15 +72,9 @@ do { \
 
 #define SLOT_NAME_SIZE 10
 struct slot {
-	u8 bus;
-	u8 device;
 	u8 state;
-	u8 hp_slot;
-	u32 number;
 	struct controller *ctrl;
-	struct hpc_ops *hpc_ops;
 	struct hotplug_slot *hotplug_slot;
-	struct list_head slot_list;
 	struct delayed_work work;	/* work for button event */
 	struct mutex lock;
 };
@@ -92,18 +86,10 @@ struct event_info {
 };
 
 struct controller {
-	struct mutex crit_sect;		/* critical section mutex */
 	struct mutex ctrl_lock;		/* controller lock */
-	int num_slots;			/* Number of slots on ctlr */
-	int slot_num_inc;		/* 1 or -1 */
-	struct pci_dev *pci_dev;
 	struct pcie_device *pcie;	/* PCI Express port service */
-	struct list_head slot_list;
-	struct hpc_ops *hpc_ops;
+	struct slot *slot;
 	wait_queue_head_t queue;	/* sleep & wake process */
-	u8 slot_device_offset;
-	u32 first_slot;		/* First physical slot number */  /* PCIE only has 1 slot */
-	u8 slot_bus;		/* Bus where the slots handled by this controller sit */
 	u32 slot_cap;
 	u8 cap_base;
 	struct timer_list poll_timer;
@@ -131,40 +117,20 @@ struct controller {
 #define POWERON_STATE			3
 #define POWEROFF_STATE			4
 
-/* Error messages */
-#define INTERLOCK_OPEN			0x00000002
-#define ADD_NOT_SUPPORTED		0x00000003
-#define CARD_FUNCTIONING		0x00000005
-#define ADAPTER_NOT_SAME		0x00000006
-#define NO_ADAPTER_PRESENT		0x00000009
-#define NOT_ENOUGH_RESOURCES		0x0000000B
-#define DEVICE_TYPE_NOT_SUPPORTED	0x0000000C
-#define WRONG_BUS_FREQUENCY		0x0000000D
-#define POWER_FAILURE			0x0000000E
-
-/* Field definitions in Slot Capabilities Register */
-#define ATTN_BUTTN_PRSN			0x00000001
-#define PWR_CTRL_PRSN			0x00000002
-#define MRL_SENS_PRSN			0x00000004
-#define ATTN_LED_PRSN			0x00000008
-#define PWR_LED_PRSN			0x00000010
-#define HP_SUPR_RM_SUP			0x00000020
-#define EMI_PRSN			0x00020000
-#define NO_CMD_CMPL_SUP			0x00040000
-
-#define ATTN_BUTTN(ctrl)	((ctrl)->slot_cap & ATTN_BUTTN_PRSN)
-#define POWER_CTRL(ctrl)	((ctrl)->slot_cap & PWR_CTRL_PRSN)
-#define MRL_SENS(ctrl)		((ctrl)->slot_cap & MRL_SENS_PRSN)
-#define ATTN_LED(ctrl)		((ctrl)->slot_cap & ATTN_LED_PRSN)
-#define PWR_LED(ctrl)		((ctrl)->slot_cap & PWR_LED_PRSN)
-#define HP_SUPR_RM(ctrl)	((ctrl)->slot_cap & HP_SUPR_RM_SUP)
-#define EMI(ctrl)		((ctrl)->slot_cap & EMI_PRSN)
-#define NO_CMD_CMPL(ctrl)	((ctrl)->slot_cap & NO_CMD_CMPL_SUP)
+#define ATTN_BUTTN(ctrl)	((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
+#define POWER_CTRL(ctrl)	((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
+#define MRL_SENS(ctrl)		((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
+#define ATTN_LED(ctrl)		((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
+#define PWR_LED(ctrl)		((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
+#define HP_SUPR_RM(ctrl)	((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
+#define EMI(ctrl)		((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
+#define NO_CMD_CMPL(ctrl)	((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
+#define PSN(ctrl)		((ctrl)->slot_cap >> 19)
 
 extern int pciehp_sysfs_enable_slot(struct slot *slot);
 extern int pciehp_sysfs_disable_slot(struct slot *slot);
 extern u8 pciehp_handle_attention_button(struct slot *p_slot);
 extern u8 pciehp_handle_switch_change(struct slot *p_slot);
 extern u8 pciehp_handle_presence_change(struct slot *p_slot);
 extern u8 pciehp_handle_power_fault(struct slot *p_slot);
 extern int pciehp_configure_device(struct slot *p_slot);
@@ -175,45 +141,30 @@ int pcie_init_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
 int pcie_enable_notification(struct controller *ctrl);
+int pciehp_power_on_slot(struct slot *slot);
+int pciehp_power_off_slot(struct slot *slot);
+int pciehp_get_power_status(struct slot *slot, u8 *status);
+int pciehp_get_attention_status(struct slot *slot, u8 *status);
+
+int pciehp_set_attention_status(struct slot *slot, u8 status);
+int pciehp_get_latch_status(struct slot *slot, u8 *status);
+int pciehp_get_adapter_status(struct slot *slot, u8 *status);
+int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_query_power_fault(struct slot *slot);
+void pciehp_green_led_on(struct slot *slot);
+void pciehp_green_led_off(struct slot *slot);
+void pciehp_green_led_blink(struct slot *slot);
+int pciehp_check_link_status(struct controller *ctrl);
+void pciehp_release_ctrl(struct controller *ctrl);
 
 static inline const char *slot_name(struct slot *slot)
 {
 	return hotplug_slot_name(slot->hotplug_slot);
 }
 
-static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
-{
-	struct slot *slot;
-
-	list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
-		if (slot->device == device)
-			return slot;
-	}
-
-	ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
-	return NULL;
-}
-
-struct hpc_ops {
-	int (*power_on_slot)(struct slot *slot);
-	int (*power_off_slot)(struct slot *slot);
-	int (*get_power_status)(struct slot *slot, u8 *status);
-	int (*get_attention_status)(struct slot *slot, u8 *status);
-	int (*set_attention_status)(struct slot *slot, u8 status);
-	int (*get_latch_status)(struct slot *slot, u8 *status);
-	int (*get_adapter_status)(struct slot *slot, u8 *status);
-	int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
-	int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
-	int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
-	int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val);
-	int (*query_power_fault)(struct slot *slot);
-	void (*green_led_on)(struct slot *slot);
-	void (*green_led_off)(struct slot *slot);
-	void (*green_led_blink)(struct slot *slot);
-	void (*release_ctlr)(struct controller *ctrl);
-	int (*check_lnk_status)(struct controller *ctrl);
-};
-
 #ifdef CONFIG_ACPI
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
@@ -237,17 +188,8 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
 		return retval;
 	return pciehp_acpi_slot_detection_check(dev);
 }
-
-static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
-			struct hotplug_params *hpp)
-{
-	if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
-		return -ENODEV;
-	return 0;
-}
 #else
 #define pciehp_firmware_init()				do {} while (0)
 #define pciehp_get_hp_hw_control_from_firmware(dev)	0
-#define pciehp_get_hp_params_from_firmware(dev, hpp)	(-ENODEV)
 #endif	/* CONFIG_ACPI */
 #endif	/* _PCIEHP_H */
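
The rewritten capability macros above test the generic PCI_EXP_SLTCAP_* bits from <linux/pci_regs.h> instead of private *_PRSN constants, and PSN() extracts the Physical Slot Number from bits 31:19 of Slot Capabilities, which init_slot() now uses as the slot name. A short sketch of where the slot_cap word feeding these macros typically comes from; the wrapper example_read_slot_cap is illustrative only (pciehp itself caches cap_base and slot_cap in struct controller):

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Illustrative: read the Slot Capabilities register of a PCIe port. */
static int example_read_slot_cap(struct pci_dev *pdev, u32 *slot_cap)
{
	int cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);

	if (!cap_base)
		return -ENODEV;
	pci_read_config_dword(pdev, cap_base + PCI_EXP_SLTCAP, slot_cap);
	return 0;	/* ATTN_BUTTN() etc. are then plain bit tests */
}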
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 96048010e7d9..37c8d3d0323e 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -33,6 +33,11 @@
 #define PCIEHP_DETECT_AUTO	(2)
 #define PCIEHP_DETECT_DEFAULT	PCIEHP_DETECT_AUTO
 
+struct dummy_slot {
+	u32 number;
+	struct list_head list;
+};
+
 static int slot_detection_mode;
 static char *pciehp_detect_mode;
 module_param(pciehp_detect_mode, charp, 0444);
@@ -47,7 +52,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
 {
 	if (slot_detection_mode != PCIEHP_DETECT_ACPI)
 		return 0;
-	if (acpi_pci_detect_ejectable(dev->subordinate))
+	if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
 		return 0;
 	return -ENODEV;
 }
@@ -76,9 +81,9 @@ static int __init dummy_probe(struct pcie_device *dev)
 {
 	int pos;
 	u32 slot_cap;
-	struct slot *slot, *tmp;
+	acpi_handle handle;
+	struct dummy_slot *slot, *tmp;
 	struct pci_dev *pdev = dev->port;
-	struct pci_bus *pbus = pdev->subordinate;
 	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
 	if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		return -ENODEV;
@@ -89,12 +94,13 @@ static int __init dummy_probe(struct pcie_device *dev)
 	if (!slot)
 		return -ENOMEM;
 	slot->number = slot_cap >> 19;
-	list_for_each_entry(tmp, &dummy_slots, slot_list) {
+	list_for_each_entry(tmp, &dummy_slots, list) {
 		if (tmp->number == slot->number)
 			dup_slot_id++;
 	}
-	list_add_tail(&slot->slot_list, &dummy_slots);
-	if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
+	list_add_tail(&slot->list, &dummy_slots);
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
 		acpi_slot_detected = 1;
 	return -ENODEV; /* dummy driver always returns error */
 }
@@ -108,11 +114,11 @@ static struct pcie_port_service_driver __initdata dummy_driver = {
 
 static int __init select_detection_mode(void)
 {
-	struct slot *slot, *tmp;
+	struct dummy_slot *slot, *tmp;
 	pcie_port_service_register(&dummy_driver);
 	pcie_port_service_unregister(&dummy_driver);
-	list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
-		list_del(&slot->slot_list);
+	list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
+		list_del(&slot->list);
 		kfree(slot);
 	}
 	if (acpi_slot_detected && dup_slot_id)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 2317557fdee6..bc234719b1df 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -99,65 +99,55 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 	kfree(hotplug_slot);
 }
 
-static int init_slots(struct controller *ctrl)
+static int init_slot(struct controller *ctrl)
 {
-	struct slot *slot;
-	struct hotplug_slot *hotplug_slot;
-	struct hotplug_slot_info *info;
+	struct slot *slot = ctrl->slot;
+	struct hotplug_slot *hotplug = NULL;
+	struct hotplug_slot_info *info = NULL;
 	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
 
-	list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
-		hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
-		if (!hotplug_slot)
-			goto error;
-
-		info = kzalloc(sizeof(*info), GFP_KERNEL);
-		if (!info)
-			goto error_hpslot;
-
-		/* register this slot with the hotplug pci core */
-		hotplug_slot->info = info;
-		hotplug_slot->private = slot;
-		hotplug_slot->release = &release_slot;
-		hotplug_slot->ops = &pciehp_hotplug_slot_ops;
-		slot->hotplug_slot = hotplug_slot;
-		snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
-
-		ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
-			 "hp_slot=%x sun=%x slot_device_offset=%x\n",
-			 pci_domain_nr(ctrl->pci_dev->subordinate),
-			 slot->bus, slot->device, slot->hp_slot, slot->number,
-			 ctrl->slot_device_offset);
-		retval = pci_hp_register(hotplug_slot,
-					 ctrl->pci_dev->subordinate,
-					 slot->device,
-					 name);
-		if (retval) {
-			ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
-				 retval);
-			goto error_info;
-		}
-		get_power_status(hotplug_slot, &info->power_status);
-		get_attention_status(hotplug_slot, &info->attention_status);
-		get_latch_status(hotplug_slot, &info->latch_status);
-		get_adapter_status(hotplug_slot, &info->adapter_status);
+	hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
+	if (!hotplug)
+		goto out;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto out;
+
+	/* register this slot with the hotplug pci core */
+	hotplug->info = info;
+	hotplug->private = slot;
+	hotplug->release = &release_slot;
+	hotplug->ops = &pciehp_hotplug_slot_ops;
+	slot->hotplug_slot = hotplug;
+	snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
+
+	ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
+		 pci_domain_nr(ctrl->pcie->port->subordinate),
+		 ctrl->pcie->port->subordinate->number, PSN(ctrl));
+	retval = pci_hp_register(hotplug,
+				 ctrl->pcie->port->subordinate, 0, name);
+	if (retval) {
+		ctrl_err(ctrl,
+			 "pci_hp_register failed with error %d\n", retval);
+		goto out;
+	}
+	get_power_status(hotplug, &info->power_status);
+	get_attention_status(hotplug, &info->attention_status);
+	get_latch_status(hotplug, &info->latch_status);
+	get_adapter_status(hotplug, &info->adapter_status);
+out:
+	if (retval) {
+		kfree(info);
+		kfree(hotplug);
 	}
-
-	return 0;
-error_info:
-	kfree(info);
-error_hpslot:
-	kfree(hotplug_slot);
-error:
 	return retval;
 }
 
-static void cleanup_slots(struct controller *ctrl)
+static void cleanup_slot(struct controller *ctrl)
 {
-	struct slot *slot;
-	list_for_each_entry(slot, &ctrl->slot_list, slot_list)
-		pci_hp_deregister(slot->hotplug_slot);
+	pci_hp_deregister(ctrl->slot->hotplug_slot);
 }
 
 /*
@@ -173,7 +163,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
 	hotplug_slot->info->attention_status = status;
 
 	if (ATTN_LED(slot->ctrl))
-		slot->hpc_ops->set_attention_status(slot, status);
+		pciehp_set_attention_status(slot, status);
 
 	return 0;
 }
@@ -208,7 +198,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_power_status(slot, value);
+	retval = pciehp_get_power_status(slot, value);
 	if (retval < 0)
 		*value = hotplug_slot->info->power_status;
 
@@ -223,7 +213,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_attention_status(slot, value);
+	retval = pciehp_get_attention_status(slot, value);
 	if (retval < 0)
 		*value = hotplug_slot->info->attention_status;
 
@@ -238,7 +228,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_latch_status(slot, value);
+	retval = pciehp_get_latch_status(slot, value);
 	if (retval < 0)
 		*value = hotplug_slot->info->latch_status;
 
@@ -253,7 +243,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_adapter_status(slot, value);
+	retval = pciehp_get_adapter_status(slot, value);
 	if (retval < 0)
 		*value = hotplug_slot->info->adapter_status;
 
@@ -269,7 +259,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_max_bus_speed(slot, value);
+	retval = pciehp_get_max_link_speed(slot, value);
 	if (retval < 0)
 		*value = PCI_SPEED_UNKNOWN;
 
@@ -284,7 +274,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
+	retval = pciehp_get_cur_link_speed(slot, value);
 	if (retval < 0)
 		*value = PCI_SPEED_UNKNOWN;
 
@@ -295,7 +285,7 @@ static int pciehp_probe(struct pcie_device *dev)
 {
 	int rc;
 	struct controller *ctrl;
-	struct slot *t_slot;
+	struct slot *slot;
 	u8 value;
 	struct pci_dev *pdev = dev->port;
 
@@ -314,7 +304,7 @@ static int pciehp_probe(struct pcie_device *dev)
 	set_service_data(dev, ctrl);
 
 	/* Setup the slot information structures */
-	rc = init_slots(ctrl);
+	rc = init_slot(ctrl);
 	if (rc) {
 		if (rc == -EBUSY)
 			ctrl_warn(ctrl, "Slot already registered by another "
@@ -332,15 +322,15 @@ static int pciehp_probe(struct pcie_device *dev)
 	}
 
 	/* Check if slot is occupied */
-	t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-	t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+	slot = ctrl->slot;
+	pciehp_get_adapter_status(slot, &value);
 	if (value) {
 		if (pciehp_force)
-			pciehp_enable_slot(t_slot);
+			pciehp_enable_slot(slot);
 	} else {
 		/* Power off slot if not occupied */
 		if (POWER_CTRL(ctrl)) {
-			rc = t_slot->hpc_ops->power_off_slot(t_slot);
+			rc = pciehp_power_off_slot(slot);
 			if (rc)
 				goto err_out_free_ctrl_slot;
 		}
@@ -349,19 +339,19 @@ static int pciehp_probe(struct pcie_device *dev)
 	return 0;
 
 err_out_free_ctrl_slot:
-	cleanup_slots(ctrl);
+	cleanup_slot(ctrl);
 err_out_release_ctlr:
-	ctrl->hpc_ops->release_ctlr(ctrl);
+	pciehp_release_ctrl(ctrl);
 err_out_none:
 	return -ENODEV;
 }
 
-static void pciehp_remove (struct pcie_device *dev)
+static void pciehp_remove(struct pcie_device *dev)
 {
 	struct controller *ctrl = get_service_data(dev);
 
-	cleanup_slots(ctrl);
-	ctrl->hpc_ops->release_ctlr(ctrl);
+	cleanup_slot(ctrl);
+	pciehp_release_ctrl(ctrl);
 }
 
 #ifdef CONFIG_PM
@@ -376,20 +366,20 @@ static int pciehp_resume (struct pcie_device *dev)
 	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	if (pciehp_force) {
 		struct controller *ctrl = get_service_data(dev);
-		struct slot *t_slot;
+		struct slot *slot;
 		u8 status;
 
 		/* reinitialize the chipset's event detection logic */
 		pcie_enable_notification(ctrl);
 
-		t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
+		slot = ctrl->slot;
 
 		/* Check if slot is occupied */
-		t_slot->hpc_ops->get_adapter_status(t_slot, &status);
+		pciehp_get_adapter_status(slot, &status);
 		if (status)
-			pciehp_enable_slot(t_slot);
+			pciehp_enable_slot(slot);
 		else
-			pciehp_disable_slot(t_slot);
+			pciehp_disable_slot(slot);
 	}
 	return 0;
 }
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 8aab8edf123e..84487d126e4d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -82,7 +82,7 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
 	/* Switch Change */
 	ctrl_dbg(ctrl, "Switch interrupt received\n");
 
-	p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+	pciehp_get_latch_status(p_slot, &getstatus);
 	if (getstatus) {
 		/*
 		 * Switch opened
@@ -114,7 +114,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
 	/* Switch is open, assume a presence change
 	 * Save the presence state
 	 */
-	p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save);
+	pciehp_get_adapter_status(p_slot, &presence_save);
 	if (presence_save) {
 		/*
 		 * Card Present
@@ -143,7 +143,7 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
143 /* power fault */ 143 /* power fault */
144 ctrl_dbg(ctrl, "Power fault interrupt received\n"); 144 ctrl_dbg(ctrl, "Power fault interrupt received\n");
145 145
146 if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { 146 if (!pciehp_query_power_fault(p_slot)) {
147 /* 147 /*
148 * power fault Cleared 148 * power fault Cleared
149 */ 149 */
@@ -172,7 +172,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
172{ 172{
173 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 173 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
174 if (POWER_CTRL(ctrl)) { 174 if (POWER_CTRL(ctrl)) {
175 if (pslot->hpc_ops->power_off_slot(pslot)) { 175 if (pciehp_power_off_slot(pslot)) {
176 ctrl_err(ctrl, 176 ctrl_err(ctrl,
177 "Issue of Slot Power Off command failed\n"); 177 "Issue of Slot Power Off command failed\n");
178 return; 178 return;
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
186 } 186 }
187 187
188 if (PWR_LED(ctrl)) 188 if (PWR_LED(ctrl))
189 pslot->hpc_ops->green_led_off(pslot); 189 pciehp_green_led_off(pslot);
190 190
191 if (ATTN_LED(ctrl)) { 191 if (ATTN_LED(ctrl)) {
192 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 192 if (pciehp_set_attention_status(pslot, 1)) {
193 ctrl_err(ctrl, 193 ctrl_err(ctrl,
194 "Issue of Set Attention Led command failed\n"); 194 "Issue of Set Attention Led command failed\n");
195 return; 195 return;
@@ -208,24 +208,20 @@ static int board_added(struct slot *p_slot)
208{ 208{
209 int retval = 0; 209 int retval = 0;
210 struct controller *ctrl = p_slot->ctrl; 210 struct controller *ctrl = p_slot->ctrl;
211 struct pci_bus *parent = ctrl->pci_dev->subordinate; 211 struct pci_bus *parent = ctrl->pcie->port->subordinate;
212
213 ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n",
214 __func__, p_slot->device, ctrl->slot_device_offset,
215 p_slot->hp_slot);
216 212
217 if (POWER_CTRL(ctrl)) { 213 if (POWER_CTRL(ctrl)) {
218 /* Power on slot */ 214 /* Power on slot */
219 retval = p_slot->hpc_ops->power_on_slot(p_slot); 215 retval = pciehp_power_on_slot(p_slot);
220 if (retval) 216 if (retval)
221 return retval; 217 return retval;
222 } 218 }
223 219
224 if (PWR_LED(ctrl)) 220 if (PWR_LED(ctrl))
225 p_slot->hpc_ops->green_led_blink(p_slot); 221 pciehp_green_led_blink(p_slot);
226 222
227 /* Check link training status */ 223 /* Check link training status */
228 retval = p_slot->hpc_ops->check_lnk_status(ctrl); 224 retval = pciehp_check_link_status(ctrl);
229 if (retval) { 225 if (retval) {
230 ctrl_err(ctrl, "Failed to check link status\n"); 226 ctrl_err(ctrl, "Failed to check link status\n");
231 set_slot_off(ctrl, p_slot); 227 set_slot_off(ctrl, p_slot);
@@ -233,26 +229,21 @@ static int board_added(struct slot *p_slot)
233 } 229 }
234 230
235 /* Check for a power fault */ 231 /* Check for a power fault */
236 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 232 if (pciehp_query_power_fault(p_slot)) {
237 ctrl_dbg(ctrl, "Power fault detected\n"); 233 ctrl_dbg(ctrl, "Power fault detected\n");
238 retval = POWER_FAILURE; 234 retval = -EIO;
239 goto err_exit; 235 goto err_exit;
240 } 236 }
241 237
242 retval = pciehp_configure_device(p_slot); 238 retval = pciehp_configure_device(p_slot);
243 if (retval) { 239 if (retval) {
244 ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", 240 ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
245 pci_domain_nr(parent), p_slot->bus, p_slot->device); 241 pci_domain_nr(parent), parent->number);
246 goto err_exit; 242 goto err_exit;
247 } 243 }
248 244
249 /*
250 * Some PCI Express root ports require fixup after hot-plug operation.
251 */
252 if (pcie_mch_quirk)
253 pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
254 if (PWR_LED(ctrl)) 245 if (PWR_LED(ctrl))
255 p_slot->hpc_ops->green_led_on(p_slot); 246 pciehp_green_led_on(p_slot);
256 247
257 return 0; 248 return 0;
258 249
@@ -274,11 +265,9 @@ static int remove_board(struct slot *p_slot)
274 if (retval) 265 if (retval)
275 return retval; 266 return retval;
276 267
277 ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot);
278
279 if (POWER_CTRL(ctrl)) { 268 if (POWER_CTRL(ctrl)) {
280 /* power off slot */ 269 /* power off slot */
281 retval = p_slot->hpc_ops->power_off_slot(p_slot); 270 retval = pciehp_power_off_slot(p_slot);
282 if (retval) { 271 if (retval) {
283 ctrl_err(ctrl, 272 ctrl_err(ctrl,
284 "Issue of Slot Disable command failed\n"); 273 "Issue of Slot Disable command failed\n");
@@ -292,9 +281,9 @@ static int remove_board(struct slot *p_slot)
292 msleep(1000); 281 msleep(1000);
293 } 282 }
294 283
284 /* turn off Green LED */
295 if (PWR_LED(ctrl)) 285 if (PWR_LED(ctrl))
296 /* turn off Green LED */ 286 pciehp_green_led_off(p_slot);
297 p_slot->hpc_ops->green_led_off(p_slot);
298 287
299 return 0; 288 return 0;
300} 289}
@@ -322,18 +311,17 @@ static void pciehp_power_thread(struct work_struct *work)
322 case POWEROFF_STATE: 311 case POWEROFF_STATE:
323 mutex_unlock(&p_slot->lock); 312 mutex_unlock(&p_slot->lock);
324 ctrl_dbg(p_slot->ctrl, 313 ctrl_dbg(p_slot->ctrl,
325 "Disabling domain:bus:device=%04x:%02x:%02x\n", 314 "Disabling domain:bus:device=%04x:%02x:00\n",
326 pci_domain_nr(p_slot->ctrl->pci_dev->subordinate), 315 pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
327 p_slot->bus, p_slot->device); 316 p_slot->ctrl->pcie->port->subordinate->number);
328 pciehp_disable_slot(p_slot); 317 pciehp_disable_slot(p_slot);
329 mutex_lock(&p_slot->lock); 318 mutex_lock(&p_slot->lock);
330 p_slot->state = STATIC_STATE; 319 p_slot->state = STATIC_STATE;
331 break; 320 break;
332 case POWERON_STATE: 321 case POWERON_STATE:
333 mutex_unlock(&p_slot->lock); 322 mutex_unlock(&p_slot->lock);
334 if (pciehp_enable_slot(p_slot) && 323 if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
335 PWR_LED(p_slot->ctrl)) 324 pciehp_green_led_off(p_slot);
336 p_slot->hpc_ops->green_led_off(p_slot);
337 mutex_lock(&p_slot->lock); 325 mutex_lock(&p_slot->lock);
338 p_slot->state = STATIC_STATE; 326 p_slot->state = STATIC_STATE;
339 break; 327 break;
@@ -384,10 +372,10 @@ static int update_slot_info(struct slot *slot)
384 if (!info) 372 if (!info)
385 return -ENOMEM; 373 return -ENOMEM;
386 374
387 slot->hpc_ops->get_power_status(slot, &(info->power_status)); 375 pciehp_get_power_status(slot, &info->power_status);
388 slot->hpc_ops->get_attention_status(slot, &(info->attention_status)); 376 pciehp_get_attention_status(slot, &info->attention_status);
389 slot->hpc_ops->get_latch_status(slot, &(info->latch_status)); 377 pciehp_get_latch_status(slot, &info->latch_status);
390 slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status)); 378 pciehp_get_adapter_status(slot, &info->adapter_status);
391 379
392 result = pci_hp_change_slot_info(slot->hotplug_slot, info); 380 result = pci_hp_change_slot_info(slot->hotplug_slot, info);
393 kfree (info); 381 kfree (info);
@@ -404,7 +392,7 @@ static void handle_button_press_event(struct slot *p_slot)
404 392
405 switch (p_slot->state) { 393 switch (p_slot->state) {
406 case STATIC_STATE: 394 case STATIC_STATE:
407 p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 395 pciehp_get_power_status(p_slot, &getstatus);
408 if (getstatus) { 396 if (getstatus) {
409 p_slot->state = BLINKINGOFF_STATE; 397 p_slot->state = BLINKINGOFF_STATE;
410 ctrl_info(ctrl, 398 ctrl_info(ctrl,
@@ -418,9 +406,9 @@ static void handle_button_press_event(struct slot *p_slot)
418 } 406 }
419 /* blink green LED and turn off amber */ 407 /* blink green LED and turn off amber */
420 if (PWR_LED(ctrl)) 408 if (PWR_LED(ctrl))
421 p_slot->hpc_ops->green_led_blink(p_slot); 409 pciehp_green_led_blink(p_slot);
422 if (ATTN_LED(ctrl)) 410 if (ATTN_LED(ctrl))
423 p_slot->hpc_ops->set_attention_status(p_slot, 0); 411 pciehp_set_attention_status(p_slot, 0);
424 412
425 schedule_delayed_work(&p_slot->work, 5*HZ); 413 schedule_delayed_work(&p_slot->work, 5*HZ);
426 break; 414 break;
@@ -435,13 +423,13 @@ static void handle_button_press_event(struct slot *p_slot)
435 cancel_delayed_work(&p_slot->work); 423 cancel_delayed_work(&p_slot->work);
436 if (p_slot->state == BLINKINGOFF_STATE) { 424 if (p_slot->state == BLINKINGOFF_STATE) {
437 if (PWR_LED(ctrl)) 425 if (PWR_LED(ctrl))
438 p_slot->hpc_ops->green_led_on(p_slot); 426 pciehp_green_led_on(p_slot);
439 } else { 427 } else {
440 if (PWR_LED(ctrl)) 428 if (PWR_LED(ctrl))
441 p_slot->hpc_ops->green_led_off(p_slot); 429 pciehp_green_led_off(p_slot);
442 } 430 }
443 if (ATTN_LED(ctrl)) 431 if (ATTN_LED(ctrl))
444 p_slot->hpc_ops->set_attention_status(p_slot, 0); 432 pciehp_set_attention_status(p_slot, 0);
445 ctrl_info(ctrl, "PCI slot #%s - action canceled " 433 ctrl_info(ctrl, "PCI slot #%s - action canceled "
446 "due to button press\n", slot_name(p_slot)); 434 "due to button press\n", slot_name(p_slot));
447 p_slot->state = STATIC_STATE; 435 p_slot->state = STATIC_STATE;
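
The two hunks above encode the attention-button grace period: the first press arms a 5-second delayed work item via schedule_delayed_work(&p_slot->work, 5*HZ) and starts the LEDs blinking; a second press inside the window cancels the pending power change. A runnable toy of the state transitions (BLINKINGON_STATE is the power-on counterpart, handled the same way):

    #include <stdio.h>

    enum state { STATIC_STATE, BLINKINGON_STATE, BLINKINGOFF_STATE };

    static enum state button_pressed(enum state cur, int powered)
    {
            switch (cur) {
            case STATIC_STATE:
                    /* kernel: schedule_delayed_work(&p_slot->work, 5*HZ) */
                    puts("armed: power change in 5 seconds");
                    return powered ? BLINKINGOFF_STATE : BLINKINGON_STATE;
            case BLINKINGON_STATE:
            case BLINKINGOFF_STATE:
                    /* kernel: cancel_delayed_work(&p_slot->work) */
                    puts("action canceled due to button press");
                    return STATIC_STATE;
            }
            return cur;
    }

    int main(void)
    {
            enum state s = button_pressed(STATIC_STATE, 1);
            s = button_pressed(s, 1);       /* second press cancels */
            return s == STATIC_STATE ? 0 : 1;
    }
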
@@ -479,7 +467,7 @@ static void handle_surprise_event(struct slot *p_slot)
479 info->p_slot = p_slot; 467 info->p_slot = p_slot;
480 INIT_WORK(&info->work, pciehp_power_thread); 468 INIT_WORK(&info->work, pciehp_power_thread);
481 469
482 p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 470 pciehp_get_adapter_status(p_slot, &getstatus);
483 if (!getstatus) 471 if (!getstatus)
484 p_slot->state = POWEROFF_STATE; 472 p_slot->state = POWEROFF_STATE;
485 else 473 else
@@ -503,9 +491,9 @@ static void interrupt_event_handler(struct work_struct *work)
503 if (!POWER_CTRL(ctrl)) 491 if (!POWER_CTRL(ctrl))
504 break; 492 break;
505 if (ATTN_LED(ctrl)) 493 if (ATTN_LED(ctrl))
506 p_slot->hpc_ops->set_attention_status(p_slot, 1); 494 pciehp_set_attention_status(p_slot, 1);
507 if (PWR_LED(ctrl)) 495 if (PWR_LED(ctrl))
508 p_slot->hpc_ops->green_led_off(p_slot); 496 pciehp_green_led_off(p_slot);
509 break; 497 break;
510 case INT_PRESENCE_ON: 498 case INT_PRESENCE_ON:
511 case INT_PRESENCE_OFF: 499 case INT_PRESENCE_OFF:
@@ -530,45 +518,38 @@ int pciehp_enable_slot(struct slot *p_slot)
530 int rc; 518 int rc;
531 struct controller *ctrl = p_slot->ctrl; 519 struct controller *ctrl = p_slot->ctrl;
532 520
533 /* Check to see if (latch closed, card present, power off) */ 521 rc = pciehp_get_adapter_status(p_slot, &getstatus);
534 mutex_lock(&p_slot->ctrl->crit_sect);
535
536 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
537 if (rc || !getstatus) { 522 if (rc || !getstatus) {
538 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); 523 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
539 mutex_unlock(&p_slot->ctrl->crit_sect);
540 return -ENODEV; 524 return -ENODEV;
541 } 525 }
542 if (MRL_SENS(p_slot->ctrl)) { 526 if (MRL_SENS(p_slot->ctrl)) {
543 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 527 rc = pciehp_get_latch_status(p_slot, &getstatus);
544 if (rc || getstatus) { 528 if (rc || getstatus) {
545 ctrl_info(ctrl, "Latch open on slot(%s)\n", 529 ctrl_info(ctrl, "Latch open on slot(%s)\n",
546 slot_name(p_slot)); 530 slot_name(p_slot));
547 mutex_unlock(&p_slot->ctrl->crit_sect);
548 return -ENODEV; 531 return -ENODEV;
549 } 532 }
550 } 533 }
551 534
552 if (POWER_CTRL(p_slot->ctrl)) { 535 if (POWER_CTRL(p_slot->ctrl)) {
553 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 536 rc = pciehp_get_power_status(p_slot, &getstatus);
554 if (rc || getstatus) { 537 if (rc || getstatus) {
555 ctrl_info(ctrl, "Already enabled on slot(%s)\n", 538 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
556 slot_name(p_slot)); 539 slot_name(p_slot));
557 mutex_unlock(&p_slot->ctrl->crit_sect);
558 return -EINVAL; 540 return -EINVAL;
559 } 541 }
560 } 542 }
561 543
562 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 544 pciehp_get_latch_status(p_slot, &getstatus);
563 545
564 rc = board_added(p_slot); 546 rc = board_added(p_slot);
565 if (rc) { 547 if (rc) {
566 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 548 pciehp_get_latch_status(p_slot, &getstatus);
567 } 549 }
568 550
569 update_slot_info(p_slot); 551 update_slot_info(p_slot);
570 552
571 mutex_unlock(&p_slot->ctrl->crit_sect);
572 return rc; 553 return rc;
573} 554}
574 555
@@ -582,35 +563,29 @@ int pciehp_disable_slot(struct slot *p_slot)
582 if (!p_slot->ctrl) 563 if (!p_slot->ctrl)
583 return 1; 564 return 1;
584 565
585 /* Check to see if (latch closed, card present, power on) */
586 mutex_lock(&p_slot->ctrl->crit_sect);
587
588 if (!HP_SUPR_RM(p_slot->ctrl)) { 566 if (!HP_SUPR_RM(p_slot->ctrl)) {
589 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 567 ret = pciehp_get_adapter_status(p_slot, &getstatus);
590 if (ret || !getstatus) { 568 if (ret || !getstatus) {
591 ctrl_info(ctrl, "No adapter on slot(%s)\n", 569 ctrl_info(ctrl, "No adapter on slot(%s)\n",
592 slot_name(p_slot)); 570 slot_name(p_slot));
593 mutex_unlock(&p_slot->ctrl->crit_sect);
594 return -ENODEV; 571 return -ENODEV;
595 } 572 }
596 } 573 }
597 574
598 if (MRL_SENS(p_slot->ctrl)) { 575 if (MRL_SENS(p_slot->ctrl)) {
599 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 576 ret = pciehp_get_latch_status(p_slot, &getstatus);
600 if (ret || getstatus) { 577 if (ret || getstatus) {
601 ctrl_info(ctrl, "Latch open on slot(%s)\n", 578 ctrl_info(ctrl, "Latch open on slot(%s)\n",
602 slot_name(p_slot)); 579 slot_name(p_slot));
603 mutex_unlock(&p_slot->ctrl->crit_sect);
604 return -ENODEV; 580 return -ENODEV;
605 } 581 }
606 } 582 }
607 583
608 if (POWER_CTRL(p_slot->ctrl)) { 584 if (POWER_CTRL(p_slot->ctrl)) {
609 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 585 ret = pciehp_get_power_status(p_slot, &getstatus);
610 if (ret || !getstatus) { 586 if (ret || !getstatus) {
611 ctrl_info(ctrl, "Already disabled on slot(%s)\n", 587 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
612 slot_name(p_slot)); 588 slot_name(p_slot));
613 mutex_unlock(&p_slot->ctrl->crit_sect);
614 return -EINVAL; 589 return -EINVAL;
615 } 590 }
616 } 591 }
@@ -618,7 +593,6 @@ int pciehp_disable_slot(struct slot *p_slot)
618 ret = remove_board(p_slot); 593 ret = remove_board(p_slot);
619 update_slot_info(p_slot); 594 update_slot_info(p_slot);
620 595
621 mutex_unlock(&p_slot->ctrl->crit_sect);
622 return ret; 596 return ret;
623} 597}
624 598
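
pciehp_enable_slot() and pciehp_disable_slot() keep the same precondition checks and error codes after the rewrite; what changes is the direct call style and the removal of the ctrl->crit_sect locking around the status reads. A condensed, runnable model of the enable-side checks, where the get_* stubs stand in for the pciehp_get_*_status() helpers (each returns 0 on success and reports the hardware bit through the out parameter):

    #include <errno.h>

    typedef unsigned char u8;

    static int get_adapter(u8 *st) { *st = 1; return 0; }  /* card present */
    static int get_latch(u8 *st)   { *st = 0; return 0; }  /* latch closed */
    static int get_power(u8 *st)   { *st = 0; return 0; }  /* powered off  */

    static int may_enable(int mrl_sensor, int power_ctrl)
    {
            u8 st;

            if (get_adapter(&st) || !st)
                    return -ENODEV;           /* nothing in the slot  */
            if (mrl_sensor && (get_latch(&st) || st))
                    return -ENODEV;           /* retention latch open */
            if (power_ctrl && (get_power(&st) || st))
                    return -EINVAL;           /* already powered on   */
            return 0;                         /* safe to add the board */
    }

    int main(void)
    {
            return may_enable(1, 1);
    }
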
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 52813257e5bf..9ef4605c1ef6 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -44,25 +44,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
44 44
45static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 45static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
46{ 46{
47 struct pci_dev *dev = ctrl->pci_dev; 47 struct pci_dev *dev = ctrl->pcie->port;
48 return pci_read_config_word(dev, ctrl->cap_base + reg, value); 48 return pci_read_config_word(dev, ctrl->cap_base + reg, value);
49} 49}
50 50
51static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) 51static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
52{ 52{
53 struct pci_dev *dev = ctrl->pci_dev; 53 struct pci_dev *dev = ctrl->pcie->port;
54 return pci_read_config_dword(dev, ctrl->cap_base + reg, value); 54 return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
55} 55}
56 56
57static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) 57static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
58{ 58{
59 struct pci_dev *dev = ctrl->pci_dev; 59 struct pci_dev *dev = ctrl->pcie->port;
60 return pci_write_config_word(dev, ctrl->cap_base + reg, value); 60 return pci_write_config_word(dev, ctrl->cap_base + reg, value);
61} 61}
62 62
63static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) 63static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
64{ 64{
65 struct pci_dev *dev = ctrl->pci_dev; 65 struct pci_dev *dev = ctrl->pcie->port;
66 return pci_write_config_dword(dev, ctrl->cap_base + reg, value); 66 return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
67} 67}
68 68
@@ -266,7 +266,7 @@ static void pcie_wait_link_active(struct controller *ctrl)
266 ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n"); 266 ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
267} 267}
268 268
269static int hpc_check_lnk_status(struct controller *ctrl) 269int pciehp_check_link_status(struct controller *ctrl)
270{ 270{
271 u16 lnk_status; 271 u16 lnk_status;
272 int retval = 0; 272 int retval = 0;
@@ -305,7 +305,7 @@ static int hpc_check_lnk_status(struct controller *ctrl)
305 return retval; 305 return retval;
306} 306}
307 307
308static int hpc_get_attention_status(struct slot *slot, u8 *status) 308int pciehp_get_attention_status(struct slot *slot, u8 *status)
309{ 309{
310 struct controller *ctrl = slot->ctrl; 310 struct controller *ctrl = slot->ctrl;
311 u16 slot_ctrl; 311 u16 slot_ctrl;
@@ -344,7 +344,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
344 return 0; 344 return 0;
345} 345}
346 346
347static int hpc_get_power_status(struct slot *slot, u8 *status) 347int pciehp_get_power_status(struct slot *slot, u8 *status)
348{ 348{
349 struct controller *ctrl = slot->ctrl; 349 struct controller *ctrl = slot->ctrl;
350 u16 slot_ctrl; 350 u16 slot_ctrl;
@@ -376,7 +376,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
376 return retval; 376 return retval;
377} 377}
378 378
379static int hpc_get_latch_status(struct slot *slot, u8 *status) 379int pciehp_get_latch_status(struct slot *slot, u8 *status)
380{ 380{
381 struct controller *ctrl = slot->ctrl; 381 struct controller *ctrl = slot->ctrl;
382 u16 slot_status; 382 u16 slot_status;
@@ -392,7 +392,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
392 return 0; 392 return 0;
393} 393}
394 394
395static int hpc_get_adapter_status(struct slot *slot, u8 *status) 395int pciehp_get_adapter_status(struct slot *slot, u8 *status)
396{ 396{
397 struct controller *ctrl = slot->ctrl; 397 struct controller *ctrl = slot->ctrl;
398 u16 slot_status; 398 u16 slot_status;
@@ -408,7 +408,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
408 return 0; 408 return 0;
409} 409}
410 410
411static int hpc_query_power_fault(struct slot *slot) 411int pciehp_query_power_fault(struct slot *slot)
412{ 412{
413 struct controller *ctrl = slot->ctrl; 413 struct controller *ctrl = slot->ctrl;
414 u16 slot_status; 414 u16 slot_status;
@@ -422,7 +422,7 @@ static int hpc_query_power_fault(struct slot *slot)
422 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 422 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
423} 423}
424 424
425static int hpc_set_attention_status(struct slot *slot, u8 value) 425int pciehp_set_attention_status(struct slot *slot, u8 value)
426{ 426{
427 struct controller *ctrl = slot->ctrl; 427 struct controller *ctrl = slot->ctrl;
428 u16 slot_cmd; 428 u16 slot_cmd;
@@ -450,7 +450,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
450 return rc; 450 return rc;
451} 451}
452 452
453static void hpc_set_green_led_on(struct slot *slot) 453void pciehp_green_led_on(struct slot *slot)
454{ 454{
455 struct controller *ctrl = slot->ctrl; 455 struct controller *ctrl = slot->ctrl;
456 u16 slot_cmd; 456 u16 slot_cmd;
@@ -463,7 +463,7 @@ static void hpc_set_green_led_on(struct slot *slot)
463 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 463 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
464} 464}
465 465
466static void hpc_set_green_led_off(struct slot *slot) 466void pciehp_green_led_off(struct slot *slot)
467{ 467{
468 struct controller *ctrl = slot->ctrl; 468 struct controller *ctrl = slot->ctrl;
469 u16 slot_cmd; 469 u16 slot_cmd;
@@ -476,7 +476,7 @@ static void hpc_set_green_led_off(struct slot *slot)
476 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 476 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
477} 477}
478 478
479static void hpc_set_green_led_blink(struct slot *slot) 479void pciehp_green_led_blink(struct slot *slot)
480{ 480{
481 struct controller *ctrl = slot->ctrl; 481 struct controller *ctrl = slot->ctrl;
482 u16 slot_cmd; 482 u16 slot_cmd;
@@ -489,7 +489,7 @@ static void hpc_set_green_led_blink(struct slot *slot)
489 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 489 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
490} 490}
491 491
492static int hpc_power_on_slot(struct slot * slot) 492int pciehp_power_on_slot(struct slot * slot)
493{ 493{
494 struct controller *ctrl = slot->ctrl; 494 struct controller *ctrl = slot->ctrl;
495 u16 slot_cmd; 495 u16 slot_cmd;
@@ -497,8 +497,6 @@ static int hpc_power_on_slot(struct slot * slot)
497 u16 slot_status; 497 u16 slot_status;
498 int retval = 0; 498 int retval = 0;
499 499
500 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
501
502 /* Clear sticky power-fault bit from previous power failures */ 500 /* Clear sticky power-fault bit from previous power failures */
503 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 501 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
504 if (retval) { 502 if (retval) {
@@ -539,7 +537,7 @@ static int hpc_power_on_slot(struct slot * slot)
539 537
540static inline int pcie_mask_bad_dllp(struct controller *ctrl) 538static inline int pcie_mask_bad_dllp(struct controller *ctrl)
541{ 539{
542 struct pci_dev *dev = ctrl->pci_dev; 540 struct pci_dev *dev = ctrl->pcie->port;
543 int pos; 541 int pos;
544 u32 reg; 542 u32 reg;
545 543
@@ -556,7 +554,7 @@ static inline int pcie_mask_bad_dllp(struct controller *ctrl)
556 554
557static inline void pcie_unmask_bad_dllp(struct controller *ctrl) 555static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
558{ 556{
559 struct pci_dev *dev = ctrl->pci_dev; 557 struct pci_dev *dev = ctrl->pcie->port;
560 u32 reg; 558 u32 reg;
561 int pos; 559 int pos;
562 560
@@ -570,7 +568,7 @@ static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
570 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); 568 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
571} 569}
572 570
573static int hpc_power_off_slot(struct slot * slot) 571int pciehp_power_off_slot(struct slot * slot)
574{ 572{
575 struct controller *ctrl = slot->ctrl; 573 struct controller *ctrl = slot->ctrl;
576 u16 slot_cmd; 574 u16 slot_cmd;
@@ -578,8 +576,6 @@ static int hpc_power_off_slot(struct slot * slot)
578 int retval = 0; 576 int retval = 0;
579 int changed; 577 int changed;
580 578
581 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
582
583 /* 579 /*
584 * Set Bad DLLP Mask bit in Correctable Error Mask 580 * Set Bad DLLP Mask bit in Correctable Error Mask
585 * Register. This is the workaround against Bad DLLP error 581 * Register. This is the workaround against Bad DLLP error
@@ -614,8 +610,8 @@ static int hpc_power_off_slot(struct slot * slot)
614static irqreturn_t pcie_isr(int irq, void *dev_id) 610static irqreturn_t pcie_isr(int irq, void *dev_id)
615{ 611{
616 struct controller *ctrl = (struct controller *)dev_id; 612 struct controller *ctrl = (struct controller *)dev_id;
613 struct slot *slot = ctrl->slot;
617 u16 detected, intr_loc; 614 u16 detected, intr_loc;
618 struct slot *p_slot;
619 615
620 /* 616 /*
621 * In order to guarantee that all interrupt events are 617 * In order to guarantee that all interrupt events are
@@ -656,29 +652,27 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
656 if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) 652 if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
657 return IRQ_HANDLED; 653 return IRQ_HANDLED;
658 654
659 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
660
661 /* Check MRL Sensor Changed */ 655 /* Check MRL Sensor Changed */
662 if (intr_loc & PCI_EXP_SLTSTA_MRLSC) 656 if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
663 pciehp_handle_switch_change(p_slot); 657 pciehp_handle_switch_change(slot);
664 658
665 /* Check Attention Button Pressed */ 659 /* Check Attention Button Pressed */
666 if (intr_loc & PCI_EXP_SLTSTA_ABP) 660 if (intr_loc & PCI_EXP_SLTSTA_ABP)
667 pciehp_handle_attention_button(p_slot); 661 pciehp_handle_attention_button(slot);
668 662
669 /* Check Presence Detect Changed */ 663 /* Check Presence Detect Changed */
670 if (intr_loc & PCI_EXP_SLTSTA_PDC) 664 if (intr_loc & PCI_EXP_SLTSTA_PDC)
671 pciehp_handle_presence_change(p_slot); 665 pciehp_handle_presence_change(slot);
672 666
673 /* Check Power Fault Detected */ 667 /* Check Power Fault Detected */
674 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { 668 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
675 ctrl->power_fault_detected = 1; 669 ctrl->power_fault_detected = 1;
676 pciehp_handle_power_fault(p_slot); 670 pciehp_handle_power_fault(slot);
677 } 671 }
678 return IRQ_HANDLED; 672 return IRQ_HANDLED;
679} 673}
680 674
681static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) 675int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
682{ 676{
683 struct controller *ctrl = slot->ctrl; 677 struct controller *ctrl = slot->ctrl;
684 enum pcie_link_speed lnk_speed; 678 enum pcie_link_speed lnk_speed;
@@ -693,7 +687,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
693 687
694 switch (lnk_cap & 0x000F) { 688 switch (lnk_cap & 0x000F) {
695 case 1: 689 case 1:
696 lnk_speed = PCIE_2PT5GB; 690 lnk_speed = PCIE_2_5GB;
691 break;
692 case 2:
693 lnk_speed = PCIE_5_0GB;
697 break; 694 break;
698 default: 695 default:
699 lnk_speed = PCIE_LNK_SPEED_UNKNOWN; 696 lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
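
This switch, and the matching one in pciehp_get_cur_link_speed() below, gains a case for the 5.0 GT/s encoding alongside the renamed 2.5 GT/s constant. Bits 3:0 of the Link Capabilities register (and the PCI_EXP_LNKSTA_CLS field of Link Status) encode the link speed: 1 means 2.5 GT/s, 2 means 5.0 GT/s, anything else is treated as unknown. A standalone decode with illustrative enum names:

    #include <stdint.h>

    enum bus_speed { SPEED_2_5GT, SPEED_5_0GT, SPEED_UNKNOWN };

    static enum bus_speed decode_link_speed(uint32_t lnk_field)
    {
            switch (lnk_field & 0x000f) {
            case 1:
                    return SPEED_2_5GT;   /* 2.5 GT/s, PCIe 1.x */
            case 2:
                    return SPEED_5_0GT;   /* 5.0 GT/s, PCIe 2.0, newly handled */
            default:
                    return SPEED_UNKNOWN; /* reserved encodings */
            }
    }

    int main(void)
    {
            return decode_link_speed(0x2) == SPEED_5_0GT ? 0 : 1;
    }
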
@@ -706,7 +703,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
706 return retval; 703 return retval;
707} 704}
708 705
709static int hpc_get_max_lnk_width(struct slot *slot, 706int pciehp_get_max_lnk_width(struct slot *slot,
710 enum pcie_link_width *value) 707 enum pcie_link_width *value)
711{ 708{
712 struct controller *ctrl = slot->ctrl; 709 struct controller *ctrl = slot->ctrl;
@@ -756,7 +753,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
756 return retval; 753 return retval;
757} 754}
758 755
759static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) 756int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
760{ 757{
761 struct controller *ctrl = slot->ctrl; 758 struct controller *ctrl = slot->ctrl;
762 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; 759 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
@@ -772,7 +769,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
772 769
773 switch (lnk_status & PCI_EXP_LNKSTA_CLS) { 770 switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
774 case 1: 771 case 1:
775 lnk_speed = PCIE_2PT5GB; 772 lnk_speed = PCIE_2_5GB;
773 break;
774 case 2:
775 lnk_speed = PCIE_5_0GB;
776 break; 776 break;
777 default: 777 default:
778 lnk_speed = PCIE_LNK_SPEED_UNKNOWN; 778 lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
@@ -785,7 +785,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
785 return retval; 785 return retval;
786} 786}
787 787
788static int hpc_get_cur_lnk_width(struct slot *slot, 788int pciehp_get_cur_lnk_width(struct slot *slot,
789 enum pcie_link_width *value) 789 enum pcie_link_width *value)
790{ 790{
791 struct controller *ctrl = slot->ctrl; 791 struct controller *ctrl = slot->ctrl;
@@ -836,30 +836,6 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
836 return retval; 836 return retval;
837} 837}
838 838
839static void pcie_release_ctrl(struct controller *ctrl);
840static struct hpc_ops pciehp_hpc_ops = {
841 .power_on_slot = hpc_power_on_slot,
842 .power_off_slot = hpc_power_off_slot,
843 .set_attention_status = hpc_set_attention_status,
844 .get_power_status = hpc_get_power_status,
845 .get_attention_status = hpc_get_attention_status,
846 .get_latch_status = hpc_get_latch_status,
847 .get_adapter_status = hpc_get_adapter_status,
848
849 .get_max_bus_speed = hpc_get_max_lnk_speed,
850 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
851 .get_max_lnk_width = hpc_get_max_lnk_width,
852 .get_cur_lnk_width = hpc_get_cur_lnk_width,
853
854 .query_power_fault = hpc_query_power_fault,
855 .green_led_on = hpc_set_green_led_on,
856 .green_led_off = hpc_set_green_led_off,
857 .green_led_blink = hpc_set_green_led_blink,
858
859 .release_ctlr = pcie_release_ctrl,
860 .check_lnk_status = hpc_check_lnk_status,
861};
862
863int pcie_enable_notification(struct controller *ctrl) 839int pcie_enable_notification(struct controller *ctrl)
864{ 840{
865 u16 cmd, mask; 841 u16 cmd, mask;
@@ -924,23 +900,16 @@ static int pcie_init_slot(struct controller *ctrl)
924 if (!slot) 900 if (!slot)
925 return -ENOMEM; 901 return -ENOMEM;
926 902
927 slot->hp_slot = 0;
928 slot->ctrl = ctrl; 903 slot->ctrl = ctrl;
929 slot->bus = ctrl->pci_dev->subordinate->number;
930 slot->device = ctrl->slot_device_offset + slot->hp_slot;
931 slot->hpc_ops = ctrl->hpc_ops;
932 slot->number = ctrl->first_slot;
933 mutex_init(&slot->lock); 904 mutex_init(&slot->lock);
934 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 905 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
935 list_add(&slot->slot_list, &ctrl->slot_list); 906 ctrl->slot = slot;
936 return 0; 907 return 0;
937} 908}
938 909
939static void pcie_cleanup_slot(struct controller *ctrl) 910static void pcie_cleanup_slot(struct controller *ctrl)
940{ 911{
941 struct slot *slot; 912 struct slot *slot = ctrl->slot;
942 slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
943 list_del(&slot->slot_list);
944 cancel_delayed_work(&slot->work); 913 cancel_delayed_work(&slot->work);
945 flush_scheduled_work(); 914 flush_scheduled_work();
946 flush_workqueue(pciehp_wq); 915 flush_workqueue(pciehp_wq);
@@ -951,7 +920,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
951{ 920{
952 int i; 921 int i;
953 u16 reg16; 922 u16 reg16;
954 struct pci_dev *pdev = ctrl->pci_dev; 923 struct pci_dev *pdev = ctrl->pcie->port;
955 924
956 if (!pciehp_debug) 925 if (!pciehp_debug)
957 return; 926 return;
@@ -974,7 +943,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
974 (unsigned long long)pci_resource_start(pdev, i)); 943 (unsigned long long)pci_resource_start(pdev, i));
975 } 944 }
976 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); 945 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
977 ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); 946 ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
978 ctrl_info(ctrl, " Attention Button : %3s\n", 947 ctrl_info(ctrl, " Attention Button : %3s\n",
979 ATTN_BUTTN(ctrl) ? "yes" : "no"); 948 ATTN_BUTTN(ctrl) ? "yes" : "no");
980 ctrl_info(ctrl, " Power Controller : %3s\n", 949 ctrl_info(ctrl, " Power Controller : %3s\n",
@@ -1008,10 +977,7 @@ struct controller *pcie_init(struct pcie_device *dev)
1008 dev_err(&dev->device, "%s: Out of memory\n", __func__); 977 dev_err(&dev->device, "%s: Out of memory\n", __func__);
1009 goto abort; 978 goto abort;
1010 } 979 }
1011 INIT_LIST_HEAD(&ctrl->slot_list);
1012
1013 ctrl->pcie = dev; 980 ctrl->pcie = dev;
1014 ctrl->pci_dev = pdev;
1015 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 981 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1016 if (!ctrl->cap_base) { 982 if (!ctrl->cap_base) {
1017 ctrl_err(ctrl, "Cannot find PCI Express capability\n"); 983 ctrl_err(ctrl, "Cannot find PCI Express capability\n");
@@ -1023,11 +989,6 @@ struct controller *pcie_init(struct pcie_device *dev)
1023 } 989 }
1024 990
1025 ctrl->slot_cap = slot_cap; 991 ctrl->slot_cap = slot_cap;
1026 ctrl->first_slot = slot_cap >> 19;
1027 ctrl->slot_device_offset = 0;
1028 ctrl->num_slots = 1;
1029 ctrl->hpc_ops = &pciehp_hpc_ops;
1030 mutex_init(&ctrl->crit_sect);
1031 mutex_init(&ctrl->ctrl_lock); 992 mutex_init(&ctrl->ctrl_lock);
1032 init_waitqueue_head(&ctrl->queue); 993 init_waitqueue_head(&ctrl->queue);
1033 dbg_ctrl(ctrl); 994 dbg_ctrl(ctrl);
@@ -1083,7 +1044,7 @@ abort:
1083 return NULL; 1044 return NULL;
1084} 1045}
1085 1046
1086void pcie_release_ctrl(struct controller *ctrl) 1047void pciehp_release_ctrl(struct controller *ctrl)
1087{ 1048{
1088 pcie_shutdown_notification(ctrl); 1049 pcie_shutdown_notification(ctrl);
1089 pcie_cleanup_slot(ctrl); 1050 pcie_cleanup_slot(ctrl);
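
With the slot list gone, pcie_init_slot() stores the single slot directly in ctrl->slot, and the cached first_slot field is dropped in favor of reading the Physical Slot Number on demand via PSN(). The deleted initializer shows the encoding: the PSN lives in bits 31:19 of the Slot Capabilities register. A tiny standalone equivalent of that extraction:

    #include <stdint.h>

    /* Bits 31:19 of Slot Capabilities carry the Physical Slot Number;
     * this mirrors the dropped "first_slot = slot_cap >> 19" and,
     * presumably, the PSN() macro now used by dbg_ctrl(). */
    static unsigned int physical_slot_number(uint32_t slot_cap)
    {
            return slot_cap >> 19;
    }

    int main(void)
    {
            /* e.g. a slot_cap word with PSN = 5 in the top 13 bits */
            return physical_slot_number(5u << 19) == 5 ? 0 : 1;
    }
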
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 10f9566cceeb..21733108adde 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,136 +34,6 @@
34#include "../pci.h" 34#include "../pci.h"
35#include "pciehp.h" 35#include "pciehp.h"
36 36
37static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
38{
39 u16 pci_cmd, pci_bctl;
40
41 if (hpp->revision > 1) {
42 warn("Rev.%d type0 record not supported\n", hpp->revision);
43 return;
44 }
45
46 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
47 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
48 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
49 if (hpp->enable_serr)
50 pci_cmd |= PCI_COMMAND_SERR;
51 else
52 pci_cmd &= ~PCI_COMMAND_SERR;
53 if (hpp->enable_perr)
54 pci_cmd |= PCI_COMMAND_PARITY;
55 else
56 pci_cmd &= ~PCI_COMMAND_PARITY;
57 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
58
59 /* Program bridge control value */
60 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
61 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
62 hpp->latency_timer);
63 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
64 if (hpp->enable_serr)
65 pci_bctl |= PCI_BRIDGE_CTL_SERR;
66 else
67 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
68 if (hpp->enable_perr)
69 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
70 else
71 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
72 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
73 }
74}
75
76static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
77{
78 int pos;
79 u16 reg16;
80 u32 reg32;
81
82 if (hpp->revision > 1) {
83 warn("Rev.%d type2 record not supported\n", hpp->revision);
84 return;
85 }
86
87 /* Find PCI Express capability */
88 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
89 if (!pos)
90 return;
91
92 /* Initialize Device Control Register */
93 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
94 reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
95 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
96
97 /* Initialize Link Control Register */
98 if (dev->subordinate) {
99 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
100 reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
101 | hpp->pci_exp_lnkctl_or;
102 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
103 }
104
105 /* Find Advanced Error Reporting Enhanced Capability */
106 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
107 if (!pos)
108 return;
109
110 /* Initialize Uncorrectable Error Mask Register */
111 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
112 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
113 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
114
115 /* Initialize Uncorrectable Error Severity Register */
116 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
117 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
118 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
119
120 /* Initialize Correctable Error Mask Register */
121 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
122 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
123 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
124
125 /* Initialize Advanced Error Capabilities and Control Register */
126 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
127 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
128 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
129
130 /*
131 * FIXME: The following two registers are not supported yet.
132 *
133 * o Secondary Uncorrectable Error Severity Register
134 * o Secondary Uncorrectable Error Mask Register
135 */
136}
137
138static void program_fw_provided_values(struct pci_dev *dev)
139{
140 struct pci_dev *cdev;
141 struct hotplug_params hpp;
142
143 /* Program hpp values for this device */
144 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
145 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
146 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
147 return;
148
149 if (pciehp_get_hp_params_from_firmware(dev, &hpp)) {
150 warn("Could not get hotplug parameters\n");
151 return;
152 }
153
154 if (hpp.t2)
155 program_hpp_type2(dev, hpp.t2);
156 if (hpp.t0)
157 program_hpp_type0(dev, hpp.t0);
158
159 /* Program child devices */
160 if (dev->subordinate) {
161 list_for_each_entry(cdev, &dev->subordinate->devices,
162 bus_list)
163 program_fw_provided_values(cdev);
164 }
165}
166
167static int __ref pciehp_add_bridge(struct pci_dev *dev) 37static int __ref pciehp_add_bridge(struct pci_dev *dev)
168{ 38{
169 struct pci_bus *parent = dev->bus; 39 struct pci_bus *parent = dev->bus;
@@ -193,27 +63,27 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev)
193int pciehp_configure_device(struct slot *p_slot) 63int pciehp_configure_device(struct slot *p_slot)
194{ 64{
195 struct pci_dev *dev; 65 struct pci_dev *dev;
196 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 66 struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
197 int num, fn; 67 int num, fn;
198 struct controller *ctrl = p_slot->ctrl; 68 struct controller *ctrl = p_slot->ctrl;
199 69
200 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 70 dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
201 if (dev) { 71 if (dev) {
202 ctrl_err(ctrl, "Device %s already exists " 72 ctrl_err(ctrl, "Device %s already exists "
203 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), 73 "at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
204 pci_domain_nr(parent), p_slot->bus, p_slot->device); 74 pci_domain_nr(parent), parent->number);
205 pci_dev_put(dev); 75 pci_dev_put(dev);
206 return -EINVAL; 76 return -EINVAL;
207 } 77 }
208 78
209 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); 79 num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
210 if (num == 0) { 80 if (num == 0) {
211 ctrl_err(ctrl, "No new device found\n"); 81 ctrl_err(ctrl, "No new device found\n");
212 return -ENODEV; 82 return -ENODEV;
213 } 83 }
214 84
215 for (fn = 0; fn < 8; fn++) { 85 for (fn = 0; fn < 8; fn++) {
216 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn)); 86 dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
217 if (!dev) 87 if (!dev)
218 continue; 88 continue;
219 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 89 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
@@ -226,7 +96,7 @@ int pciehp_configure_device(struct slot *p_slot)
226 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { 96 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
227 pciehp_add_bridge(dev); 97 pciehp_add_bridge(dev);
228 } 98 }
229 program_fw_provided_values(dev); 99 pci_configure_slot(dev);
230 pci_dev_put(dev); 100 pci_dev_put(dev);
231 } 101 }
232 102
@@ -241,19 +111,18 @@ int pciehp_unconfigure_device(struct slot *p_slot)
241 int j; 111 int j;
242 u8 bctl = 0; 112 u8 bctl = 0;
243 u8 presence = 0; 113 u8 presence = 0;
244 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 114 struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
245 u16 command; 115 u16 command;
246 struct controller *ctrl = p_slot->ctrl; 116 struct controller *ctrl = p_slot->ctrl;
247 117
248 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", 118 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
249 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); 119 __func__, pci_domain_nr(parent), parent->number);
250 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); 120 ret = pciehp_get_adapter_status(p_slot, &presence);
251 if (ret) 121 if (ret)
252 presence = 0; 122 presence = 0;
253 123
254 for (j = 0; j < 8; j++) { 124 for (j = 0; j < 8; j++) {
255 struct pci_dev* temp = pci_get_slot(parent, 125 struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j));
256 (p_slot->device << 3) | j);
257 if (!temp) 126 if (!temp)
258 continue; 127 continue;
259 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 128 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
@@ -285,11 +154,6 @@ int pciehp_unconfigure_device(struct slot *p_slot)
285 } 154 }
286 pci_dev_put(temp); 155 pci_dev_put(temp);
287 } 156 }
288 /*
289 * Some PCI Express root ports require fixup after hot-plug operation.
290 */
291 if (pcie_mch_quirk)
292 pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev);
293 157
294 return rc; 158 return rc;
295} 159}
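
The configure/unconfigure paths above lose the p_slot->device field for the same reason the slot list went away: behind a PCIe downstream port the hot-added child is always device 0 of the secondary bus, so every devfn is PCI_DEVFN(0, fn). A sketch of the resulting scan loop against the in-kernel API (not a standalone program):

    #include <linux/pci.h>

    static void visit_slot_functions(struct pci_bus *parent)
    {
            int fn;

            for (fn = 0; fn < 8; fn++) {
                    struct pci_dev *dev = pci_get_slot(parent, PCI_DEVFN(0, fn));

                    if (!dev)
                            continue;
                    /* ... per-function setup goes here ... */
                    pci_dev_put(dev);  /* drop the ref pci_get_slot() took */
            }
    }
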
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
new file mode 100644
index 000000000000..cc8ec3aa41a7
--- /dev/null
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -0,0 +1,187 @@
1/*
2 * Copyright (C) 1995,2001 Compaq Computer Corporation
3 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
4 * Copyright (C) 2001 IBM Corp.
5 * Copyright (C) 2003-2004 Intel Corporation
6 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/pci.h>
27#include <linux/pci_hotplug.h>
28
29static struct hpp_type0 pci_default_type0 = {
30 .revision = 1,
31 .cache_line_size = 8,
32 .latency_timer = 0x40,
33 .enable_serr = 0,
34 .enable_perr = 0,
35};
36
37static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
38{
39 u16 pci_cmd, pci_bctl;
40
41 if (!hpp) {
42 /*
43 * Perhaps we *should* use default settings for PCIe, but
44 * pciehp didn't, so we won't either.
45 */
46 if (dev->is_pcie)
47 return;
48 dev_info(&dev->dev, "using default PCI settings\n");
49 hpp = &pci_default_type0;
50 }
51
52 if (hpp->revision > 1) {
53 dev_warn(&dev->dev,
54 "PCI settings rev %d not supported; using defaults\n",
55 hpp->revision);
56 hpp = &pci_default_type0;
57 }
58
59 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
60 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
61 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
62 if (hpp->enable_serr)
63 pci_cmd |= PCI_COMMAND_SERR;
64 else
65 pci_cmd &= ~PCI_COMMAND_SERR;
66 if (hpp->enable_perr)
67 pci_cmd |= PCI_COMMAND_PARITY;
68 else
69 pci_cmd &= ~PCI_COMMAND_PARITY;
70 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
71
72 /* Program bridge control value */
73 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
74 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
75 hpp->latency_timer);
76 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
77 if (hpp->enable_serr)
78 pci_bctl |= PCI_BRIDGE_CTL_SERR;
79 else
80 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
81 if (hpp->enable_perr)
82 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
83 else
84 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
85 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
86 }
87}
88
89static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
90{
91 if (hpp)
92 dev_warn(&dev->dev, "PCI-X settings not supported\n");
93}
94
95static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
96{
97 int pos;
98 u16 reg16;
99 u32 reg32;
100
101 if (!hpp)
102 return;
103
104 /* Find PCI Express capability */
105 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
106 if (!pos)
107 return;
108
109 if (hpp->revision > 1) {
110 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
111 hpp->revision);
112 return;
113 }
114
115 /* Initialize Device Control Register */
116 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
117 reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
118 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
119
120 /* Initialize Link Control Register */
121 if (dev->subordinate) {
122 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
123 reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
124 | hpp->pci_exp_lnkctl_or;
125 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
126 }
127
128 /* Find Advanced Error Reporting Enhanced Capability */
129 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
130 if (!pos)
131 return;
132
133 /* Initialize Uncorrectable Error Mask Register */
134 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
135 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
136 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
137
138 /* Initialize Uncorrectable Error Severity Register */
139 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
140 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
141 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
142
143 /* Initialize Correctable Error Mask Register */
144 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
145 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
146 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
147
148 /* Initialize Advanced Error Capabilities and Control Register */
149 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
150 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
151 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
152
153 /*
154 * FIXME: The following two registers are not supported yet.
155 *
156 * o Secondary Uncorrectable Error Severity Register
157 * o Secondary Uncorrectable Error Mask Register
158 */
159}
160
161void pci_configure_slot(struct pci_dev *dev)
162{
163 struct pci_dev *cdev;
164 struct hotplug_params hpp;
165 int ret;
166
167 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
168 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
170 return;
171
172 memset(&hpp, 0, sizeof(hpp));
173 ret = pci_get_hp_params(dev, &hpp);
174 if (ret)
175 dev_warn(&dev->dev, "no hotplug settings from platform\n");
176
177 program_hpp_type2(dev, hpp.t2);
178 program_hpp_type1(dev, hpp.t1);
179 program_hpp_type0(dev, hpp.t0);
180
181 if (dev->subordinate) {
182 list_for_each_entry(cdev, &dev->subordinate->devices,
183 bus_list)
184 pci_configure_slot(cdev);
185 }
186}
187EXPORT_SYMBOL_GPL(pci_configure_slot);
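
pci_configure_slot() consolidates what pciehp and shpchp each used to do privately: fetch firmware hotplug parameters with pci_get_hp_params(), program the type-2 (PCIe) and type-0 (conventional PCI) records, and recurse into child buses. The type-2 records are all applied with the same masked read-modify-write idiom, where firmware supplies an AND mask (bits to preserve) and an OR mask (bits to force on). A standalone helper capturing that idiom:

    #include <stdint.h>

    /* reg = (reg & and_mask) | or_mask, as used above for DEVCTL, LNKCTL
     * and the AER mask/severity/capability registers. */
    static uint32_t apply_hp_param(uint32_t reg, uint32_t and_mask,
                                   uint32_t or_mask)
    {
            return (reg & and_mask) | or_mask;
    }

    int main(void)
    {
            /* preserve the low byte, force bit 8 on */
            return apply_hp_param(0x1ff, 0x0ff, 0x100) == 0x1ff ? 0 : 1;
    }
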
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 974e924ca96d..bd588eb8e922 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -188,21 +188,12 @@ static inline const char *slot_name(struct slot *slot)
188 188
189#ifdef CONFIG_ACPI 189#ifdef CONFIG_ACPI
190#include <linux/pci-acpi.h> 190#include <linux/pci-acpi.h>
191static inline int get_hp_params_from_firmware(struct pci_dev *dev,
192 struct hotplug_params *hpp)
193{
194 if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
195 return -ENODEV;
196 return 0;
197}
198
199static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) 191static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
200{ 192{
201 u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; 193 u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
202 return acpi_get_hp_hw_control_from_firmware(dev, flags); 194 return acpi_get_hp_hw_control_from_firmware(dev, flags);
203} 195}
204#else 196#else
205#define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
206#define get_hp_hw_control_from_firmware(dev) (0) 197#define get_hp_hw_control_from_firmware(dev) (0)
207#endif 198#endif
208 199
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index aa315e52529b..8c3d3219f227 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,66 +34,6 @@
34#include "../pci.h" 34#include "../pci.h"
35#include "shpchp.h" 35#include "shpchp.h"
36 36
37static void program_fw_provided_values(struct pci_dev *dev)
38{
39 u16 pci_cmd, pci_bctl;
40 struct pci_dev *cdev;
41 struct hotplug_params hpp;
42
43 /* Program hpp values for this device */
44 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
45 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
46 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
47 return;
48
49 /* use default values if we can't get them from firmware */
50 if (get_hp_params_from_firmware(dev, &hpp) ||
51 !hpp.t0 || (hpp.t0->revision > 1)) {
52 warn("Could not get hotplug parameters. Use defaults\n");
53 hpp.t0 = &hpp.type0_data;
54 hpp.t0->revision = 0;
55 hpp.t0->cache_line_size = 8;
56 hpp.t0->latency_timer = 0x40;
57 hpp.t0->enable_serr = 0;
58 hpp.t0->enable_perr = 0;
59 }
60
61 pci_write_config_byte(dev,
62 PCI_CACHE_LINE_SIZE, hpp.t0->cache_line_size);
63 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.t0->latency_timer);
64 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
65 if (hpp.t0->enable_serr)
66 pci_cmd |= PCI_COMMAND_SERR;
67 else
68 pci_cmd &= ~PCI_COMMAND_SERR;
69 if (hpp.t0->enable_perr)
70 pci_cmd |= PCI_COMMAND_PARITY;
71 else
72 pci_cmd &= ~PCI_COMMAND_PARITY;
73 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
74
75 /* Program bridge control value and child devices */
76 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
77 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
78 hpp.t0->latency_timer);
79 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
80 if (hpp.t0->enable_serr)
81 pci_bctl |= PCI_BRIDGE_CTL_SERR;
82 else
83 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
84 if (hpp.t0->enable_perr)
85 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
86 else
87 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
88 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
89 if (dev->subordinate) {
90 list_for_each_entry(cdev, &dev->subordinate->devices,
91 bus_list)
92 program_fw_provided_values(cdev);
93 }
94 }
95}
96
97int __ref shpchp_configure_device(struct slot *p_slot) 37int __ref shpchp_configure_device(struct slot *p_slot)
98{ 38{
99 struct pci_dev *dev; 39 struct pci_dev *dev;
@@ -153,7 +93,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
153 child->subordinate = pci_do_scan_bus(child); 93 child->subordinate = pci_do_scan_bus(child);
154 pci_bus_size_bridges(child); 94 pci_bus_size_bridges(child);
155 } 95 }
156 program_fw_provided_values(dev); 96 pci_configure_slot(dev);
157 pci_dev_put(dev); 97 pci_dev_put(dev);
158 } 98 }
159 99
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2314ad7ee5fe..855dd7ca47f3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -37,6 +37,8 @@
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/sysdev.h> 39#include <linux/sysdev.h>
40#include <linux/tboot.h>
41#include <linux/dmi.h>
40#include <asm/cacheflush.h> 42#include <asm/cacheflush.h>
41#include <asm/iommu.h> 43#include <asm/iommu.h>
42#include "pci.h" 44#include "pci.h"
@@ -55,8 +57,14 @@
55 57
56#define MAX_AGAW_WIDTH 64 58#define MAX_AGAW_WIDTH 64
57 59
58#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 60#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
59#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1) 61#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
62
63/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
64 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
65#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
66 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
67#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
60 68
61#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 69#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 70#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
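
The new macro pair exists to keep page-frame numbers representable in an unsigned long on 32-bit builds: __DOMAIN_MAX_PFN() is computed in 64 bits and then clamped to (unsigned long)-1, while the address limit is still derived from the unclamped 64-bit PFN. A userspace rendering of the same arithmetic:

    #include <stdint.h>
    #include <limits.h>
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12

    #define RAW_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
    #define MAX_PFN(gaw)     ((unsigned long)(RAW_MAX_PFN(gaw) < ULONG_MAX ? \
                                              RAW_MAX_PFN(gaw) : ULONG_MAX))
    #define MAX_ADDR(gaw)    (((uint64_t)RAW_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

    int main(void)
    {
            /* with a 48-bit address width, a 32-bit unsigned long clamps
             * the PFN while the 64-bit address limit stays intact */
            printf("max pfn %lu, max addr %llu\n", MAX_PFN(48),
                   (unsigned long long)MAX_ADDR(48));
            return 0;
    }
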
@@ -251,7 +259,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
251 * 2. It maps to each iommu if successful. 259 * 2. It maps to each iommu if successful.
252 * 3. Each iommu mapps to this domain if successful. 260 * 3. Each iommu mapps to this domain if successful.
253 */ 261 */
254struct dmar_domain *si_domain; 262static struct dmar_domain *si_domain;
263static int hw_pass_through = 1;
255 264
256/* devices under the same p2p bridge are owned in one domain */ 265/* devices under the same p2p bridge are owned in one domain */
257#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) 266#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -727,7 +736,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
727 return NULL; 736 return NULL;
728 737
729 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); 738 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
730 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; 739 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
731 if (cmpxchg64(&pte->val, 0ULL, pteval)) { 740 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
732 /* Someone else set it while we were thinking; use theirs. */ 741 /* Someone else set it while we were thinking; use theirs. */
733 free_pgtable_page(tmp_page); 742 free_pgtable_page(tmp_page);
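
The added (uint64_t) cast in pfn_to_dma_pte() is a 32-bit correctness fix: virt_to_dma_pfn() yields an unsigned long, and shifting that left by VTD_PAGE_SHIFT truncates for physical addresses above 4GB when unsigned long is 32 bits wide. Widening before the shift avoids it:

    #include <stdint.h>

    #define PAGE_SHIFT 12

    static uint64_t pfn_to_addr_buggy(unsigned long pfn)
    {
            return pfn << PAGE_SHIFT;             /* truncates on 32-bit */
    }

    static uint64_t pfn_to_addr_fixed(unsigned long pfn)
    {
            return (uint64_t)pfn << PAGE_SHIFT;   /* widen first, then shift */
    }

    int main(void)
    {
            unsigned long pfn = 0x100000;   /* the 4GB boundary, 4KB pages */
            return pfn_to_addr_fixed(pfn) >= pfn_to_addr_buggy(pfn) ? 0 : 1;
    }
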
@@ -777,9 +786,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
777 786
778 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 787 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
779 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 788 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
789 BUG_ON(start_pfn > last_pfn);
780 790
781 /* we don't need lock here; nobody else touches the iova range */ 791 /* we don't need lock here; nobody else touches the iova range */
782 while (start_pfn <= last_pfn) { 792 do {
783 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); 793 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
784 if (!pte) { 794 if (!pte) {
785 start_pfn = align_to_level(start_pfn + 1, 2); 795 start_pfn = align_to_level(start_pfn + 1, 2);
@@ -793,7 +803,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
793 803
794 domain_flush_cache(domain, first_pte, 804 domain_flush_cache(domain, first_pte,
795 (void *)pte - (void *)first_pte); 805 (void *)pte - (void *)first_pte);
796 } 806
807 } while (start_pfn && start_pfn <= last_pfn);
797} 808}
798 809
799/* free page table pages. last level pte should already be cleared */ 810/* free page table pages. last level pte should already be cleared */
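
The loop in dma_pte_clear_range() becomes a do/while with an explicit wrap check because start_pfn is an unsigned long: once DOMAIN_MAX_PFN can equal ULONG_MAX (see the macro change above), the increment past the last page wraps to 0 and the old "start_pfn <= last_pfn" test stays true forever. The "start_pfn &&" term stops the loop on wrap, the new BUG_ON guarantees at least one valid pass, and dma_pte_free_pagetable() below gets the same treatment with "tmp && ...". A runnable toy of the pattern:

    #include <limits.h>

    static unsigned long walk(unsigned long pfn, unsigned long last)
    {
            unsigned long visited = 0;

            do {
                    visited++;            /* stands in for clearing PTEs */
                    pfn++;                /* stands in for the page stride */
            } while (pfn && pfn <= last); /* pfn == 0 means wrap: stop */

            return visited;
    }

    int main(void)
    {
            /* three pages ending exactly at ULONG_MAX terminate cleanly */
            return walk(ULONG_MAX - 2, ULONG_MAX) == 3 ? 0 : 1;
    }
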
@@ -809,6 +820,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
809 820
810 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 821 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
811 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 822 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
823 BUG_ON(start_pfn > last_pfn);
812 824
813 /* We don't need lock here; nobody else touches the iova range */ 825 /* We don't need lock here; nobody else touches the iova range */
814 level = 2; 826 level = 2;
@@ -819,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
819 if (tmp + level_size(level) - 1 > last_pfn) 831 if (tmp + level_size(level) - 1 > last_pfn)
820 return; 832 return;
821 833
822 while (tmp + level_size(level) - 1 <= last_pfn) { 834 do {
823 first_pte = pte = dma_pfn_level_pte(domain, tmp, level); 835 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
824 if (!pte) { 836 if (!pte) {
825 tmp = align_to_level(tmp + 1, level + 1); 837 tmp = align_to_level(tmp + 1, level + 1);
@@ -838,7 +850,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
838 domain_flush_cache(domain, first_pte, 850 domain_flush_cache(domain, first_pte,
839 (void *)pte - (void *)first_pte); 851 (void *)pte - (void *)first_pte);
840 852
841 } 853 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
842 level++; 854 level++;
843 } 855 }
844 /* free pgd */ 856 /* free pgd */
@@ -1157,6 +1169,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
1157 pr_debug("Number of Domains supportd <%ld>\n", ndomains); 1169 pr_debug("Number of Domains supportd <%ld>\n", ndomains);
1158 nlongs = BITS_TO_LONGS(ndomains); 1170 nlongs = BITS_TO_LONGS(ndomains);
1159 1171
1172 spin_lock_init(&iommu->lock);
1173
1160 /* TBD: there might be 64K domains, 1174 /* TBD: there might be 64K domains,
1161 * consider other allocation for future chip 1175 * consider other allocation for future chip
1162 */ 1176 */
@@ -1169,12 +1183,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
1169 GFP_KERNEL); 1183 GFP_KERNEL);
1170 if (!iommu->domains) { 1184 if (!iommu->domains) {
1171 printk(KERN_ERR "Allocating domain array failed\n"); 1185 printk(KERN_ERR "Allocating domain array failed\n");
1172 kfree(iommu->domain_ids);
1173 return -ENOMEM; 1186 return -ENOMEM;
1174 } 1187 }
1175 1188
1176 spin_lock_init(&iommu->lock);
1177
1178 /* 1189 /*
1179 * if Caching mode is set, then invalid translations are tagged 1190 * if Caching mode is set, then invalid translations are tagged
1180 * with domainid 0. Hence we need to pre-allocate it. 1191 * with domainid 0. Hence we need to pre-allocate it.
@@ -1194,22 +1205,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1194 int i; 1205 int i;
1195 unsigned long flags; 1206 unsigned long flags;
1196 1207
1197 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); 1208 if ((iommu->domains) && (iommu->domain_ids)) {
1198 for (; i < cap_ndoms(iommu->cap); ) { 1209 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1199 domain = iommu->domains[i]; 1210 for (; i < cap_ndoms(iommu->cap); ) {
1200 clear_bit(i, iommu->domain_ids); 1211 domain = iommu->domains[i];
1212 clear_bit(i, iommu->domain_ids);
1213
1214 spin_lock_irqsave(&domain->iommu_lock, flags);
1215 if (--domain->iommu_count == 0) {
1216 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1217 vm_domain_exit(domain);
1218 else
1219 domain_exit(domain);
1220 }
1221 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1201 1222
1202 spin_lock_irqsave(&domain->iommu_lock, flags); 1223 i = find_next_bit(iommu->domain_ids,
1203 if (--domain->iommu_count == 0) { 1224 cap_ndoms(iommu->cap), i+1);
1204 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1205 vm_domain_exit(domain);
1206 else
1207 domain_exit(domain);
1208 } 1225 }
1209 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1210
1211 i = find_next_bit(iommu->domain_ids,
1212 cap_ndoms(iommu->cap), i+1);
1213 } 1226 }
1214 1227
1215 if (iommu->gcmd & DMA_GCMD_TE) 1228 if (iommu->gcmd & DMA_GCMD_TE)
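The teardown now runs only when both iommu->domains and iommu->domain_ids were actually allocated, matching the init error path above, which no longer frees domain_ids piecemeal. The walk itself is the stock kernel bitmap iteration; a sketch of the idiom, assuming a bitmap ids[] of nids bits:

        unsigned long i;

        for (i = find_first_bit(ids, nids);
             i < nids;
             i = find_next_bit(ids, nids, i + 1)) {
                /* bit i is set: tear down domain i here */
        }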
@@ -1309,7 +1322,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
1309} 1322}
1310 1323
1311static struct iova_domain reserved_iova_list; 1324static struct iova_domain reserved_iova_list;
1312static struct lock_class_key reserved_alloc_key;
1313static struct lock_class_key reserved_rbtree_key; 1325static struct lock_class_key reserved_rbtree_key;
1314 1326
1315static void dmar_init_reserved_ranges(void) 1327static void dmar_init_reserved_ranges(void)
@@ -1320,8 +1332,6 @@ static void dmar_init_reserved_ranges(void)
1320 1332
1321 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); 1333 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1322 1334
1323 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1324 &reserved_alloc_key);
1325 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, 1335 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1326 &reserved_rbtree_key); 1336 &reserved_rbtree_key);
1327 1337
@@ -1958,14 +1968,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1958 struct dmar_domain *domain; 1968 struct dmar_domain *domain;
1959 int ret; 1969 int ret;
1960 1970
1961 printk(KERN_INFO
1962 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1963 pci_name(pdev), start, end);
1964
1965 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 1971 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1966 if (!domain) 1972 if (!domain)
1967 return -ENOMEM; 1973 return -ENOMEM;
1968 1974
1975 /* For _hardware_ passthrough, don't bother. But for software
1976 passthrough, we do it anyway -- it may indicate a memory
1977 range which is reserved in E820 and so didn't get set
1978 up to start with in si_domain */
1979 if (domain == si_domain && hw_pass_through) {
1980 printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1981 pci_name(pdev), start, end);
1982 return 0;
1983 }
1984
1985 printk(KERN_INFO
1986 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1987 pci_name(pdev), start, end);
1988
1989 if (end >> agaw_to_width(domain->agaw)) {
1990 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1991 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1992 agaw_to_width(domain->agaw),
1993 dmi_get_system_info(DMI_BIOS_VENDOR),
1994 dmi_get_system_info(DMI_BIOS_VERSION),
1995 dmi_get_system_info(DMI_PRODUCT_VERSION));
1996 ret = -EIO;
1997 goto error;
1998 }
1999
1969 ret = iommu_domain_identity_map(domain, start, end); 2000 ret = iommu_domain_identity_map(domain, start, end);
1970 if (ret) 2001 if (ret)
1971 goto error; 2002 goto error;
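The new check refuses an RMRR whose end address does not fit in the domain's address width, blaming the BIOS rather than corrupting page tables. For reference, intel-iommu derives that width from the AGAW; a sketch of the assumed helper (mirroring agaw_to_width() elsewhere in this file: each page-table level contributes 9 address bits on top of a 30-bit base):

        static inline int agaw_to_width(int agaw)
        {
                return 30 + agaw * 9;   /* LEVEL_STRIDE == 9 */
        }

        /* e.g. agaw == 2 gives a 48-bit domain, so an RMRR ending at
         * 1ULL << 50 shifts down to a non-zero value and trips the WARN */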
@@ -2016,23 +2047,6 @@ static inline void iommu_prepare_isa(void)
2016} 2047}
2017#endif /* !CONFIG_DMAR_FLPY_WA */ 2048#endif /* !CONFIG_DMAR_FLPY_WA */
2018 2049
2019/* Initialize each context entry as pass through.*/
2020static int __init init_context_pass_through(void)
2021{
2022 struct pci_dev *pdev = NULL;
2023 struct dmar_domain *domain;
2024 int ret;
2025
2026 for_each_pci_dev(pdev) {
2027 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2028 ret = domain_context_mapping(domain, pdev,
2029 CONTEXT_TT_PASS_THROUGH);
2030 if (ret)
2031 return ret;
2032 }
2033 return 0;
2034}
2035
2036static int md_domain_init(struct dmar_domain *domain, int guest_width); 2050static int md_domain_init(struct dmar_domain *domain, int guest_width);
2037 2051
2038static int __init si_domain_work_fn(unsigned long start_pfn, 2052static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2047,7 +2061,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
2047 2061
2048} 2062}
2049 2063
2050static int si_domain_init(void) 2064static int __init si_domain_init(int hw)
2051{ 2065{
2052 struct dmar_drhd_unit *drhd; 2066 struct dmar_drhd_unit *drhd;
2053 struct intel_iommu *iommu; 2067 struct intel_iommu *iommu;
@@ -2074,6 +2088,9 @@ static int si_domain_init(void)
2074 2088
2075 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2089 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2076 2090
2091 if (hw)
2092 return 0;
2093
2077 for_each_online_node(nid) { 2094 for_each_online_node(nid) {
2078 work_with_active_regions(nid, si_domain_work_fn, &ret); 2095 work_with_active_regions(nid, si_domain_work_fn, &ret);
2079 if (ret) 2096 if (ret)
@@ -2100,15 +2117,23 @@ static int identity_mapping(struct pci_dev *pdev)
2100} 2117}
2101 2118
2102static int domain_add_dev_info(struct dmar_domain *domain, 2119static int domain_add_dev_info(struct dmar_domain *domain,
2103 struct pci_dev *pdev) 2120 struct pci_dev *pdev,
2121 int translation)
2104{ 2122{
2105 struct device_domain_info *info; 2123 struct device_domain_info *info;
2106 unsigned long flags; 2124 unsigned long flags;
2125 int ret;
2107 2126
2108 info = alloc_devinfo_mem(); 2127 info = alloc_devinfo_mem();
2109 if (!info) 2128 if (!info)
2110 return -ENOMEM; 2129 return -ENOMEM;
2111 2130
2131 ret = domain_context_mapping(domain, pdev, translation);
2132 if (ret) {
2133 free_devinfo_mem(info);
2134 return ret;
2135 }
2136
2112 info->segment = pci_domain_nr(pdev->bus); 2137 info->segment = pci_domain_nr(pdev->bus);
2113 info->bus = pdev->bus->number; 2138 info->bus = pdev->bus->number;
2114 info->devfn = pdev->devfn; 2139 info->devfn = pdev->devfn;
@@ -2165,27 +2190,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2165 return 1; 2190 return 1;
2166} 2191}
2167 2192
2168static int iommu_prepare_static_identity_mapping(void) 2193static int __init iommu_prepare_static_identity_mapping(int hw)
2169{ 2194{
2170 struct pci_dev *pdev = NULL; 2195 struct pci_dev *pdev = NULL;
2171 int ret; 2196 int ret;
2172 2197
2173 ret = si_domain_init(); 2198 ret = si_domain_init(hw);
2174 if (ret) 2199 if (ret)
2175 return -EFAULT; 2200 return -EFAULT;
2176 2201
2177 for_each_pci_dev(pdev) { 2202 for_each_pci_dev(pdev) {
2178 if (iommu_should_identity_map(pdev, 1)) { 2203 if (iommu_should_identity_map(pdev, 1)) {
2179 printk(KERN_INFO "IOMMU: identity mapping for device %s\n", 2204 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2180 pci_name(pdev)); 2205 hw ? "hardware" : "software", pci_name(pdev));
2181 2206
2182 ret = domain_context_mapping(si_domain, pdev, 2207 ret = domain_add_dev_info(si_domain, pdev,
2208 hw ? CONTEXT_TT_PASS_THROUGH :
2183 CONTEXT_TT_MULTI_LEVEL); 2209 CONTEXT_TT_MULTI_LEVEL);
2184 if (ret) 2210 if (ret)
2185 return ret; 2211 return ret;
2186 ret = domain_add_dev_info(si_domain, pdev);
2187 if (ret)
2188 return ret;
2189 } 2212 }
2190 } 2213 }
2191 2214
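With the reworked helper, the translation type travels with the device registration: hardware pass-through devices get CONTEXT_TT_PASS_THROUGH context entries (no page-table walk at all), while software identity mapping shares the si_domain's multi-level tables. Condensed, the call pattern used above and again in iommu_no_mapping() below is:

        int translation = hw_pass_through ? CONTEXT_TT_PASS_THROUGH
                                          : CONTEXT_TT_MULTI_LEVEL;

        ret = domain_add_dev_info(si_domain, pdev, translation);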
@@ -2199,14 +2222,6 @@ int __init init_dmars(void)
2199 struct pci_dev *pdev; 2222 struct pci_dev *pdev;
2200 struct intel_iommu *iommu; 2223 struct intel_iommu *iommu;
2201 int i, ret; 2224 int i, ret;
2202 int pass_through = 1;
2203
2204 /*
2205 * In case pass through can not be enabled, iommu tries to use identity
2206 * mapping.
2207 */
2208 if (iommu_pass_through)
2209 iommu_identity_mapping = 1;
2210 2225
2211 /* 2226 /*
2212 * for each drhd 2227 * for each drhd
@@ -2234,7 +2249,6 @@ int __init init_dmars(void)
2234 deferred_flush = kzalloc(g_num_of_iommus * 2249 deferred_flush = kzalloc(g_num_of_iommus *
2235 sizeof(struct deferred_flush_tables), GFP_KERNEL); 2250 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2236 if (!deferred_flush) { 2251 if (!deferred_flush) {
2237 kfree(g_iommus);
2238 ret = -ENOMEM; 2252 ret = -ENOMEM;
2239 goto error; 2253 goto error;
2240 } 2254 }
@@ -2261,14 +2275,8 @@ int __init init_dmars(void)
2261 goto error; 2275 goto error;
2262 } 2276 }
2263 if (!ecap_pass_through(iommu->ecap)) 2277 if (!ecap_pass_through(iommu->ecap))
2264 pass_through = 0; 2278 hw_pass_through = 0;
2265 } 2279 }
2266 if (iommu_pass_through)
2267 if (!pass_through) {
2268 printk(KERN_INFO
2269 "Pass Through is not supported by hardware.\n");
2270 iommu_pass_through = 0;
2271 }
2272 2280
2273 /* 2281 /*
2274 * Start from the sane iommu hardware state. 2282 * Start from the sane iommu hardware state.
@@ -2323,64 +2331,57 @@ int __init init_dmars(void)
2323 } 2331 }
2324 } 2332 }
2325 2333
2334 if (iommu_pass_through)
2335 iommu_identity_mapping = 1;
2336#ifdef CONFIG_DMAR_BROKEN_GFX_WA
2337 else
2338 iommu_identity_mapping = 2;
2339#endif
2326 /* 2340 /*
2327 * If pass through is set and enabled, context entries of all pci 2341 * If pass through is not set or not enabled, setup context entries for
2328 * devices are intialized by pass through translation type. 2342 * identity mappings for rmrr, gfx, and isa and may fall back to static
2343 * identity mapping if iommu_identity_mapping is set.
2329 */ 2344 */
2330 if (iommu_pass_through) { 2345 if (iommu_identity_mapping) {
2331 ret = init_context_pass_through(); 2346 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2332 if (ret) { 2347 if (ret) {
2333 printk(KERN_ERR "IOMMU: Pass through init failed.\n"); 2348 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2334 iommu_pass_through = 0; 2349 goto error;
2335 } 2350 }
2336 } 2351 }
2337
2338 /* 2352 /*
2339 * If pass through is not set or not enabled, setup context entries for 2353 * For each rmrr
2340 * identity mappings for rmrr, gfx, and isa and may fall back to static 2354 * for each dev attached to rmrr
2341 * identity mapping if iommu_identity_mapping is set. 2355 * do
2356 * locate drhd for dev, alloc domain for dev
2357 * allocate free domain
2358 * allocate page table entries for rmrr
2359 * if context not allocated for bus
2360 * allocate and init context
2361 * set present in root table for this bus
2362 * init context with domain, translation etc
2363 * endfor
2364 * endfor
2342 */ 2365 */
2343 if (!iommu_pass_through) { 2366 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2344#ifdef CONFIG_DMAR_BROKEN_GFX_WA 2367 for_each_rmrr_units(rmrr) {
2345 if (!iommu_identity_mapping) 2368 for (i = 0; i < rmrr->devices_cnt; i++) {
2346 iommu_identity_mapping = 2; 2369 pdev = rmrr->devices[i];
2347#endif 2370 /*
2348 if (iommu_identity_mapping) 2371 * some BIOS lists non-existent devices in DMAR
2349 iommu_prepare_static_identity_mapping(); 2372 * table.
2350 /* 2373 */
2351 * For each rmrr 2374 if (!pdev)
2352 * for each dev attached to rmrr 2375 continue;
2353 * do 2376 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2354 * locate drhd for dev, alloc domain for dev 2377 if (ret)
2355 * allocate free domain 2378 printk(KERN_ERR
2356 * allocate page table entries for rmrr 2379 "IOMMU: mapping reserved region failed\n");
2357 * if context not allocated for bus
2358 * allocate and init context
2359 * set present in root table for this bus
2360 * init context with domain, translation etc
2361 * endfor
2362 * endfor
2363 */
2364 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2365 for_each_rmrr_units(rmrr) {
2366 for (i = 0; i < rmrr->devices_cnt; i++) {
2367 pdev = rmrr->devices[i];
2368 /*
2369 * some BIOS lists non-exist devices in DMAR
2370 * table.
2371 */
2372 if (!pdev)
2373 continue;
2374 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2375 if (ret)
2376 printk(KERN_ERR
2377 "IOMMU: mapping reserved region failed\n");
2378 }
2379 } 2380 }
2380
2381 iommu_prepare_isa();
2382 } 2381 }
2383 2382
2383 iommu_prepare_isa();
2384
2384 /* 2385 /*
2385 * for each drhd 2386 * for each drhd
2386 * enable fault log 2387 * enable fault log
@@ -2403,11 +2404,12 @@ int __init init_dmars(void)
2403 2404
2404 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); 2405 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2405 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); 2406 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2406 iommu_disable_protect_mem_regions(iommu);
2407 2407
2408 ret = iommu_enable_translation(iommu); 2408 ret = iommu_enable_translation(iommu);
2409 if (ret) 2409 if (ret)
2410 goto error; 2410 goto error;
2411
2412 iommu_disable_protect_mem_regions(iommu);
2411 } 2413 }
2412 2414
2413 return 0; 2415 return 0;
@@ -2454,8 +2456,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
2454 return iova; 2456 return iova;
2455} 2457}
2456 2458
2457static struct dmar_domain * 2459static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2458get_valid_domain_for_dev(struct pci_dev *pdev)
2459{ 2460{
2460 struct dmar_domain *domain; 2461 struct dmar_domain *domain;
2461 int ret; 2462 int ret;
@@ -2483,6 +2484,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2483 return domain; 2484 return domain;
2484} 2485}
2485 2486
2487static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2488{
2489 struct device_domain_info *info;
2490
2491 /* No lock here, assumes no domain exit in normal case */
2492 info = dev->dev.archdata.iommu;
2493 if (likely(info))
2494 return info->domain;
2495
2496 return __get_valid_domain_for_dev(dev);
2497}
2498
2486static int iommu_dummy(struct pci_dev *pdev) 2499static int iommu_dummy(struct pci_dev *pdev)
2487{ 2500{
2488 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; 2501 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2525,10 +2538,10 @@ static int iommu_no_mapping(struct device *dev)
2525 */ 2538 */
2526 if (iommu_should_identity_map(pdev, 0)) { 2539 if (iommu_should_identity_map(pdev, 0)) {
2527 int ret; 2540 int ret;
2528 ret = domain_add_dev_info(si_domain, pdev); 2541 ret = domain_add_dev_info(si_domain, pdev,
2529 if (ret) 2542 hw_pass_through ?
2530 return 0; 2543 CONTEXT_TT_PASS_THROUGH :
2531 ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL); 2544 CONTEXT_TT_MULTI_LEVEL);
2532 if (!ret) { 2545 if (!ret) {
2533 printk(KERN_INFO "64bit %s uses identity mapping\n", 2546 printk(KERN_INFO "64bit %s uses identity mapping\n",
2534 pci_name(pdev)); 2547 pci_name(pdev));
@@ -2637,10 +2650,9 @@ static void flush_unmaps(void)
2637 unsigned long mask; 2650 unsigned long mask;
2638 struct iova *iova = deferred_flush[i].iova[j]; 2651 struct iova *iova = deferred_flush[i].iova[j];
2639 2652
2640 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT; 2653 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2641 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2642 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], 2654 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2643 iova->pfn_lo << PAGE_SHIFT, mask); 2655 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2644 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); 2656 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2645 } 2657 }
2646 deferred_flush[i].next = 0; 2658 deferred_flush[i].next = 0;
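The rewritten mask computation feeds the device-IOTLB invalidate, whose size operand is log2 of the page count; mm_to_dma_pfn() converts core-mm pfns to VT-d pfns (a no-op shift when both page sizes are 4 KiB). A standalone check of the arithmetic, assuming power-of-two-sized ranges as the iova allocator produces:

        #include <stdio.h>

        /* power-of-two log, standing in for the kernel's ilog2() */
        static unsigned int ilog2u(unsigned long v)
        {
                unsigned int r = 0;

                while (v >>= 1)
                        r++;
                return r;
        }

        int main(void)
        {
                unsigned long pfn_lo = 0x1000, pfn_hi = 0x1007; /* 8-page IOVA */
                unsigned int mask = ilog2u(pfn_hi - pfn_lo + 1);

                /* mask == 3: the invalidate covers 2^3 pages from pfn_lo */
                printf("mask = %u\n", mask);
                return 0;
        }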
@@ -2733,12 +2745,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2733 } 2745 }
2734} 2746}
2735 2747
2736static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2737 int dir)
2738{
2739 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2740}
2741
2742static void *intel_alloc_coherent(struct device *hwdev, size_t size, 2748static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2743 dma_addr_t *dma_handle, gfp_t flags) 2749 dma_addr_t *dma_handle, gfp_t flags)
2744{ 2750{
@@ -2771,7 +2777,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2771 size = PAGE_ALIGN(size); 2777 size = PAGE_ALIGN(size);
2772 order = get_order(size); 2778 order = get_order(size);
2773 2779
2774 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); 2780 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2775 free_pages((unsigned long)vaddr, order); 2781 free_pages((unsigned long)vaddr, order);
2776} 2782}
2777 2783
@@ -2807,11 +2813,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2807 /* free page tables */ 2813 /* free page tables */
2808 dma_pte_free_pagetable(domain, start_pfn, last_pfn); 2814 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2809 2815
2810 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 2816 if (intel_iommu_strict) {
2811 (last_pfn - start_pfn + 1)); 2817 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2812 2818 last_pfn - start_pfn + 1);
2813 /* free iova */ 2819 /* free iova */
2814 __free_iova(&domain->iovad, iova); 2820 __free_iova(&domain->iovad, iova);
2821 } else {
2822 add_unmap(domain, iova);
2823 /*
2824 * queue up the release of the unmap to save the 1/6th of the
2825 * cpu used up by the iotlb flush operation...
2826 */
2827 }
2815} 2828}
2816 2829
2817static int intel_nontranslate_map_sg(struct device *hddev, 2830static int intel_nontranslate_map_sg(struct device *hddev,
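intel_unmap_sg() now honors the same strict/lazy split as the page unmap path: in strict mode (the documented intel_iommu=strict boot option) the IOTLB is flushed synchronously and the iova freed at once; otherwise add_unmap() queues the iova for the batched flush_unmaps() pass, accepting a short window of stale TLB entries in exchange for far fewer invalidations. A toy model of the trade-off (the batch size is illustrative):

        #include <stdio.h>

        static unsigned long flushes;

        static void flush_iotlb(void)   /* stand-in for the real invalidate */
        {
                flushes++;
        }

        int main(void)
        {
                const int unmaps = 1000, batch = 250;
                int pending = 0, i;

                for (i = 0; i < unmaps; i++) {
                        if (++pending == batch) {       /* lazy: amortize the flush */
                                flush_iotlb();
                                pending = 0;
                        }
                }
                if (pending)
                        flush_iotlb();

                printf("lazy: %lu flushes; strict would issue %d\n", flushes, unmaps);
                return 0;
        }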
@@ -3055,8 +3068,8 @@ static int init_iommu_hw(void)
3055 DMA_CCMD_GLOBAL_INVL); 3068 DMA_CCMD_GLOBAL_INVL);
3056 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 3069 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3057 DMA_TLB_GLOBAL_FLUSH); 3070 DMA_TLB_GLOBAL_FLUSH);
3058 iommu_disable_protect_mem_regions(iommu);
3059 iommu_enable_translation(iommu); 3071 iommu_enable_translation(iommu);
3072 iommu_disable_protect_mem_regions(iommu);
3060 } 3073 }
3061 3074
3062 return 0; 3075 return 0;
@@ -3183,18 +3196,28 @@ static int __init init_iommu_sysfs(void)
3183int __init intel_iommu_init(void) 3196int __init intel_iommu_init(void)
3184{ 3197{
3185 int ret = 0; 3198 int ret = 0;
3199 int force_on = 0;
3186 3200
3187 if (dmar_table_init()) 3201 /* VT-d is required for a TXT/tboot launch, so enforce that */
3202 force_on = tboot_force_iommu();
3203
3204 if (dmar_table_init()) {
3205 if (force_on)
3206 panic("tboot: Failed to initialize DMAR table\n");
3188 return -ENODEV; 3207 return -ENODEV;
3208 }
3189 3209
3190 if (dmar_dev_scope_init()) 3210 if (dmar_dev_scope_init()) {
3211 if (force_on)
3212 panic("tboot: Failed to initialize DMAR device scope\n");
3191 return -ENODEV; 3213 return -ENODEV;
3214 }
3192 3215
3193 /* 3216 /*
3194 * Check the need for DMA-remapping initialization now. 3217 * Check the need for DMA-remapping initialization now.
3195 * Above initialization will also be used by Interrupt-remapping. 3218 * Above initialization will also be used by Interrupt-remapping.
3196 */ 3219 */
3197 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled) 3220 if (no_iommu || swiotlb || dmar_disabled)
3198 return -ENODEV; 3221 return -ENODEV;
3199 3222
3200 iommu_init_mempool(); 3223 iommu_init_mempool();
@@ -3204,6 +3227,8 @@ int __init intel_iommu_init(void)
3204 3227
3205 ret = init_dmars(); 3228 ret = init_dmars();
3206 if (ret) { 3229 if (ret) {
3230 if (force_on)
3231 panic("tboot: Failed to initialize DMARs\n");
3207 printk(KERN_ERR "IOMMU: dmar init failed\n"); 3232 printk(KERN_ERR "IOMMU: dmar init failed\n");
3208 put_iova_domain(&reserved_iova_list); 3233 put_iova_domain(&reserved_iova_list);
3209 iommu_exit_mempool(); 3234 iommu_exit_mempool();
@@ -3214,14 +3239,7 @@ int __init intel_iommu_init(void)
3214 3239
3215 init_timer(&unmap_timer); 3240 init_timer(&unmap_timer);
3216 force_iommu = 1; 3241 force_iommu = 1;
3217 3242 dma_ops = &intel_dma_ops;
3218 if (!iommu_pass_through) {
3219 printk(KERN_INFO
3220 "Multi-level page-table translation for DMAR.\n");
3221 dma_ops = &intel_dma_ops;
3222 } else
3223 printk(KERN_INFO
3224 "DMAR: Pass through translation for DMAR.\n");
3225 3243
3226 init_iommu_sysfs(); 3244 init_iommu_sysfs();
3227 3245
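Under an Intel TXT measured launch, tboot_force_iommu() reports that VT-d must stay enabled, so each early failure above escalates to panic() instead of quietly falling back to unprotected DMA. The shape of the pattern, with some_init_step() as a placeholder:

        int force_on = tboot_force_iommu();     /* non-zero in a measured launch */

        if (some_init_step()) {
                if (force_on)                   /* refusing to run beats a */
                        panic("tboot: init step failed\n");  /* silent downgrade */
                return -ENODEV;                 /* normal boot degrades gracefully */
        }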
@@ -3504,7 +3522,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3504 struct intel_iommu *iommu; 3522 struct intel_iommu *iommu;
3505 int addr_width; 3523 int addr_width;
3506 u64 end; 3524 u64 end;
3507 int ret;
3508 3525
3509 /* normally pdev is not mapped */ 3526 /* normally pdev is not mapped */
3510 if (unlikely(domain_context_mapped(pdev))) { 3527 if (unlikely(domain_context_mapped(pdev))) {
@@ -3536,12 +3553,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3536 return -EFAULT; 3553 return -EFAULT;
3537 } 3554 }
3538 3555
3539 ret = domain_add_dev_info(dmar_domain, pdev); 3556 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3540 if (ret)
3541 return ret;
3542
3543 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3544 return ret;
3545} 3557}
3546 3558
3547static void intel_iommu_detach_device(struct iommu_domain *domain, 3559static void intel_iommu_detach_device(struct iommu_domain *domain,
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 4f5b8712931f..0ed78a764ded 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
55 return desc->irq_2_iommu; 55 return desc->irq_2_iommu;
56} 56}
57 57
58static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) 58static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
59{ 59{
60 struct irq_desc *desc; 60 struct irq_desc *desc;
61 struct irq_2_iommu *irq_iommu; 61 struct irq_2_iommu *irq_iommu;
62 62
63 /* 63 desc = irq_to_desc(irq);
64 * alloc irq desc if not allocated already.
65 */
66 desc = irq_to_desc_alloc_node(irq, node);
67 if (!desc) { 64 if (!desc) {
68 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 65 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
69 return NULL; 66 return NULL;
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
72 irq_iommu = desc->irq_2_iommu; 69 irq_iommu = desc->irq_2_iommu;
73 70
74 if (!irq_iommu) 71 if (!irq_iommu)
75 desc->irq_2_iommu = get_one_free_irq_2_iommu(node); 72 desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
76 73
77 return desc->irq_2_iommu; 74 return desc->irq_2_iommu;
78} 75}
79 76
80static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
81{
82 return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
83}
84
85#else /* !CONFIG_SPARSE_IRQ */ 77#else /* !CONFIG_SPARSE_IRQ */
86 78
87static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 79static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
@@ -611,6 +603,9 @@ int __init intr_remapping_supported(void)
611 if (disable_intremap) 603 if (disable_intremap)
612 return 0; 604 return 0;
613 605
606 if (!dmar_ir_support())
607 return 0;
608
614 for_each_drhd_unit(drhd) { 609 for_each_drhd_unit(drhd) {
615 struct intel_iommu *iommu = drhd->iommu; 610 struct intel_iommu *iommu = drhd->iommu;
616 611
@@ -626,6 +621,11 @@ int __init enable_intr_remapping(int eim)
626 struct dmar_drhd_unit *drhd; 621 struct dmar_drhd_unit *drhd;
627 int setup = 0; 622 int setup = 0;
628 623
624 if (parse_ioapics_under_ir() != 1) {
625 printk(KERN_INFO "Not enabling interrupt remapping\n");
626 return -1;
627 }
628
629 for_each_drhd_unit(drhd) { 629 for_each_drhd_unit(drhd) {
630 struct intel_iommu *iommu = drhd->iommu; 630 struct intel_iommu *iommu = drhd->iommu;
631 631
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
22void 22void
23init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) 23init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
24{ 24{
25 spin_lock_init(&iovad->iova_alloc_lock);
26 spin_lock_init(&iovad->iova_rbtree_lock); 25 spin_lock_init(&iovad->iova_rbtree_lock);
27 iovad->rbroot = RB_ROOT; 26 iovad->rbroot = RB_ROOT;
28 iovad->cached32_node = NULL; 27 iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
205 unsigned long limit_pfn, 204 unsigned long limit_pfn,
206 bool size_aligned) 205 bool size_aligned)
207{ 206{
208 unsigned long flags;
209 struct iova *new_iova; 207 struct iova *new_iova;
210 int ret; 208 int ret;
211 209
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
219 if (size_aligned) 217 if (size_aligned)
220 size = __roundup_pow_of_two(size); 218 size = __roundup_pow_of_two(size);
221 219
222 spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
223 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, 220 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
224 new_iova, size_aligned); 221 new_iova, size_aligned);
225 222
226 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
227 if (ret) { 223 if (ret) {
228 free_iova_mem(new_iova); 224 free_iova_mem(new_iova);
229 return NULL; 225 return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
381 struct iova *iova; 377 struct iova *iova;
382 unsigned int overlap = 0; 378 unsigned int overlap = 0;
383 379
384 spin_lock_irqsave(&iovad->iova_alloc_lock, flags); 380 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
385 spin_lock(&iovad->iova_rbtree_lock);
386 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { 381 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
387 if (__is_range_overlap(node, pfn_lo, pfn_hi)) { 382 if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
388 iova = container_of(node, struct iova, node); 383 iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
402 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); 397 iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
403finish: 398finish:
404 399
405 spin_unlock(&iovad->iova_rbtree_lock); 400 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
406 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
407 return iova; 401 return iova;
408} 402}
409 403
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
420 unsigned long flags; 414 unsigned long flags;
421 struct rb_node *node; 415 struct rb_node *node;
422 416
423 spin_lock_irqsave(&from->iova_alloc_lock, flags); 417 spin_lock_irqsave(&from->iova_rbtree_lock, flags);
424 spin_lock(&from->iova_rbtree_lock);
425 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { 418 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
426 struct iova *iova = container_of(node, struct iova, node); 419 struct iova *iova = container_of(node, struct iova, node);
427 struct iova *new_iova; 420 struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
430 printk(KERN_ERR "Reserve iova range %lx@%lx failed\n", 423 printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
431 iova->pfn_lo, iova->pfn_lo); 424 iova->pfn_lo, iova->pfn_lo);
432 } 425 }
433 spin_unlock(&from->iova_rbtree_lock); 426 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
434 spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
435} 427}
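The iova_alloc_lock was pure overhead: every path that took it immediately took iova_rbtree_lock as well, so the outer lock serialized nothing the inner one did not already cover. After this diet, every tree access is a single critical section on the remaining lock:

        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        /* ... insert into / search / walk iovad->rbroot ... */
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);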
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
new file mode 100644
index 000000000000..871f65c15936
--- /dev/null
+++ b/drivers/pci/legacy.c
@@ -0,0 +1,34 @@
1#include <linux/init.h>
2#include <linux/pci.h>
3#include <linux/module.h>
4#include <linux/interrupt.h>
5#include "pci.h"
6
7/**
8 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
9 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
10 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
11 * @from: Previous PCI device found in search, or %NULL for new search.
12 *
13 * Iterates through the list of known PCI devices. If a PCI device is found
14 * with a matching @vendor and @device, a pointer to its device structure is
15 * returned. Otherwise, %NULL is returned.
16 * A new search is initiated by passing %NULL as the @from argument.
17 * Otherwise if @from is not %NULL, the search continues from the next
18 * device on the global list.
19 *
20 * NOTE: Do not use this function any more; use pci_get_device() instead, as
21 * the PCI device returned by this function can disappear at any moment in
22 * time.
23 */
24struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
25 struct pci_dev *from)
26{
27 struct pci_dev *pdev;
28
29 pci_dev_get(from);
30 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
31 pci_dev_put(pdev);
32 return pdev;
33}
34EXPORT_SYMBOL(pci_find_device);
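Note that the shim drops its reference before returning, so callers of pci_find_device() still end up holding an unrefcounted pointer; that is exactly the hazard the NOTE warns about. New code should iterate with the refcounted API directly; a sketch:

        /* hotplug-safe scan: pci_get_device() holds a reference on the
         * device it returns and releases the one passed in as @from */
        struct pci_dev *pdev = NULL;

        while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
                /* safe to use pdev for the body of the loop */
        }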
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032b..f9cf3173b23d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/msi.h> 17#include <linux/msi.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19 19#include <linux/errno.h>
20#include <asm/errno.h> 20#include <linux/io.h>
21#include <asm/io.h>
22 21
23#include "pci.h" 22#include "pci.h"
24#include "msi.h" 23#include "msi.h"
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
272 write_msi_msg_desc(desc, msg); 271 write_msi_msg_desc(desc, msg);
273} 272}
274 273
275static int msi_free_irqs(struct pci_dev* dev); 274static void free_msi_irqs(struct pci_dev *dev)
275{
276 struct msi_desc *entry, *tmp;
277
278 list_for_each_entry(entry, &dev->msi_list, list) {
279 int i, nvec;
280 if (!entry->irq)
281 continue;
282 nvec = 1 << entry->msi_attrib.multiple;
283 for (i = 0; i < nvec; i++)
284 BUG_ON(irq_has_action(entry->irq + i));
285 }
286
287 arch_teardown_msi_irqs(dev);
288
289 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
290 if (entry->msi_attrib.is_msix) {
291 if (list_is_last(&entry->list, &dev->msi_list))
292 iounmap(entry->mask_base);
293 }
294 list_del(&entry->list);
295 kfree(entry);
296 }
297}
276 298
277static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) 299static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
278{ 300{
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
324 if (!dev->msix_enabled) 346 if (!dev->msix_enabled)
325 return; 347 return;
326 BUG_ON(list_empty(&dev->msi_list)); 348 BUG_ON(list_empty(&dev->msi_list));
327 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 349 entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
328 pos = entry->msi_attrib.pos; 350 pos = entry->msi_attrib.pos;
329 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 351 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
330 352
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
367 u16 control; 389 u16 control;
368 unsigned mask; 390 unsigned mask;
369 391
370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 392 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
371 msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ 393 msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
372 394
373 pci_read_config_word(dev, msi_control_reg(pos), &control); 395 pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
376 if (!entry) 398 if (!entry)
377 return -ENOMEM; 399 return -ENOMEM;
378 400
379 entry->msi_attrib.is_msix = 0; 401 entry->msi_attrib.is_msix = 0;
380 entry->msi_attrib.is_64 = is_64bit_address(control); 402 entry->msi_attrib.is_64 = is_64bit_address(control);
381 entry->msi_attrib.entry_nr = 0; 403 entry->msi_attrib.entry_nr = 0;
382 entry->msi_attrib.maskbit = is_mask_bit_support(control); 404 entry->msi_attrib.maskbit = is_mask_bit_support(control);
383 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 405 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
384 entry->msi_attrib.pos = pos; 406 entry->msi_attrib.pos = pos;
385 407
386 entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); 408 entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
387 /* All MSIs are unmasked by default, Mask them all */ 409 /* All MSIs are unmasked by default, Mask them all */
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
396 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); 418 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
397 if (ret) { 419 if (ret) {
398 msi_mask_irq(entry, mask, ~mask); 420 msi_mask_irq(entry, mask, ~mask);
399 msi_free_irqs(dev); 421 free_msi_irqs(dev);
400 return ret; 422 return ret;
401 } 423 }
402 424
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
409 return 0; 431 return 0;
410} 432}
411 433
412/** 434static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
413 * msix_capability_init - configure device's MSI-X capability 435 unsigned nr_entries)
414 * @dev: pointer to the pci_dev data structure of MSI-X device function
415 * @entries: pointer to an array of struct msix_entry entries
416 * @nvec: number of @entries
417 *
418 * Setup the MSI-X capability structure of device function with a
419 * single MSI-X irq. A return of zero indicates the successful setup of
420 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
421 **/
422static int msix_capability_init(struct pci_dev *dev,
423 struct msix_entry *entries, int nvec)
424{ 436{
425 struct msi_desc *entry;
426 int pos, i, j, nr_entries, ret;
427 unsigned long phys_addr; 437 unsigned long phys_addr;
428 u32 table_offset; 438 u32 table_offset;
429 u16 control;
430 u8 bir; 439 u8 bir;
431 void __iomem *base;
432 440
433 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 441 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
434 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
435
436 /* Ensure MSI-X is disabled while it is set up */
437 control &= ~PCI_MSIX_FLAGS_ENABLE;
438 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
439
440 /* Request & Map MSI-X table region */
441 nr_entries = multi_msix_capable(control);
442
443 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
444 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 442 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
445 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 443 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
446 phys_addr = pci_resource_start (dev, bir) + table_offset; 444 phys_addr = pci_resource_start(dev, bir) + table_offset;
447 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 445
448 if (base == NULL) 446 return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
449 return -ENOMEM; 447}
448
449static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
450 void __iomem *base, struct msix_entry *entries,
451 int nvec)
452{
453 struct msi_desc *entry;
454 int i;
450 455
451 for (i = 0; i < nvec; i++) { 456 for (i = 0; i < nvec; i++) {
452 entry = alloc_msi_entry(dev); 457 entry = alloc_msi_entry(dev);
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev,
454 if (!i) 459 if (!i)
455 iounmap(base); 460 iounmap(base);
456 else 461 else
457 msi_free_irqs(dev); 462 free_msi_irqs(dev);
458 /* Not enough memory. Don't try again */ 463 /* Not enough memory. Don't try again */
459 return -ENOMEM; 464 return -ENOMEM;
460 } 465 }
461 466
462 j = entries[i].entry; 467 entry->msi_attrib.is_msix = 1;
463 entry->msi_attrib.is_msix = 1; 468 entry->msi_attrib.is_64 = 1;
464 entry->msi_attrib.is_64 = 1; 469 entry->msi_attrib.entry_nr = entries[i].entry;
465 entry->msi_attrib.entry_nr = j; 470 entry->msi_attrib.default_irq = dev->irq;
466 entry->msi_attrib.default_irq = dev->irq; 471 entry->msi_attrib.pos = pos;
467 entry->msi_attrib.pos = pos; 472 entry->mask_base = base;
468 entry->mask_base = base;
469 473
470 list_add_tail(&entry->list, &dev->msi_list); 474 list_add_tail(&entry->list, &dev->msi_list);
471 } 475 }
472 476
473 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 477 return 0;
474 if (ret < 0) { 478}
475 /* If we had some success report the number of irqs
476 * we succeeded in setting up. */
477 int avail = 0;
478 list_for_each_entry(entry, &dev->msi_list, list) {
479 if (entry->irq != 0) {
480 avail++;
481 }
482 }
483 479
484 if (avail != 0) 480static void msix_program_entries(struct pci_dev *dev,
485 ret = avail; 481 struct msix_entry *entries)
482{
483 struct msi_desc *entry;
484 int i = 0;
485
486 list_for_each_entry(entry, &dev->msi_list, list) {
487 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
488 PCI_MSIX_ENTRY_VECTOR_CTRL;
489
490 entries[i].vector = entry->irq;
491 set_irq_msi(entry->irq, entry);
492 entry->masked = readl(entry->mask_base + offset);
493 msix_mask_irq(entry, 1);
494 i++;
486 } 495 }
496}
487 497
488 if (ret) { 498/**
489 msi_free_irqs(dev); 499 * msix_capability_init - configure device's MSI-X capability
500 * @dev: pointer to the pci_dev data structure of MSI-X device function
501 * @entries: pointer to an array of struct msix_entry entries
502 * @nvec: number of @entries
503 *
504 * Set up the MSI-X capability structure of the device function with the
505 * requested number of MSI-X irqs. A return of zero indicates successful
506 * setup of the requested MSI-X entries with allocated irqs; non-zero indicates an error.
507 **/
508static int msix_capability_init(struct pci_dev *dev,
509 struct msix_entry *entries, int nvec)
510{
511 int pos, ret;
512 u16 control;
513 void __iomem *base;
514
515 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
516 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
517
518 /* Ensure MSI-X is disabled while it is set up */
519 control &= ~PCI_MSIX_FLAGS_ENABLE;
520 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
521
522 /* Request & Map MSI-X table region */
523 base = msix_map_region(dev, pos, multi_msix_capable(control));
524 if (!base)
525 return -ENOMEM;
526
527 ret = msix_setup_entries(dev, pos, base, entries, nvec);
528 if (ret)
490 return ret; 529 return ret;
491 } 530
531 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
532 if (ret)
533 goto error;
492 534
493 /* 535 /*
494 * Some devices require MSI-X to be enabled before we can touch the 536 * Some devices require MSI-X to be enabled before we can touch the
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev,
498 control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; 540 control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
499 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 541 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
500 542
501 i = 0; 543 msix_program_entries(dev, entries);
502 list_for_each_entry(entry, &dev->msi_list, list) {
503 entries[i].vector = entry->irq;
504 set_irq_msi(entry->irq, entry);
505 j = entries[i].entry;
506 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
507 PCI_MSIX_ENTRY_VECTOR_CTRL);
508 msix_mask_irq(entry, 1);
509 i++;
510 }
511 544
512 /* Set MSI-X enabled bits and unmask the function */ 545 /* Set MSI-X enabled bits and unmask the function */
513 pci_intx_for_msi(dev, 0); 546 pci_intx_for_msi(dev, 0);
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev,
517 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 550 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
518 551
519 return 0; 552 return 0;
553
554error:
555 if (ret < 0) {
556 /*
557 * If we had some success, report the number of irqs
558 * we succeeded in setting up.
559 */
560 struct msi_desc *entry;
561 int avail = 0;
562
563 list_for_each_entry(entry, &dev->msi_list, list) {
564 if (entry->irq != 0)
565 avail++;
566 }
567 if (avail != 0)
568 ret = avail;
569 }
570
571 free_msi_irqs(dev);
572
573 return ret;
520} 574}
521 575
522/** 576/**
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev,
529 * to determine if MSI/-X are supported for the device. If MSI/-X is 583 * to determine if MSI/-X are supported for the device. If MSI/-X is
530 * supported return 0, else return an error code. 584 * supported return 0, else return an error code.
531 **/ 585 **/
532static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) 586static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
533{ 587{
534 struct pci_bus *bus; 588 struct pci_bus *bus;
535 int ret; 589 int ret;
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
546 if (nvec < 1) 600 if (nvec < 1)
547 return -ERANGE; 601 return -ERANGE;
548 602
549 /* Any bridge which does NOT route MSI transactions from it's 603 /*
550 * secondary bus to it's primary bus must set NO_MSI flag on 604 * Any bridge which does NOT route MSI transactions from its
605 * secondary bus to its primary bus must set NO_MSI flag on
551 * the secondary pci_bus. 606 * the secondary pci_bus.
552 * We expect only arch-specific PCI host bus controller driver 607 * We expect only arch-specific PCI host bus controller driver
553 * or quirks for specific PCI bridges to be setting NO_MSI. 608 * or quirks for specific PCI bridges to be setting NO_MSI.
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev)
638 dev->irq = desc->msi_attrib.default_irq; 693 dev->irq = desc->msi_attrib.default_irq;
639} 694}
640 695
641void pci_disable_msi(struct pci_dev* dev) 696void pci_disable_msi(struct pci_dev *dev)
642{ 697{
643 struct msi_desc *entry;
644
645 if (!pci_msi_enable || !dev || !dev->msi_enabled) 698 if (!pci_msi_enable || !dev || !dev->msi_enabled)
646 return; 699 return;
647 700
648 pci_msi_shutdown(dev); 701 pci_msi_shutdown(dev);
649 702 free_msi_irqs(dev);
650 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
651 if (entry->msi_attrib.is_msix)
652 return;
653
654 msi_free_irqs(dev);
655} 703}
656EXPORT_SYMBOL(pci_disable_msi); 704EXPORT_SYMBOL(pci_disable_msi);
657 705
658static int msi_free_irqs(struct pci_dev* dev)
659{
660 struct msi_desc *entry, *tmp;
661
662 list_for_each_entry(entry, &dev->msi_list, list) {
663 int i, nvec;
664 if (!entry->irq)
665 continue;
666 nvec = 1 << entry->msi_attrib.multiple;
667 for (i = 0; i < nvec; i++)
668 BUG_ON(irq_has_action(entry->irq + i));
669 }
670
671 arch_teardown_msi_irqs(dev);
672
673 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
674 if (entry->msi_attrib.is_msix) {
675 if (list_is_last(&entry->list, &dev->msi_list))
676 iounmap(entry->mask_base);
677 }
678 list_del(&entry->list);
679 kfree(entry);
680 }
681
682 return 0;
683}
684
685/** 706/**
686 * pci_msix_table_size - return the number of device's MSI-X table entries 707 * pci_msix_table_size - return the number of device's MSI-X table entries
687 * @dev: pointer to the pci_dev data structure of MSI-X device function 708 * @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev)
714 * of irqs or MSI-X vectors available. Driver should use the returned value to 735 * of irqs or MSI-X vectors available. Driver should use the returned value to
715 * re-send its request. 736 * re-send its request.
716 **/ 737 **/
717int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 738int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
718{ 739{
719 int status, nr_entries; 740 int status, nr_entries;
720 int i, j; 741 int i, j;
721 742
722 if (!entries) 743 if (!entries)
723 return -EINVAL; 744 return -EINVAL;
724 745
725 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); 746 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
726 if (status) 747 if (status)
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
742 WARN_ON(!!dev->msix_enabled); 763 WARN_ON(!!dev->msix_enabled);
743 764
744 /* Check whether driver already requested for MSI irq */ 765 /* Check whether driver already requested for MSI irq */
745 if (dev->msi_enabled) { 766 if (dev->msi_enabled) {
746 dev_info(&dev->dev, "can't enable MSI-X " 767 dev_info(&dev->dev, "can't enable MSI-X "
747 "(MSI IRQ already assigned)\n"); 768 "(MSI IRQ already assigned)\n");
748 return -EINVAL; 769 return -EINVAL;
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
752} 773}
753EXPORT_SYMBOL(pci_enable_msix); 774EXPORT_SYMBOL(pci_enable_msix);
754 775
755static void msix_free_all_irqs(struct pci_dev *dev) 776void pci_msix_shutdown(struct pci_dev *dev)
756{
757 msi_free_irqs(dev);
758}
759
760void pci_msix_shutdown(struct pci_dev* dev)
761{ 777{
762 struct msi_desc *entry; 778 struct msi_desc *entry;
763 779
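Per the pci_enable_msix() contract above, a positive return is the number of vectors actually available, and the caller is expected to retry with that count. A hypothetical driver fragment:

        struct msix_entry entries[4];
        int i, err;

        for (i = 0; i < 4; i++)
                entries[i].entry = i;           /* desired table slots */

        err = pci_enable_msix(pdev, entries, 4);
        if (err > 0)                            /* only err vectors available */
                err = pci_enable_msix(pdev, entries, err);
        if (err)
                return err;                     /* negative: hard failure */
        /* success: entries[i].vector now holds the allocated irqs */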
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev)
774 pci_intx_for_msi(dev, 1); 790 pci_intx_for_msi(dev, 1);
775 dev->msix_enabled = 0; 791 dev->msix_enabled = 0;
776} 792}
777void pci_disable_msix(struct pci_dev* dev) 793
794void pci_disable_msix(struct pci_dev *dev)
778{ 795{
779 if (!pci_msi_enable || !dev || !dev->msix_enabled) 796 if (!pci_msi_enable || !dev || !dev->msix_enabled)
780 return; 797 return;
781 798
782 pci_msix_shutdown(dev); 799 pci_msix_shutdown(dev);
783 800 free_msi_irqs(dev);
784 msix_free_all_irqs(dev);
785} 801}
786EXPORT_SYMBOL(pci_disable_msix); 802EXPORT_SYMBOL(pci_disable_msix);
787 803
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix);
794 * allocated for this device function, are reclaimed to unused state, 810 * allocated for this device function, are reclaimed to unused state,
795 * which may be used later on. 811 * which may be used later on.
796 **/ 812 **/
797void msi_remove_pci_irq_vectors(struct pci_dev* dev) 813void msi_remove_pci_irq_vectors(struct pci_dev *dev)
798{ 814{
799 if (!pci_msi_enable || !dev) 815 if (!pci_msi_enable || !dev)
800 return; 816 return;
801
802 if (dev->msi_enabled)
803 msi_free_irqs(dev);
804 817
805 if (dev->msix_enabled) 818 if (dev->msi_enabled || dev->msix_enabled)
806 msix_free_all_irqs(dev); 819 free_msi_irqs(dev);
807} 820}
808 821
809void pci_no_msi(void) 822void pci_no_msi(void)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ea15b0537457..33317df47699 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -109,15 +109,32 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
109 return handle ? acpi_bus_can_wakeup(handle) : false; 109 return handle ? acpi_bus_can_wakeup(handle) : false;
110} 110}
111 111
112static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
113{
114 while (bus->parent) {
115 struct pci_dev *bridge = bus->self;
116 int ret;
117
118 ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
119 if (!ret || bridge->is_pcie)
120 return;
121 bus = bus->parent;
122 }
123
124 /* We have reached the root bus. */
125 if (bus->bridge)
126 acpi_pm_device_sleep_wake(bus->bridge, enable);
127}
128
112static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) 129static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
113{ 130{
114 int error = acpi_pm_device_sleep_wake(&dev->dev, enable); 131 if (acpi_pci_can_wakeup(dev))
132 return acpi_pm_device_sleep_wake(&dev->dev, enable);
115 133
116 if (!error) 134 if (!dev->is_pcie)
117 dev_printk(KERN_INFO, &dev->dev, 135 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
118 "wake-up capability %s by ACPI\n", 136
119 enable ? "enabled" : "disabled"); 137 return 0;
120 return error;
121} 138}
122 139
123static struct pci_platform_pm_ops acpi_pci_platform_pm = { 140static struct pci_platform_pm_ops acpi_pci_platform_pm = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f99bc7f089f1..e5d47be3c6d7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,37 +19,98 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include "pci.h" 20#include "pci.h"
21 21
22/*
23 * Dynamic device IDs are disabled for !CONFIG_HOTPLUG
24 */
25
26struct pci_dynid { 22struct pci_dynid {
27 struct list_head node; 23 struct list_head node;
28 struct pci_device_id id; 24 struct pci_device_id id;
29}; 25};
30 26
31#ifdef CONFIG_HOTPLUG 27/**
28 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
29 * @drv: target pci driver
30 * @vendor: PCI vendor ID
31 * @device: PCI device ID
32 * @subvendor: PCI subvendor ID
33 * @subdevice: PCI subdevice ID
34 * @class: PCI class
35 * @class_mask: PCI class mask
36 * @driver_data: private driver data
37 *
38 * Adds a new dynamic pci device ID to this driver and causes the
39 * driver to probe for all devices again. @drv must have been
40 * registered prior to calling this function.
41 *
42 * CONTEXT:
43 * Does GFP_KERNEL allocation.
44 *
45 * RETURNS:
46 * 0 on success, -errno on failure.
47 */
48int pci_add_dynid(struct pci_driver *drv,
49 unsigned int vendor, unsigned int device,
50 unsigned int subvendor, unsigned int subdevice,
51 unsigned int class, unsigned int class_mask,
52 unsigned long driver_data)
53{
54 struct pci_dynid *dynid;
55 int retval;
56
57 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
58 if (!dynid)
59 return -ENOMEM;
60
61 dynid->id.vendor = vendor;
62 dynid->id.device = device;
63 dynid->id.subvendor = subvendor;
64 dynid->id.subdevice = subdevice;
65 dynid->id.class = class;
66 dynid->id.class_mask = class_mask;
67 dynid->id.driver_data = driver_data;
68
69 spin_lock(&drv->dynids.lock);
70 list_add_tail(&dynid->node, &drv->dynids.list);
71 spin_unlock(&drv->dynids.lock);
72
73 get_driver(&drv->driver);
74 retval = driver_attach(&drv->driver);
75 put_driver(&drv->driver);
76
77 return retval;
78}
79
80static void pci_free_dynids(struct pci_driver *drv)
81{
82 struct pci_dynid *dynid, *n;
32 83
84 spin_lock(&drv->dynids.lock);
85 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
86 list_del(&dynid->node);
87 kfree(dynid);
88 }
89 spin_unlock(&drv->dynids.lock);
90}
91
92/*
93 * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG
94 */
95#ifdef CONFIG_HOTPLUG
33/** 96/**
34 * store_new_id - add a new PCI device ID to this driver and re-probe devices 97 * store_new_id - sysfs frontend to pci_add_dynid()
35 * @driver: target device driver 98 * @driver: target device driver
36 * @buf: buffer for scanning device ID data 99 * @buf: buffer for scanning device ID data
37 * @count: input size 100 * @count: input size
38 * 101 *
39 * Adds a new dynamic pci device ID to this driver, 102 * Allow PCI IDs to be added to an existing driver via sysfs.
40 * and causes the driver to probe for all devices again.
41 */ 103 */
42static ssize_t 104static ssize_t
43store_new_id(struct device_driver *driver, const char *buf, size_t count) 105store_new_id(struct device_driver *driver, const char *buf, size_t count)
44{ 106{
45 struct pci_dynid *dynid;
46 struct pci_driver *pdrv = to_pci_driver(driver); 107 struct pci_driver *pdrv = to_pci_driver(driver);
47 const struct pci_device_id *ids = pdrv->id_table; 108 const struct pci_device_id *ids = pdrv->id_table;
48 __u32 vendor, device, subvendor=PCI_ANY_ID, 109 __u32 vendor, device, subvendor=PCI_ANY_ID,
49 subdevice=PCI_ANY_ID, class=0, class_mask=0; 110 subdevice=PCI_ANY_ID, class=0, class_mask=0;
50 unsigned long driver_data=0; 111 unsigned long driver_data=0;
51 int fields=0; 112 int fields=0;
52 int retval=0; 113 int retval;
53 114
54 fields = sscanf(buf, "%x %x %x %x %x %x %lx", 115 fields = sscanf(buf, "%x %x %x %x %x %x %lx",
55 &vendor, &device, &subvendor, &subdevice, 116 &vendor, &device, &subvendor, &subdevice,
@@ -72,27 +133,8 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
72 return retval; 133 return retval;
73 } 134 }
74 135
75 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 136 retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
76 if (!dynid) 137 class, class_mask, driver_data);
77 return -ENOMEM;
78
79 dynid->id.vendor = vendor;
80 dynid->id.device = device;
81 dynid->id.subvendor = subvendor;
82 dynid->id.subdevice = subdevice;
83 dynid->id.class = class;
84 dynid->id.class_mask = class_mask;
85 dynid->id.driver_data = driver_data;
86
87 spin_lock(&pdrv->dynids.lock);
88 list_add_tail(&dynid->node, &pdrv->dynids.list);
89 spin_unlock(&pdrv->dynids.lock);
90
91 if (get_driver(&pdrv->driver)) {
92 retval = driver_attach(&pdrv->driver);
93 put_driver(&pdrv->driver);
94 }
95
96 if (retval) 138 if (retval)
97 return retval; 139 return retval;
98 return count; 140 return count;
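With the ID bookkeeping factored out of the sysfs handler, pci_add_dynid() can be called from other kernel code to attach an extra ID to an already-registered driver and re-trigger probing. A hypothetical caller (the IDs are examples only; assumes the symbol is visible to the caller):

        err = pci_add_dynid(&my_pci_driver,             /* registered earlier */
                            0x8086, 0xbeef,             /* vendor, device */
                            PCI_ANY_ID, PCI_ANY_ID,     /* subvendor, subdevice */
                            0, 0,                       /* class, class_mask: any */
                            0);                         /* driver_data */
        /* on success the core re-runs matching, probing any new devices */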
@@ -145,19 +187,6 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
145} 187}
146static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); 188static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
147 189
148static void
149pci_free_dynids(struct pci_driver *drv)
150{
151 struct pci_dynid *dynid, *n;
152
153 spin_lock(&drv->dynids.lock);
154 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
155 list_del(&dynid->node);
156 kfree(dynid);
157 }
158 spin_unlock(&drv->dynids.lock);
159}
160
161static int 190static int
162pci_create_newid_file(struct pci_driver *drv) 191pci_create_newid_file(struct pci_driver *drv)
163{ 192{
@@ -186,7 +215,6 @@ static void pci_remove_removeid_file(struct pci_driver *drv)
186 driver_remove_file(&drv->driver, &driver_attr_remove_id); 215 driver_remove_file(&drv->driver, &driver_attr_remove_id);
187} 216}
188#else /* !CONFIG_HOTPLUG */ 217#else /* !CONFIG_HOTPLUG */
189static inline void pci_free_dynids(struct pci_driver *drv) {}
190static inline int pci_create_newid_file(struct pci_driver *drv) 218static inline int pci_create_newid_file(struct pci_driver *drv)
191{ 219{
192 return 0; 220 return 0;
@@ -417,8 +445,6 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
417 struct pci_dev * pci_dev = to_pci_dev(dev); 445 struct pci_dev * pci_dev = to_pci_dev(dev);
418 struct pci_driver * drv = pci_dev->driver; 446 struct pci_driver * drv = pci_dev->driver;
419 447
420 pci_dev->state_saved = false;
421
422 if (drv && drv->suspend) { 448 if (drv && drv->suspend) {
423 pci_power_t prev = pci_dev->current_state; 449 pci_power_t prev = pci_dev->current_state;
424 int error; 450 int error;
@@ -514,7 +540,6 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
514static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 540static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
515{ 541{
516 pci_restore_standard_config(pci_dev); 542 pci_restore_standard_config(pci_dev);
517 pci_dev->state_saved = false;
518 pci_fixup_device(pci_fixup_resume_early, pci_dev); 543 pci_fixup_device(pci_fixup_resume_early, pci_dev);
519} 544}
520 545
@@ -575,13 +600,11 @@ static void pci_pm_complete(struct device *dev)
575static int pci_pm_suspend(struct device *dev) 600static int pci_pm_suspend(struct device *dev)
576{ 601{
577 struct pci_dev *pci_dev = to_pci_dev(dev); 602 struct pci_dev *pci_dev = to_pci_dev(dev);
578 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 603 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
579 604
580 if (pci_has_legacy_pm_support(pci_dev)) 605 if (pci_has_legacy_pm_support(pci_dev))
581 return pci_legacy_suspend(dev, PMSG_SUSPEND); 606 return pci_legacy_suspend(dev, PMSG_SUSPEND);
582 607
583 pci_dev->state_saved = false;
584
585 if (!pm) { 608 if (!pm) {
586 pci_pm_default_suspend(pci_dev); 609 pci_pm_default_suspend(pci_dev);
587 goto Fixup; 610 goto Fixup;
@@ -613,7 +636,7 @@ static int pci_pm_suspend(struct device *dev)
613static int pci_pm_suspend_noirq(struct device *dev) 636static int pci_pm_suspend_noirq(struct device *dev)
614{ 637{
615 struct pci_dev *pci_dev = to_pci_dev(dev); 638 struct pci_dev *pci_dev = to_pci_dev(dev);
616 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 639 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
617 640
618 if (pci_has_legacy_pm_support(pci_dev)) 641 if (pci_has_legacy_pm_support(pci_dev))
619 return pci_legacy_suspend_late(dev, PMSG_SUSPEND); 642 return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -672,7 +695,7 @@ static int pci_pm_resume_noirq(struct device *dev)
672static int pci_pm_resume(struct device *dev) 695static int pci_pm_resume(struct device *dev)
673{ 696{
674 struct pci_dev *pci_dev = to_pci_dev(dev); 697 struct pci_dev *pci_dev = to_pci_dev(dev);
675 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 698 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
676 int error = 0; 699 int error = 0;
677 700
678 /* 701 /*
@@ -694,7 +717,7 @@ static int pci_pm_resume(struct device *dev)
694 pci_pm_reenable_device(pci_dev); 717 pci_pm_reenable_device(pci_dev);
695 } 718 }
696 719
697 return 0; 720 return error;
698} 721}
699 722
700#else /* !CONFIG_SUSPEND */ 723#else /* !CONFIG_SUSPEND */
@@ -711,13 +734,11 @@ static int pci_pm_resume(struct device *dev)
711static int pci_pm_freeze(struct device *dev) 734static int pci_pm_freeze(struct device *dev)
712{ 735{
713 struct pci_dev *pci_dev = to_pci_dev(dev); 736 struct pci_dev *pci_dev = to_pci_dev(dev);
714 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 737 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
715 738
716 if (pci_has_legacy_pm_support(pci_dev)) 739 if (pci_has_legacy_pm_support(pci_dev))
717 return pci_legacy_suspend(dev, PMSG_FREEZE); 740 return pci_legacy_suspend(dev, PMSG_FREEZE);
718 741
719 pci_dev->state_saved = false;
720
721 if (!pm) { 742 if (!pm) {
722 pci_pm_default_suspend(pci_dev); 743 pci_pm_default_suspend(pci_dev);
723 return 0; 744 return 0;
@@ -780,7 +801,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
780static int pci_pm_thaw(struct device *dev) 801static int pci_pm_thaw(struct device *dev)
781{ 802{
782 struct pci_dev *pci_dev = to_pci_dev(dev); 803 struct pci_dev *pci_dev = to_pci_dev(dev);
783 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 804 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
784 int error = 0; 805 int error = 0;
785 806
786 if (pci_has_legacy_pm_support(pci_dev)) 807 if (pci_has_legacy_pm_support(pci_dev))
@@ -793,19 +814,19 @@ static int pci_pm_thaw(struct device *dev)
793 pci_pm_reenable_device(pci_dev); 814 pci_pm_reenable_device(pci_dev);
794 } 815 }
795 816
817 pci_dev->state_saved = false;
818
796 return error; 819 return error;
797} 820}
798 821
799static int pci_pm_poweroff(struct device *dev) 822static int pci_pm_poweroff(struct device *dev)
800{ 823{
801 struct pci_dev *pci_dev = to_pci_dev(dev); 824 struct pci_dev *pci_dev = to_pci_dev(dev);
802 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 825 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
803 826
804 if (pci_has_legacy_pm_support(pci_dev)) 827 if (pci_has_legacy_pm_support(pci_dev))
805 return pci_legacy_suspend(dev, PMSG_HIBERNATE); 828 return pci_legacy_suspend(dev, PMSG_HIBERNATE);
806 829
807 pci_dev->state_saved = false;
808
809 if (!pm) { 830 if (!pm) {
810 pci_pm_default_suspend(pci_dev); 831 pci_pm_default_suspend(pci_dev);
811 goto Fixup; 832 goto Fixup;
@@ -872,7 +893,7 @@ static int pci_pm_restore_noirq(struct device *dev)
872static int pci_pm_restore(struct device *dev) 893static int pci_pm_restore(struct device *dev)
873{ 894{
874 struct pci_dev *pci_dev = to_pci_dev(dev); 895 struct pci_dev *pci_dev = to_pci_dev(dev);
875 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 896 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
876 int error = 0; 897 int error = 0;
877 898
878 /* 899 /*
@@ -910,7 +931,7 @@ static int pci_pm_restore(struct device *dev)
910 931
911#endif /* !CONFIG_HIBERNATION */ 932#endif /* !CONFIG_HIBERNATION */
912 933
913struct dev_pm_ops pci_dev_pm_ops = { 934const struct dev_pm_ops pci_dev_pm_ops = {
914 .prepare = pci_pm_prepare, 935 .prepare = pci_pm_prepare,
915 .complete = pci_pm_complete, 936 .complete = pci_pm_complete,
916 .suspend = pci_pm_suspend, 937 .suspend = pci_pm_suspend,
@@ -1106,6 +1127,7 @@ static int __init pci_driver_init(void)
1106 1127
1107postcore_initcall(pci_driver_init); 1128postcore_initcall(pci_driver_init);
1108 1129
1130EXPORT_SYMBOL_GPL(pci_add_dynid);
1109EXPORT_SYMBOL(pci_match_id); 1131EXPORT_SYMBOL(pci_match_id);
1110EXPORT_SYMBOL(__pci_register_driver); 1132EXPORT_SYMBOL(__pci_register_driver);
1111EXPORT_SYMBOL(pci_unregister_driver); 1133EXPORT_SYMBOL(pci_unregister_driver);
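[editor's note] pci_add_dynid() is now exported (pci-stub below is the first in-tree user), replacing the open-coded dynid list manipulation that store_new_id() used to do. A minimal sketch of a caller, assuming <linux/pci.h>; the helper name and the 0x1234/0x5678 IDs are invented for illustration:

    /* Hypothetical caller: make an already-registered driver claim one
     * more device at runtime. 0x1234/0x5678 are made-up IDs; class
     * matching is disabled (0/0) and no driver_data is passed. */
    static int example_claim_device(struct pci_driver *drv)
    {
            return pci_add_dynid(drv, 0x1234, 0x5678,
                                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
    }

On success driver_attach() runs, so a matching device present in the system is probed immediately.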
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 74fbec0bf6cb..f7b68ca6cc98 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -19,8 +19,16 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21 21
22static char ids[1024] __initdata;
23
24module_param_string(ids, ids, sizeof(ids), 0);
25MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
26 "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\""
27 " and multiple comma separated entries can be specified");
28
22static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) 29static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
23{ 30{
31 dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
24 return 0; 32 return 0;
25} 33}
26 34
@@ -32,7 +40,42 @@ static struct pci_driver stub_driver = {
32 40
33static int __init pci_stub_init(void) 41static int __init pci_stub_init(void)
34{ 42{
35 return pci_register_driver(&stub_driver); 43 char *p, *id;
44 int rc;
45
46 rc = pci_register_driver(&stub_driver);
47 if (rc)
48 return rc;
49
50 /* add ids specified in the module parameter */
51 p = ids;
52 while ((id = strsep(&p, ","))) {
53 unsigned int vendor, device, subvendor = PCI_ANY_ID,
54 subdevice = PCI_ANY_ID, class=0, class_mask=0;
55 int fields;
56
57 fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
58 &vendor, &device, &subvendor, &subdevice,
59 &class, &class_mask);
60
61 if (fields < 2) {
62 printk(KERN_WARNING
63 "pci-stub: invalid id string \"%s\"\n", id);
64 continue;
65 }
66
67 printk(KERN_INFO
68 "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
69 vendor, device, subvendor, subdevice, class, class_mask);
70
71 rc = pci_add_dynid(&stub_driver, vendor, device,
72 subvendor, subdevice, class, class_mask, 0);
73 if (rc)
74 printk(KERN_WARNING
75 "pci-stub: failed to add dynamic id (%d)\n", rc);
76 }
77
78 return 0;
36} 79}
37 80
38static void __exit pci_stub_exit(void) 81static void __exit pci_stub_exit(void)
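[editor's note] With the new ids= parameter, devices can be handed to pci-stub at load time in exactly the format the MODULE_PARM_DESC above documents. The IDs below are only examples:

    # claim two (made-up) devices at load time
    modprobe pci-stub ids=8086:10f5,1102:0002:1102:0010

When the driver is built in, the same string should work as pci-stub.ids=... on the kernel command line, since it is a plain module_param_string.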
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 85ebd02a64a7..0f6382f090ee 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
916 return 0; 916 return 0;
917} 917}
918 918
919static ssize_t reset_store(struct device *dev,
920 struct device_attribute *attr, const char *buf,
921 size_t count)
922{
923 struct pci_dev *pdev = to_pci_dev(dev);
924 unsigned long val;
925 ssize_t result = strict_strtoul(buf, 0, &val);
926
927 if (result < 0)
928 return result;
929
930 if (val != 1)
931 return -EINVAL;
932 return pci_reset_function(pdev);
933}
934
935static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
936
919static int pci_create_capabilities_sysfs(struct pci_dev *dev) 937static int pci_create_capabilities_sysfs(struct pci_dev *dev)
920{ 938{
921 int retval; 939 int retval;
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
943 /* Active State Power Management */ 961 /* Active State Power Management */
944 pcie_aspm_create_sysfs_dev_files(dev); 962 pcie_aspm_create_sysfs_dev_files(dev);
945 963
964 if (!pci_probe_reset_function(dev)) {
965 retval = device_create_file(&dev->dev, &reset_attr);
966 if (retval)
967 goto error;
968 dev->reset_fn = 1;
969 }
946 return 0; 970 return 0;
971
972error:
973 pcie_aspm_remove_sysfs_dev_files(dev);
974 if (dev->vpd && dev->vpd->attr) {
975 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
976 kfree(dev->vpd->attr);
977 }
978
979 return retval;
947} 980}
948 981
949int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) 982int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
1037 } 1070 }
1038 1071
1039 pcie_aspm_remove_sysfs_dev_files(dev); 1072 pcie_aspm_remove_sysfs_dev_files(dev);
1073 if (dev->reset_fn) {
1074 device_remove_file(&dev->dev, &reset_attr);
1075 dev->reset_fn = 0;
1076 }
1040} 1077}
1041 1078
1042/** 1079/**
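[editor's note] The new reset attribute is only created when pci_probe_reset_function() (added in pci.c below) reports the device as resettable, and the store handler rejects any value other than 1 with -EINVAL. From user space, with an illustrative device address:

    # trigger a function-level reset of one device
    echo 1 > /sys/bus/pci/devices/0000:01:00.0/reset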
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7b70312181d7..6edecff0b419 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,6 +41,12 @@ int pci_domains_supported = 1;
41unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; 41unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
42unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; 42unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
43 43
44#define DEFAULT_HOTPLUG_IO_SIZE (256)
45#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
46/* pci=hpmemsize=nnM,hpiosize=nn can override this */
47unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
48unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
49
44/** 50/**
45 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 51 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
46 * @bus: pointer to PCI bus structure to search 52 * @bus: pointer to PCI bus structure to search
@@ -848,6 +854,7 @@ pci_restore_state(struct pci_dev *dev)
848 854
849 if (!dev->state_saved) 855 if (!dev->state_saved)
850 return 0; 856 return 0;
857
851 /* PCI Express register must be restored first */ 858 /* PCI Express register must be restored first */
852 pci_restore_pcie_state(dev); 859 pci_restore_pcie_state(dev);
853 860
@@ -869,6 +876,8 @@ pci_restore_state(struct pci_dev *dev)
869 pci_restore_msi_state(dev); 876 pci_restore_msi_state(dev);
870 pci_restore_iov_state(dev); 877 pci_restore_iov_state(dev);
871 878
879 dev->state_saved = false;
880
872 return 0; 881 return 0;
873} 882}
874 883
@@ -1214,30 +1223,40 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1214 */ 1223 */
1215int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) 1224int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1216{ 1225{
1217 int error = 0; 1226 int ret = 0;
1218 bool pme_done = false;
1219 1227
1220 if (enable && !device_may_wakeup(&dev->dev)) 1228 if (enable && !device_may_wakeup(&dev->dev))
1221 return -EINVAL; 1229 return -EINVAL;
1222 1230
1231 /* Don't do the same thing twice in a row for one device. */
1232 if (!!enable == !!dev->wakeup_prepared)
1233 return 0;
1234
1223 /* 1235 /*
1224 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don 1236 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1225 * Anderson we should be doing PME# wake enable followed by ACPI wake 1237 * Anderson we should be doing PME# wake enable followed by ACPI wake
1226 * enable. To disable wake-up we call the platform first, for symmetry. 1238 * enable. To disable wake-up we call the platform first, for symmetry.
1227 */ 1239 */
1228 1240
1229 if (!enable && platform_pci_can_wakeup(dev)) 1241 if (enable) {
1230 error = platform_pci_sleep_wake(dev, false); 1242 int error;
1231
1232 if (!enable || pci_pme_capable(dev, state)) {
1233 pci_pme_active(dev, enable);
1234 pme_done = true;
1235 }
1236 1243
1237 if (enable && platform_pci_can_wakeup(dev)) 1244 if (pci_pme_capable(dev, state))
1245 pci_pme_active(dev, true);
1246 else
1247 ret = 1;
1238 error = platform_pci_sleep_wake(dev, true); 1248 error = platform_pci_sleep_wake(dev, true);
1249 if (ret)
1250 ret = error;
1251 if (!ret)
1252 dev->wakeup_prepared = true;
1253 } else {
1254 platform_pci_sleep_wake(dev, false);
1255 pci_pme_active(dev, false);
1256 dev->wakeup_prepared = false;
1257 }
1239 1258
1240 return pme_done ? 0 : error; 1259 return ret;
1241} 1260}
1242 1261
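[editor's note] The new wakeup_prepared flag makes repeated pci_enable_wake() calls with the same argument a no-op, so a driver can arm wake-up unconditionally on every suspend. An illustrative legacy .suspend hook, assuming <linux/pci.h>; the function itself is made up, the three pci_* calls are real:

    /* Save config space, arm PME# (and the platform method, if any),
     * then drop to D3hot. pci_enable_wake() returns -EINVAL if the
     * device is not allowed to wake the system. */
    static int example_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            pci_save_state(pdev);
            pci_enable_wake(pdev, PCI_D3hot, true);
            pci_set_power_state(pdev, PCI_D3hot);
            return 0;
    }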
1243/** 1262/**
@@ -1356,6 +1375,7 @@ void pci_pm_init(struct pci_dev *dev)
1356 int pm; 1375 int pm;
1357 u16 pmc; 1376 u16 pmc;
1358 1377
1378 dev->wakeup_prepared = false;
1359 dev->pm_cap = 0; 1379 dev->pm_cap = 0;
1360 1380
1361 /* find PCI PM capability in list */ 1381 /* find PCI PM capability in list */
@@ -2262,6 +2282,22 @@ int __pci_reset_function(struct pci_dev *dev)
2262EXPORT_SYMBOL_GPL(__pci_reset_function); 2282EXPORT_SYMBOL_GPL(__pci_reset_function);
2263 2283
2264/** 2284/**
2285 * pci_probe_reset_function - check whether the device can be safely reset
2286 * @dev: PCI device to reset
2287 *
2288 * Some devices allow an individual function to be reset without affecting
2289 * other functions in the same device. The PCI device must be responsive
2290 * to PCI config space in order to use this function.
2291 *
2292 * Returns 0 if the device function can be reset or negative if the
2293 * device doesn't support resetting a single function.
2294 */
2295int pci_probe_reset_function(struct pci_dev *dev)
2296{
2297 return pci_dev_reset(dev, 1);
2298}
2299
2300/**
2265 * pci_reset_function - quiesce and reset a PCI device function 2301 * pci_reset_function - quiesce and reset a PCI device function
2266 * @dev: PCI device to reset 2302 * @dev: PCI device to reset
2267 * 2303 *
@@ -2504,6 +2540,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2504 return 0; 2540 return 0;
2505} 2541}
2506 2542
2543/**
2544 * pci_set_vga_state - set VGA decode state on device and parents if requested
 2545 * @dev: the PCI device
 2546 * @decode: true = enable decoding, false = disable decoding
 2547 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 2548 * @change_bridge: traverse ancestors and change bridges
2549 */
2550int pci_set_vga_state(struct pci_dev *dev, bool decode,
2551 unsigned int command_bits, bool change_bridge)
2552{
2553 struct pci_bus *bus;
2554 struct pci_dev *bridge;
2555 u16 cmd;
2556
2557 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
2558
2559 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2560 if (decode == true)
2561 cmd |= command_bits;
2562 else
2563 cmd &= ~command_bits;
2564 pci_write_config_word(dev, PCI_COMMAND, cmd);
2565
2566 if (change_bridge == false)
2567 return 0;
2568
2569 bus = dev->bus;
2570 while (bus) {
2571 bridge = bus->self;
2572 if (bridge) {
2573 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
2574 &cmd);
2575 if (decode == true)
2576 cmd |= PCI_BRIDGE_CTL_VGA;
2577 else
2578 cmd &= ~PCI_BRIDGE_CTL_VGA;
2579 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
2580 cmd);
2581 }
2582 bus = bus->parent;
2583 }
2584 return 0;
2585}
2586
2507#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 2587#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2508static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 2588static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2509spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; 2589spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
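[editor's note] pci_set_vga_state() adjusts VGA decoding on the device itself and, when change_bridge is set, VGA forwarding on every bridge up to the root. A sketch of how a GPU driver might route legacy VGA ranges to its device; the call matches the signature above, the surrounding driver context is assumed:

    /* Illustrative: enable I/O and memory VGA decode on pdev and ask
     * every upstream bridge to forward VGA cycles to it (assumes
     * <linux/pci.h> and that pdev is the caller's VGA-class device). */
    static int example_route_vga(struct pci_dev *pdev)
    {
            return pci_set_vga_state(pdev, true,
                                     PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
                                     true);
    }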
@@ -2672,6 +2752,10 @@ static int __init pci_setup(char *str)
2672 strlen(str + 19)); 2752 strlen(str + 19));
2673 } else if (!strncmp(str, "ecrc=", 5)) { 2753 } else if (!strncmp(str, "ecrc=", 5)) {
2674 pcie_ecrc_get_policy(str + 5); 2754 pcie_ecrc_get_policy(str + 5);
2755 } else if (!strncmp(str, "hpiosize=", 9)) {
2756 pci_hotplug_io_size = memparse(str + 9, &str);
2757 } else if (!strncmp(str, "hpmemsize=", 10)) {
2758 pci_hotplug_mem_size = memparse(str + 10, &str);
2675 } else { 2759 } else {
2676 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2760 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2677 str); 2761 str);
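[editor's note] Both new options go through memparse(), so the usual K/M/G suffixes are accepted. Overriding the compiled-in defaults (256 bytes of I/O space and 2 MB of memory per hotplug bridge) from the kernel command line, with illustrative sizes:

    pci=hpiosize=4K,hpmemsize=16M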
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ff4d25bf0e9..d92d1954a2fb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev);
16extern int pci_mmap_fits(struct pci_dev *pdev, int resno, 16extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
17 struct vm_area_struct *vma); 17 struct vm_area_struct *vma);
18#endif 18#endif
19int pci_probe_reset_function(struct pci_dev *dev);
19 20
20/** 21/**
21 * struct pci_platform_pm_ops - Firmware PM callbacks 22 * struct pci_platform_pm_ops - Firmware PM callbacks
@@ -133,7 +134,6 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
133 return (dev->no_d1d2 || parent_dstates); 134 return (dev->no_d1d2 || parent_dstates);
134 135
135} 136}
136extern int pcie_mch_quirk;
137extern struct device_attribute pci_dev_attrs[]; 137extern struct device_attribute pci_dev_attrs[];
138extern struct device_attribute dev_attr_cpuaffinity; 138extern struct device_attribute dev_attr_cpuaffinity;
139extern struct device_attribute dev_attr_cpulistaffinity; 139extern struct device_attribute dev_attr_cpulistaffinity;
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index d92ae21a59d8..62d15f652bb6 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -22,11 +22,10 @@
22#include <linux/miscdevice.h> 22#include <linux/miscdevice.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include "aerdrv.h" 26#include "aerdrv.h"
27 27
28struct aer_error_inj 28struct aer_error_inj {
29{
30 u8 bus; 29 u8 bus;
31 u8 dev; 30 u8 dev;
32 u8 fn; 31 u8 fn;
@@ -38,8 +37,7 @@ struct aer_error_inj
38 u32 header_log3; 37 u32 header_log3;
39}; 38};
40 39
41struct aer_error 40struct aer_error {
42{
43 struct list_head list; 41 struct list_head list;
44 unsigned int bus; 42 unsigned int bus;
45 unsigned int devfn; 43 unsigned int devfn;
@@ -55,8 +53,7 @@ struct aer_error
55 u32 source_id; 53 u32 source_id;
56}; 54};
57 55
58struct pci_bus_ops 56struct pci_bus_ops {
59{
60 struct list_head list; 57 struct list_head list;
61 struct pci_bus *bus; 58 struct pci_bus *bus;
62 struct pci_ops *ops; 59 struct pci_ops *ops;
@@ -150,7 +147,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
150 target = &err->header_log1; 147 target = &err->header_log1;
151 break; 148 break;
152 case PCI_ERR_HEADER_LOG+8: 149 case PCI_ERR_HEADER_LOG+8:
153 target = &err->header_log2; 150 target = &err->header_log2;
154 break; 151 break;
155 case PCI_ERR_HEADER_LOG+12: 152 case PCI_ERR_HEADER_LOG+12:
156 target = &err->header_log3; 153 target = &err->header_log3;
@@ -258,8 +255,7 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
258 bus_ops = NULL; 255 bus_ops = NULL;
259out: 256out:
260 spin_unlock_irqrestore(&inject_lock, flags); 257 spin_unlock_irqrestore(&inject_lock, flags);
261 if (bus_ops) 258 kfree(bus_ops);
262 kfree(bus_ops);
263 return 0; 259 return 0;
264} 260}
265 261
@@ -401,10 +397,8 @@ static int aer_inject(struct aer_error_inj *einj)
401 else 397 else
402 ret = -EINVAL; 398 ret = -EINVAL;
403out_put: 399out_put:
404 if (err_alloc) 400 kfree(err_alloc);
405 kfree(err_alloc); 401 kfree(rperr_alloc);
406 if (rperr_alloc)
407 kfree(rperr_alloc);
408 pci_dev_put(dev); 402 pci_dev_put(dev);
409 return ret; 403 return ret;
410} 404}
@@ -458,8 +452,7 @@ static void __exit aer_inject_exit(void)
458 } 452 }
459 453
460 spin_lock_irqsave(&inject_lock, flags); 454 spin_lock_irqsave(&inject_lock, flags);
461 list_for_each_entry_safe(err, err_next, 455 list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) {
462 &pci_bus_ops_list, list) {
463 list_del(&err->list); 456 list_del(&err->list);
464 kfree(err); 457 kfree(err);
465 } 458 }
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 4770f13b3ca1..2ce8f9ccc66e 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
38MODULE_DESCRIPTION(DRIVER_DESC); 38MODULE_DESCRIPTION(DRIVER_DESC);
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static int __devinit aer_probe (struct pcie_device *dev); 41static int __devinit aer_probe(struct pcie_device *dev);
42static void aer_remove(struct pcie_device *dev); 42static void aer_remove(struct pcie_device *dev);
43static pci_ers_result_t aer_error_detected(struct pci_dev *dev, 43static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
44 enum pci_channel_state error); 44 enum pci_channel_state error);
@@ -47,7 +47,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
47 47
48static struct pci_error_handlers aer_error_handlers = { 48static struct pci_error_handlers aer_error_handlers = {
49 .error_detected = aer_error_detected, 49 .error_detected = aer_error_detected,
50 .resume = aer_error_resume, 50 .resume = aer_error_resume,
51}; 51};
52 52
53static struct pcie_port_service_driver aerdriver = { 53static struct pcie_port_service_driver aerdriver = {
@@ -134,12 +134,12 @@ EXPORT_SYMBOL_GPL(aer_irq);
134 * 134 *
135 * Invoked when Root Port's AER service is loaded. 135 * Invoked when Root Port's AER service is loaded.
136 **/ 136 **/
137static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) 137static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
138{ 138{
139 struct aer_rpc *rpc; 139 struct aer_rpc *rpc;
140 140
141 if (!(rpc = kzalloc(sizeof(struct aer_rpc), 141 rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
142 GFP_KERNEL))) 142 if (!rpc)
143 return NULL; 143 return NULL;
144 144
145 /* 145 /*
@@ -189,26 +189,28 @@ static void aer_remove(struct pcie_device *dev)
189 * 189 *
190 * Invoked when PCI Express bus loads AER service driver. 190 * Invoked when PCI Express bus loads AER service driver.
191 **/ 191 **/
192static int __devinit aer_probe (struct pcie_device *dev) 192static int __devinit aer_probe(struct pcie_device *dev)
193{ 193{
194 int status; 194 int status;
195 struct aer_rpc *rpc; 195 struct aer_rpc *rpc;
196 struct device *device = &dev->device; 196 struct device *device = &dev->device;
197 197
198 /* Init */ 198 /* Init */
199 if ((status = aer_init(dev))) 199 status = aer_init(dev);
200 if (status)
200 return status; 201 return status;
201 202
202 /* Alloc rpc data structure */ 203 /* Alloc rpc data structure */
203 if (!(rpc = aer_alloc_rpc(dev))) { 204 rpc = aer_alloc_rpc(dev);
205 if (!rpc) {
204 dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); 206 dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
205 aer_remove(dev); 207 aer_remove(dev);
206 return -ENOMEM; 208 return -ENOMEM;
207 } 209 }
208 210
209 /* Request IRQ ISR */ 211 /* Request IRQ ISR */
210 if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", 212 status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
211 dev))) { 213 if (status) {
212 dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); 214 dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
213 aer_remove(dev); 215 aer_remove(dev);
214 return status; 216 return status;
@@ -316,6 +318,8 @@ static int __init aer_service_init(void)
316{ 318{
317 if (pcie_aer_disable) 319 if (pcie_aer_disable)
318 return -ENXIO; 320 return -ENXIO;
321 if (!pci_msi_enabled())
322 return -ENXIO;
319 return pcie_port_service_register(&aerdriver); 323 return pcie_port_service_register(&aerdriver);
320} 324}
321 325
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bbd7428ca2d0..bd833ea3ba49 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -16,12 +16,9 @@
16#define AER_NONFATAL 0 16#define AER_NONFATAL 0
17#define AER_FATAL 1 17#define AER_FATAL 1
18#define AER_CORRECTABLE 2 18#define AER_CORRECTABLE 2
19#define AER_UNCORRECTABLE 4
20#define AER_ERROR_MASK 0x001fffff
21#define AER_ERROR(d) (d & AER_ERROR_MASK)
22 19
23/* Root Error Status Register Bits */ 20/* Root Error Status Register Bits */
24#define ROOT_ERR_STATUS_MASKS 0x0f 21#define ROOT_ERR_STATUS_MASKS 0x0f
25 22
26#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ 23#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
27 PCI_EXP_RTCTL_SENFEE| \ 24 PCI_EXP_RTCTL_SENFEE| \
@@ -32,8 +29,6 @@
32#define ERR_COR_ID(d) (d & 0xffff) 29#define ERR_COR_ID(d) (d & 0xffff)
33#define ERR_UNCOR_ID(d) (d >> 16) 30#define ERR_UNCOR_ID(d) (d >> 16)
34 31
35#define AER_SUCCESS 0
36#define AER_UNSUCCESS 1
37#define AER_ERROR_SOURCES_MAX 100 32#define AER_ERROR_SOURCES_MAX 100
38 33
39#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ 34#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -43,13 +38,6 @@
43 PCI_ERR_UNC_UNX_COMP| \ 38 PCI_ERR_UNC_UNX_COMP| \
44 PCI_ERR_UNC_MALF_TLP) 39 PCI_ERR_UNC_MALF_TLP)
45 40
46/* AER Error Info Flags */
47#define AER_TLP_HEADER_VALID_FLAG 0x00000001
48#define AER_MULTI_ERROR_VALID_FLAG 0x00000002
49
50#define ERR_CORRECTABLE_ERROR_MASK 0x000031c1
51#define ERR_UNCORRECTABLE_ERROR_MASK 0x001ff010
52
53struct header_log_regs { 41struct header_log_regs {
54 unsigned int dw0; 42 unsigned int dw0;
55 unsigned int dw1; 43 unsigned int dw1;
@@ -61,11 +49,20 @@ struct header_log_regs {
61struct aer_err_info { 49struct aer_err_info {
62 struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; 50 struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
63 int error_dev_num; 51 int error_dev_num;
64 u16 id; 52
65 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ 53 unsigned int id:16;
66 int flags; 54
55 unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
56 unsigned int __pad1:5;
57 unsigned int multi_error_valid:1;
58
59 unsigned int first_error:5;
60 unsigned int __pad2:2;
61 unsigned int tlp_header_valid:1;
62
67 unsigned int status; /* COR/UNCOR Error Status */ 63 unsigned int status; /* COR/UNCOR Error Status */
68 struct header_log_regs tlp; /* TLP Header */ 64 unsigned int mask; /* COR/UNCOR Error Mask */
65 struct header_log_regs tlp; /* TLP Header */
69}; 66};
70 67
71struct aer_err_source { 68struct aer_err_source {
@@ -125,6 +122,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
125extern int aer_init(struct pcie_device *dev); 122extern int aer_init(struct pcie_device *dev);
126extern void aer_isr(struct work_struct *work); 123extern void aer_isr(struct work_struct *work);
127extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 124extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
125extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
128extern irqreturn_t aer_irq(int irq, void *context); 126extern irqreturn_t aer_irq(int irq, void *context);
129 127
130#ifdef CONFIG_ACPI 128#ifdef CONFIG_ACPI
@@ -136,4 +134,4 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
136} 134}
137#endif 135#endif
138 136
139#endif //_AERDRV_H_ 137#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 3d8872704a58..9f5ccbeb4fa5 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -49,10 +49,11 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
49 PCI_EXP_DEVCTL_NFERE | 49 PCI_EXP_DEVCTL_NFERE |
50 PCI_EXP_DEVCTL_FERE | 50 PCI_EXP_DEVCTL_FERE |
51 PCI_EXP_DEVCTL_URRE; 51 PCI_EXP_DEVCTL_URRE;
52 pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, 52 pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
53 reg16); 53
54 return 0; 54 return 0;
55} 55}
56EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
56 57
57int pci_disable_pcie_error_reporting(struct pci_dev *dev) 58int pci_disable_pcie_error_reporting(struct pci_dev *dev)
58{ 59{
@@ -68,10 +69,11 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
68 PCI_EXP_DEVCTL_NFERE | 69 PCI_EXP_DEVCTL_NFERE |
69 PCI_EXP_DEVCTL_FERE | 70 PCI_EXP_DEVCTL_FERE |
70 PCI_EXP_DEVCTL_URRE); 71 PCI_EXP_DEVCTL_URRE);
71 pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, 72 pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
72 reg16); 73
73 return 0; 74 return 0;
74} 75}
76EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
75 77
76int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 78int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
77{ 79{
@@ -92,6 +94,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
92 94
93 return 0; 95 return 0;
94} 96}
97EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
95 98
96#if 0 99#if 0
97int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) 100int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
@@ -110,7 +113,6 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
110} 113}
111#endif /* 0 */ 114#endif /* 0 */
112 115
113
114static int set_device_error_reporting(struct pci_dev *dev, void *data) 116static int set_device_error_reporting(struct pci_dev *dev, void *data)
115{ 117{
116 bool enable = *((bool *)data); 118 bool enable = *((bool *)data);
@@ -164,8 +166,9 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
164 e_info->dev[e_info->error_dev_num] = dev; 166 e_info->dev[e_info->error_dev_num] = dev;
165 e_info->error_dev_num++; 167 e_info->error_dev_num++;
166 return 1; 168 return 1;
167 } else 169 }
168 return 0; 170
171 return 0;
169} 172}
170 173
171 174
@@ -193,7 +196,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
193 * If there is no multiple error, we stop 196 * If there is no multiple error, we stop
194 * or continue based on the id comparing. 197 * or continue based on the id comparing.
195 */ 198 */
196 if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG)) 199 if (!e_info->multi_error_valid)
197 return result; 200 return result;
198 201
199 /* 202 /*
@@ -233,24 +236,16 @@ static int find_device_iter(struct pci_dev *dev, void *data)
233 status = 0; 236 status = 0;
234 mask = 0; 237 mask = 0;
235 if (e_info->severity == AER_CORRECTABLE) { 238 if (e_info->severity == AER_CORRECTABLE) {
236 pci_read_config_dword(dev, 239 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
237 pos + PCI_ERR_COR_STATUS, 240 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
238 &status); 241 if (status & ~mask) {
239 pci_read_config_dword(dev,
240 pos + PCI_ERR_COR_MASK,
241 &mask);
242 if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
243 add_error_device(e_info, dev); 242 add_error_device(e_info, dev);
244 goto added; 243 goto added;
245 } 244 }
246 } else { 245 } else {
247 pci_read_config_dword(dev, 246 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
248 pos + PCI_ERR_UNCOR_STATUS, 247 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
249 &status); 248 if (status & ~mask) {
250 pci_read_config_dword(dev,
251 pos + PCI_ERR_UNCOR_MASK,
252 &mask);
253 if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
254 add_error_device(e_info, dev); 249 add_error_device(e_info, dev);
255 goto added; 250 goto added;
256 } 251 }
@@ -259,7 +254,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
259 return 0; 254 return 0;
260 255
261added: 256added:
262 if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG) 257 if (e_info->multi_error_valid)
263 return 0; 258 return 0;
264 else 259 else
265 return 1; 260 return 1;
@@ -411,8 +406,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
411 pci_cleanup_aer_uncorrect_error_status(dev); 406 pci_cleanup_aer_uncorrect_error_status(dev);
412 dev->error_state = pci_channel_io_normal; 407 dev->error_state = pci_channel_io_normal;
413 } 408 }
414 } 409 } else {
415 else {
416 /* 410 /*
417 * If the error is reported by an end point, we think this 411 * If the error is reported by an end point, we think this
418 * error is related to the upstream link of the end point. 412 * error is related to the upstream link of the end point.
@@ -473,7 +467,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
473 if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) 467 if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
474 udev = dev; 468 udev = dev;
475 else 469 else
476 udev= dev->bus->self; 470 udev = dev->bus->self;
477 471
478 data.is_downstream = 0; 472 data.is_downstream = 0;
479 data.aer_driver = NULL; 473 data.aer_driver = NULL;
@@ -576,7 +570,7 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
576 * 570 *
577 * Invoked when an error being detected by Root Port. 571 * Invoked when an error being detected by Root Port.
578 */ 572 */
579static void handle_error_source(struct pcie_device * aerdev, 573static void handle_error_source(struct pcie_device *aerdev,
580 struct pci_dev *dev, 574 struct pci_dev *dev,
581 struct aer_err_info *info) 575 struct aer_err_info *info)
582{ 576{
@@ -682,7 +676,7 @@ static void disable_root_aer(struct aer_rpc *rpc)
682 * 676 *
683 * Invoked by DPC handler to consume an error. 677 * Invoked by DPC handler to consume an error.
684 */ 678 */
685static struct aer_err_source* get_e_source(struct aer_rpc *rpc) 679static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
686{ 680{
687 struct aer_err_source *e_source; 681 struct aer_err_source *e_source;
688 unsigned long flags; 682 unsigned long flags;
@@ -702,32 +696,50 @@ static struct aer_err_source* get_e_source(struct aer_rpc *rpc)
702 return e_source; 696 return e_source;
703} 697}
704 698
699/**
 700 * get_device_error_info - read error status from dev and store it in info
 701 * @dev: pointer to the device expected to have an error record
702 * @info: pointer to structure to store the error record
703 *
704 * Return 1 on success, 0 on error.
705 */
705static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) 706static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
706{ 707{
707 int pos; 708 int pos, temp;
709
710 info->status = 0;
711 info->tlp_header_valid = 0;
708 712
709 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 713 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
710 714
711 /* The device might not support AER */ 715 /* The device might not support AER */
712 if (!pos) 716 if (!pos)
713 return AER_SUCCESS; 717 return 1;
714 718
715 if (info->severity == AER_CORRECTABLE) { 719 if (info->severity == AER_CORRECTABLE) {
716 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, 720 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
717 &info->status); 721 &info->status);
718 if (!(info->status & ERR_CORRECTABLE_ERROR_MASK)) 722 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
719 return AER_UNSUCCESS; 723 &info->mask);
724 if (!(info->status & ~info->mask))
725 return 0;
720 } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || 726 } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
721 info->severity == AER_NONFATAL) { 727 info->severity == AER_NONFATAL) {
722 728
723 /* Link is still healthy for IO reads */ 729 /* Link is still healthy for IO reads */
724 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, 730 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
725 &info->status); 731 &info->status);
726 if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK)) 732 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
727 return AER_UNSUCCESS; 733 &info->mask);
734 if (!(info->status & ~info->mask))
735 return 0;
736
737 /* Get First Error Pointer */
738 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
739 info->first_error = PCI_ERR_CAP_FEP(temp);
728 740
729 if (info->status & AER_LOG_TLP_MASKS) { 741 if (info->status & AER_LOG_TLP_MASKS) {
730 info->flags |= AER_TLP_HEADER_VALID_FLAG; 742 info->tlp_header_valid = 1;
731 pci_read_config_dword(dev, 743 pci_read_config_dword(dev,
732 pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); 744 pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
733 pci_read_config_dword(dev, 745 pci_read_config_dword(dev,
@@ -739,7 +751,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
739 } 751 }
740 } 752 }
741 753
742 return AER_SUCCESS; 754 return 1;
743} 755}
744 756
745static inline void aer_process_err_devices(struct pcie_device *p_device, 757static inline void aer_process_err_devices(struct pcie_device *p_device,
@@ -753,14 +765,14 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
753 e_info->id); 765 e_info->id);
754 } 766 }
755 767
 768 /* Report all errors before handling them, so records aren't lost to resets etc. */
756 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { 769 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
757 if (get_device_error_info(e_info->dev[i], e_info) == 770 if (get_device_error_info(e_info->dev[i], e_info))
758 AER_SUCCESS) {
759 aer_print_error(e_info->dev[i], e_info); 771 aer_print_error(e_info->dev[i], e_info);
760 handle_error_source(p_device, 772 }
761 e_info->dev[i], 773 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
762 e_info); 774 if (get_device_error_info(e_info->dev[i], e_info))
763 } 775 handle_error_source(p_device, e_info->dev[i], e_info);
764 } 776 }
765} 777}
766 778
@@ -806,7 +818,9 @@ static void aer_isr_one_error(struct pcie_device *p_device,
806 if (e_src->status & 818 if (e_src->status &
807 (PCI_ERR_ROOT_MULTI_COR_RCV | 819 (PCI_ERR_ROOT_MULTI_COR_RCV |
808 PCI_ERR_ROOT_MULTI_UNCOR_RCV)) 820 PCI_ERR_ROOT_MULTI_UNCOR_RCV))
809 e_info->flags |= AER_MULTI_ERROR_VALID_FLAG; 821 e_info->multi_error_valid = 1;
822
823 aer_print_port_info(p_device->port, e_info);
810 824
811 find_source_device(p_device->port, e_info); 825 find_source_device(p_device->port, e_info);
812 aer_process_err_devices(p_device, e_info); 826 aer_process_err_devices(p_device, e_info);
@@ -863,10 +877,5 @@ int aer_init(struct pcie_device *dev)
863 if (aer_osc_setup(dev) && !forceload) 877 if (aer_osc_setup(dev) && !forceload)
864 return -ENXIO; 878 return -ENXIO;
865 879
866 return AER_SUCCESS; 880 return 0;
867} 881}
868
869EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
870EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
871EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
872
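[editor's note] With the EXPORT_SYMBOL_GPLs moved next to their functions, a PCIe driver can opt into AER signalling directly from probe. An illustrative pattern, assuming <linux/pci.h> and <linux/aer.h>; the driver and function names are made up, the two pci_* calls are real:

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            int err = pci_enable_device(pdev);

            if (err)
                    return err;
            /* Turn on correctable/non-fatal/fatal/UR reporting; the
             * return value may simply be ignored on devices without
             * an AER capability. */
            pci_enable_pcie_error_reporting(pdev);
            return 0;
    }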
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 0fc29ae80df8..44acde72294f 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -27,69 +27,70 @@
27#define AER_AGENT_COMPLETER 2 27#define AER_AGENT_COMPLETER 2
28#define AER_AGENT_TRANSMITTER 3 28#define AER_AGENT_TRANSMITTER 3
29 29
30#define AER_AGENT_REQUESTER_MASK (PCI_ERR_UNC_COMP_TIME| \ 30#define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
31 PCI_ERR_UNC_UNSUP) 31 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
32 32#define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
33#define AER_AGENT_COMPLETER_MASK PCI_ERR_UNC_COMP_ABORT 33 0 : PCI_ERR_UNC_COMP_ABORT)
34 34#define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
35#define AER_AGENT_TRANSMITTER_MASK(t, e) (e & (PCI_ERR_COR_REP_ROLL| \ 35 (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
36 ((t == AER_CORRECTABLE) ? PCI_ERR_COR_REP_TIMER: 0)))
37 36
38#define AER_GET_AGENT(t, e) \ 37#define AER_GET_AGENT(t, e) \
39 ((e & AER_AGENT_COMPLETER_MASK) ? AER_AGENT_COMPLETER : \ 38 ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
40 (e & AER_AGENT_REQUESTER_MASK) ? AER_AGENT_REQUESTER : \ 39 (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
41 (AER_AGENT_TRANSMITTER_MASK(t, e)) ? AER_AGENT_TRANSMITTER : \ 40 (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
42 AER_AGENT_RECEIVER) 41 AER_AGENT_RECEIVER)
43 42
44#define AER_PHYSICAL_LAYER_ERROR_MASK PCI_ERR_COR_RCVR
45#define AER_DATA_LINK_LAYER_ERROR_MASK(t, e) \
46 (PCI_ERR_UNC_DLP| \
47 PCI_ERR_COR_BAD_TLP| \
48 PCI_ERR_COR_BAD_DLLP| \
49 PCI_ERR_COR_REP_ROLL| \
50 ((t == AER_CORRECTABLE) ? \
51 PCI_ERR_COR_REP_TIMER: 0))
52
53#define AER_PHYSICAL_LAYER_ERROR 0 43#define AER_PHYSICAL_LAYER_ERROR 0
54#define AER_DATA_LINK_LAYER_ERROR 1 44#define AER_DATA_LINK_LAYER_ERROR 1
55#define AER_TRANSACTION_LAYER_ERROR 2 45#define AER_TRANSACTION_LAYER_ERROR 2
56 46
57#define AER_GET_LAYER_ERROR(t, e) \ 47#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
58 ((e & AER_PHYSICAL_LAYER_ERROR_MASK) ? \ 48 PCI_ERR_COR_RCVR : 0)
59 AER_PHYSICAL_LAYER_ERROR : \ 49#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
60 (e & AER_DATA_LINK_LAYER_ERROR_MASK(t, e)) ? \ 50 (PCI_ERR_COR_BAD_TLP| \
61 AER_DATA_LINK_LAYER_ERROR : \ 51 PCI_ERR_COR_BAD_DLLP| \
62 AER_TRANSACTION_LAYER_ERROR) 52 PCI_ERR_COR_REP_ROLL| \
53 PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
54
55#define AER_GET_LAYER_ERROR(t, e) \
56 ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
57 (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
58 AER_TRANSACTION_LAYER_ERROR)
59
60#define AER_PR(info, pdev, fmt, args...) \
61 printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \
62 KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \
63 dev_name(&pdev->dev), ## args)
63 64
64/* 65/*
65 * AER error strings 66 * AER error strings
66 */ 67 */
67static char* aer_error_severity_string[] = { 68static char *aer_error_severity_string[] = {
68 "Uncorrected (Non-Fatal)", 69 "Uncorrected (Non-Fatal)",
69 "Uncorrected (Fatal)", 70 "Uncorrected (Fatal)",
70 "Corrected" 71 "Corrected"
71}; 72};
72 73
73static char* aer_error_layer[] = { 74static char *aer_error_layer[] = {
74 "Physical Layer", 75 "Physical Layer",
75 "Data Link Layer", 76 "Data Link Layer",
76 "Transaction Layer" 77 "Transaction Layer"
77}; 78};
78static char* aer_correctable_error_string[] = { 79static char *aer_correctable_error_string[] = {
79 "Receiver Error ", /* Bit Position 0 */ 80 "Receiver Error ", /* Bit Position 0 */
80 NULL, 81 NULL,
81 NULL, 82 NULL,
82 NULL, 83 NULL,
83 NULL, 84 NULL,
84 NULL, 85 NULL,
85 "Bad TLP ", /* Bit Position 6 */ 86 "Bad TLP ", /* Bit Position 6 */
86 "Bad DLLP ", /* Bit Position 7 */ 87 "Bad DLLP ", /* Bit Position 7 */
87 "RELAY_NUM Rollover ", /* Bit Position 8 */ 88 "RELAY_NUM Rollover ", /* Bit Position 8 */
88 NULL, 89 NULL,
89 NULL, 90 NULL,
90 NULL, 91 NULL,
91 "Replay Timer Timeout ", /* Bit Position 12 */ 92 "Replay Timer Timeout ", /* Bit Position 12 */
92 "Advisory Non-Fatal ", /* Bit Position 13 */ 93 "Advisory Non-Fatal ", /* Bit Position 13 */
93 NULL, 94 NULL,
94 NULL, 95 NULL,
95 NULL, 96 NULL,
@@ -110,7 +111,7 @@ static char* aer_correctable_error_string[] = {
110 NULL, 111 NULL,
111}; 112};
112 113
113static char* aer_uncorrectable_error_string[] = { 114static char *aer_uncorrectable_error_string[] = {
114 NULL, 115 NULL,
115 NULL, 116 NULL,
116 NULL, 117 NULL,
@@ -123,10 +124,10 @@ static char* aer_uncorrectable_error_string[] = {
123 NULL, 124 NULL,
124 NULL, 125 NULL,
125 NULL, 126 NULL,
126 "Poisoned TLP ", /* Bit Position 12 */ 127 "Poisoned TLP ", /* Bit Position 12 */
127 "Flow Control Protocol ", /* Bit Position 13 */ 128 "Flow Control Protocol ", /* Bit Position 13 */
128 "Completion Timeout ", /* Bit Position 14 */ 129 "Completion Timeout ", /* Bit Position 14 */
129 "Completer Abort ", /* Bit Position 15 */ 130 "Completer Abort ", /* Bit Position 15 */
130 "Unexpected Completion ", /* Bit Position 16 */ 131 "Unexpected Completion ", /* Bit Position 16 */
131 "Receiver Overflow ", /* Bit Position 17 */ 132 "Receiver Overflow ", /* Bit Position 17 */
132 "Malformed TLP ", /* Bit Position 18 */ 133 "Malformed TLP ", /* Bit Position 18 */
@@ -145,98 +146,69 @@ static char* aer_uncorrectable_error_string[] = {
145 NULL, 146 NULL,
146}; 147};
147 148
148static char* aer_agent_string[] = { 149static char *aer_agent_string[] = {
149 "Receiver ID", 150 "Receiver ID",
150 "Requester ID", 151 "Requester ID",
151 "Completer ID", 152 "Completer ID",
152 "Transmitter ID" 153 "Transmitter ID"
153}; 154};
154 155
155static char * aer_get_error_source_name(int severity, 156static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
156 unsigned int status,
157 char errmsg_buff[])
158{ 157{
159 int i; 158 int i, status;
160 char * errmsg = NULL; 159 char *errmsg = NULL;
160
161 status = (info->status & ~info->mask);
161 162
162 for (i = 0; i < 32; i++) { 163 for (i = 0; i < 32; i++) {
163 if (!(status & (1 << i))) 164 if (!(status & (1 << i)))
164 continue; 165 continue;
165 166
166 if (severity == AER_CORRECTABLE) 167 if (info->severity == AER_CORRECTABLE)
167 errmsg = aer_correctable_error_string[i]; 168 errmsg = aer_correctable_error_string[i];
168 else 169 else
169 errmsg = aer_uncorrectable_error_string[i]; 170 errmsg = aer_uncorrectable_error_string[i];
170 171
171 if (!errmsg) { 172 if (errmsg)
172 sprintf(errmsg_buff, "Unknown Error Bit %2d ", i); 173 AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg,
173 errmsg = errmsg_buff; 174 info->first_error == i ? " (First)" : "");
174 } 175 else
175 176 AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i,
176 break; 177 info->first_error == i ? " (First)" : "");
177 } 178 }
178
179 return errmsg;
180} 179}
181 180
182static DEFINE_SPINLOCK(logbuf_lock);
183static char errmsg_buff[100];
184void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) 181void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
185{ 182{
186 char * errmsg; 183 int id = ((dev->bus->number << 8) | dev->devfn);
187 int err_layer, agent; 184
188 char * loglevel; 185 if (info->status == 0) {
189 186 AER_PR(info, dev,
190 if (info->severity == AER_CORRECTABLE) 187 "PCIE Bus Error: severity=%s, type=Unaccessible, "
191 loglevel = KERN_WARNING; 188 "id=%04x(Unregistered Agent ID)\n",
192 else 189 aer_error_severity_string[info->severity], id);
193 loglevel = KERN_ERR;
194
195 printk("%s+------ PCI-Express Device Error ------+\n", loglevel);
196 printk("%sError Severity\t\t: %s\n", loglevel,
197 aer_error_severity_string[info->severity]);
198
199 if ( info->status == 0) {
200 printk("%sPCIE Bus Error type\t: (Unaccessible)\n", loglevel);
201 printk("%sUnaccessible Received\t: %s\n", loglevel,
202 info->flags & AER_MULTI_ERROR_VALID_FLAG ?
203 "Multiple" : "First");
204 printk("%sUnregistered Agent ID\t: %04x\n", loglevel,
205 (dev->bus->number << 8) | dev->devfn);
206 } else { 190 } else {
207 err_layer = AER_GET_LAYER_ERROR(info->severity, info->status); 191 int layer, agent;
208 printk("%sPCIE Bus Error type\t: %s\n", loglevel,
209 aer_error_layer[err_layer]);
210
211 spin_lock(&logbuf_lock);
212 errmsg = aer_get_error_source_name(info->severity,
213 info->status,
214 errmsg_buff);
215 printk("%s%s\t: %s\n", loglevel, errmsg,
216 info->flags & AER_MULTI_ERROR_VALID_FLAG ?
217 "Multiple" : "First");
218 spin_unlock(&logbuf_lock);
219 192
193 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
220 agent = AER_GET_AGENT(info->severity, info->status); 194 agent = AER_GET_AGENT(info->severity, info->status);
221 printk("%s%s\t\t: %04x\n", loglevel, 195
222 aer_agent_string[agent], 196 AER_PR(info, dev,
223 (dev->bus->number << 8) | dev->devfn); 197 "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
224 198 aer_error_severity_string[info->severity],
225 printk("%sVendorID=%04xh, DeviceID=%04xh," 199 aer_error_layer[layer], id, aer_agent_string[agent]);
226 " Bus=%02xh, Device=%02xh, Function=%02xh\n", 200
227 loglevel, 201 AER_PR(info, dev,
228 dev->vendor, 202 " device [%04x:%04x] error status/mask=%08x/%08x\n",
229 dev->device, 203 dev->vendor, dev->device, info->status, info->mask);
230 dev->bus->number, 204
231 PCI_SLOT(dev->devfn), 205 __aer_print_error(info, dev);
232 PCI_FUNC(dev->devfn)); 206
233 207 if (info->tlp_header_valid) {
234 if (info->flags & AER_TLP_HEADER_VALID_FLAG) {
235 unsigned char *tlp = (unsigned char *) &info->tlp; 208 unsigned char *tlp = (unsigned char *) &info->tlp;
236 printk("%sTLP Header:\n", loglevel); 209 AER_PR(info, dev, " TLP Header:"
237 printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" 210 " %02x%02x%02x%02x %02x%02x%02x%02x"
238 " %02x%02x%02x%02x %02x%02x%02x%02x\n", 211 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
239 loglevel,
240 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, 212 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
241 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), 213 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
242 *(tlp + 11), *(tlp + 10), *(tlp + 9), 214 *(tlp + 11), *(tlp + 10), *(tlp + 9),
@@ -244,5 +216,15 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
244 *(tlp + 13), *(tlp + 12)); 216 *(tlp + 13), *(tlp + 12));
245 } 217 }
246 } 218 }
219
220 if (info->id && info->error_dev_num > 1 && info->id == id)
221 AER_PR(info, dev,
222 " Error of this Agent(%04x) is reported first\n", id);
247} 223}
248 224
225void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
226{
227 dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n",
228 info->multi_error_valid ? "Multiple " : "",
229 aer_error_severity_string[info->severity], info->id);
230}
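[editor's note] Taken together, the new aer_print_port_info() and the rewritten aer_print_error() replace the old "+------ PCI-Express Device Error ------+" banner with compact dev_printk-style lines. Reconstructed from the format strings above (driver names, addresses and IDs are invented), the output now looks roughly like:

    pcieport 0000:00:04.0: AER: Multiple Corrected error received: id=00e0
    ixgbe 0000:01:00.0: PCIE Bus Error: severity=Corrected, type=Physical Layer, id=0100(Receiver ID)
    ixgbe 0000:01:00.0:   device [8086:10fb] error status/mask=00000001/00000000
    ixgbe 0000:01:00.0:    [ 0] Receiver Error         (First)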
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3d27c97e0486..745402e8e498 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,6 +26,13 @@
26#endif 26#endif
27#define MODULE_PARAM_PREFIX "pcie_aspm." 27#define MODULE_PARAM_PREFIX "pcie_aspm."
28 28
 29/* Note: these are not register definitions */
30#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32#define ASPM_STATE_L1 (4) /* L1 state */
33#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
34#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35
29struct aspm_latency { 36struct aspm_latency {
30 u32 l0s; /* L0s latency (nsec) */ 37 u32 l0s; /* L0s latency (nsec) */
31 u32 l1; /* L1 latency (nsec) */ 38 u32 l1; /* L1 latency (nsec) */
@@ -40,17 +47,20 @@ struct pcie_link_state {
40 struct list_head link; /* node in parent's children list */ 47 struct list_head link; /* node in parent's children list */
41 48
42 /* ASPM state */ 49 /* ASPM state */
43 u32 aspm_support:2; /* Supported ASPM state */ 50 u32 aspm_support:3; /* Supported ASPM state */
44 u32 aspm_enabled:2; /* Enabled ASPM state */ 51 u32 aspm_enabled:3; /* Enabled ASPM state */
45 u32 aspm_default:2; /* Default ASPM state by BIOS */ 52 u32 aspm_capable:3; /* Capable ASPM state with latency */
53 u32 aspm_default:3; /* Default ASPM state by BIOS */
54 u32 aspm_disable:3; /* Disabled ASPM state */
46 55
47 /* Clock PM state */ 56 /* Clock PM state */
48 u32 clkpm_capable:1; /* Clock PM capable? */ 57 u32 clkpm_capable:1; /* Clock PM capable? */
49 u32 clkpm_enabled:1; /* Current Clock PM state */ 58 u32 clkpm_enabled:1; /* Current Clock PM state */
50 u32 clkpm_default:1; /* Default Clock PM state by BIOS */ 59 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
51 60
52 /* Latencies */ 61 /* Exit latencies */
53 struct aspm_latency latency; /* Exit latency */ 62 struct aspm_latency latency_up; /* Upstream direction exit latency */
63 struct aspm_latency latency_dw; /* Downstream direction exit latency */
54 /* 64 /*
55 * Endpoint acceptable latencies. A pcie downstream port only 65 * Endpoint acceptable latencies. A pcie downstream port only
56 * has one slot under it, so at most there are 8 functions. 66 * has one slot under it, so at most there are 8 functions.
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
82 return 0; 92 return 0;
83 case POLICY_POWERSAVE: 93 case POLICY_POWERSAVE:
84 /* Enable ASPM L0s/L1 */ 94 /* Enable ASPM L0s/L1 */
85 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; 95 return ASPM_STATE_ALL;
86 case POLICY_DEFAULT: 96 case POLICY_DEFAULT:
87 return link->aspm_default; 97 return link->aspm_default;
88 } 98 }
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
164 link->clkpm_capable = (blacklist) ? 0 : capable; 174 link->clkpm_capable = (blacklist) ? 0 : capable;
165} 175}
166 176
167static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
168{
169 struct pci_dev *child;
170 struct pci_bus *linkbus = link->pdev->subordinate;
171
172 list_for_each_entry(child, &linkbus->devices, bus_list) {
173 if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
174 return true;
175 }
176 return false;
177}
178
179/* 177/*
180 * pcie_aspm_configure_common_clock: check if the 2 ends of a link 178 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
181 * could use common clock. If they are, configure them to use the 179 * could use common clock. If they are, configure them to use the
@@ -288,71 +286,130 @@ static u32 calc_l1_acceptable(u32 encoding)
288 return (1000 << encoding); 286 return (1000 << encoding);
289} 287}
290 288
291static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, 289struct aspm_register_info {
292 u32 *l0s, u32 *l1, u32 *enabled) 290 u32 support:2;
291 u32 enabled:2;
292 u32 latency_encoding_l0s;
293 u32 latency_encoding_l1;
294};
295
296static void pcie_get_aspm_reg(struct pci_dev *pdev,
297 struct aspm_register_info *info)
293{ 298{
294 int pos; 299 int pos;
295 u16 reg16; 300 u16 reg16;
296 u32 reg32, encoding; 301 u32 reg32;
297 302
298 *l0s = *l1 = *enabled = 0;
299 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 303 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
300 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 304 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
301 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 305 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
302 if (*state != PCIE_LINK_STATE_L0S && 306 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
303 *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S)) 307 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
304 *state = 0; 308 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
305 if (*state == 0) 309 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
310}
311
312static void pcie_aspm_check_latency(struct pci_dev *endpoint)
313{
314 u32 latency, l1_switch_latency = 0;
315 struct aspm_latency *acceptable;
316 struct pcie_link_state *link;
317
318 /* Device not in D0 doesn't need latency check */
319 if ((endpoint->current_state != PCI_D0) &&
320 (endpoint->current_state != PCI_UNKNOWN))
306 return; 321 return;
307 322
308 encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 323 link = endpoint->bus->self->link_state;
309 *l0s = calc_l0s_latency(encoding); 324 acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
310 if (*state & PCIE_LINK_STATE_L1) { 325
311 encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 326 while (link) {
312 *l1 = calc_l1_latency(encoding); 327 /* Check upstream direction L0s latency */
328 if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
329 (link->latency_up.l0s > acceptable->l0s))
330 link->aspm_capable &= ~ASPM_STATE_L0S_UP;
331
332 /* Check downstream direction L0s latency */
333 if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
334 (link->latency_dw.l0s > acceptable->l0s))
335 link->aspm_capable &= ~ASPM_STATE_L0S_DW;
336 /*
337 * Check L1 latency.
 338 * Every switch on the path to the root complex needs 1
 339 * more microsecond for L1. The spec doesn't mention L0s.
340 */
341 latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
342 if ((link->aspm_capable & ASPM_STATE_L1) &&
343 (latency + l1_switch_latency > acceptable->l1))
344 link->aspm_capable &= ~ASPM_STATE_L1;
345 l1_switch_latency += 1000;
346
347 link = link->parent;
313 } 348 }
314 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
315 *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
316} 349}
317 350
318static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) 351static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
319{ 352{
320 u32 support, l0s, l1, enabled;
321 struct pci_dev *child, *parent = link->pdev; 353 struct pci_dev *child, *parent = link->pdev;
322 struct pci_bus *linkbus = parent->subordinate; 354 struct pci_bus *linkbus = parent->subordinate;
355 struct aspm_register_info upreg, dwreg;
323 356
324 if (blacklist) { 357 if (blacklist) {
325 /* Set support state to 0, so we will disable ASPM later */ 358 /* Set enabled/disable so that we will disable ASPM later */
326 link->aspm_support = 0; 359 link->aspm_enabled = ASPM_STATE_ALL;
327 link->aspm_default = 0; 360 link->aspm_disable = ASPM_STATE_ALL;
328 link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
329 return; 361 return;
330 } 362 }
331 363
332 /* Configure common clock before checking latencies */ 364 /* Configure common clock before checking latencies */
333 pcie_aspm_configure_common_clock(link); 365 pcie_aspm_configure_common_clock(link);
334 366
335 /* upstream component states */ 367 /* Get upstream/downstream components' register state */
336 pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled); 368 pcie_get_aspm_reg(parent, &upreg);
337 link->aspm_support = support;
338 link->latency.l0s = l0s;
339 link->latency.l1 = l1;
340 link->aspm_enabled = enabled;
341
342 /* downstream component states, all functions have the same setting */
343 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); 369 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
344 pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled); 370 pcie_get_aspm_reg(child, &dwreg);
345 link->aspm_support &= support;
346 link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
347 link->latency.l1 = max_t(u32, link->latency.l1, l1);
348 371
349 if (!link->aspm_support) 372 /*
350 return; 373 * Setup L0s state
351 374 *
352 link->aspm_enabled &= link->aspm_support; 375 * Note that we must not enable L0s in either direction on a
376 * given link unless components on both sides of the link each
377 * support L0s.
378 */
379 if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
380 link->aspm_support |= ASPM_STATE_L0S;
381 if (dwreg.enabled & PCIE_LINK_STATE_L0S)
382 link->aspm_enabled |= ASPM_STATE_L0S_UP;
383 if (upreg.enabled & PCIE_LINK_STATE_L0S)
384 link->aspm_enabled |= ASPM_STATE_L0S_DW;
385 link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
386 link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
387
388 /* Setup L1 state */
389 if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
390 link->aspm_support |= ASPM_STATE_L1;
391 if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
392 link->aspm_enabled |= ASPM_STATE_L1;
393 link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
394 link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
395
396 /* Save default state */
353 link->aspm_default = link->aspm_enabled; 397 link->aspm_default = link->aspm_enabled;
354 398
355 /* ENDPOINT states*/ 399 /* Setup initial capable state. Will be updated later */
400 link->aspm_capable = link->aspm_support;
401 /*
 402 * If the downstream component has a PCI bridge function, don't
403 * do ASPM for now.
404 */
405 list_for_each_entry(child, &linkbus->devices, bus_list) {
406 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
407 link->aspm_disable = ASPM_STATE_ALL;
408 break;
409 }
410 }
411
412 /* Get and check endpoint acceptable latencies */
356 list_for_each_entry(child, &linkbus->devices, bus_list) { 413 list_for_each_entry(child, &linkbus->devices, bus_list) {
357 int pos; 414 int pos;
358 u32 reg32, encoding; 415 u32 reg32, encoding;
@@ -365,109 +422,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
365 422
366 pos = pci_find_capability(child, PCI_CAP_ID_EXP); 423 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
367 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); 424 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
425 /* Calculate endpoint L0s acceptable latency */
368 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 426 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
369 acceptable->l0s = calc_l0s_acceptable(encoding); 427 acceptable->l0s = calc_l0s_acceptable(encoding);
370 if (link->aspm_support & PCIE_LINK_STATE_L1) { 428 /* Calculate endpoint L1 acceptable latency */
371 encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; 429 encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
372 acceptable->l1 = calc_l1_acceptable(encoding); 430 acceptable->l1 = calc_l1_acceptable(encoding);
373 }
374 }
375}
376
377/**
378 * __pcie_aspm_check_state_one - check latency for endpoint device.
379 * @endpoint: pointer to the struct pci_dev of endpoint device
380 *
381 * TBD: The latency from the endpoint to root complex vary per switch's
382 * upstream link state above the device. Here we just do a simple check
383 * which assumes all links above the device can be in L1 state, that
384 * is we just consider the worst case. If switch's upstream link can't
385 * be put into L0S/L1, then our check is too strictly.
386 */
387static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
388{
389 u32 l1_switch_latency = 0;
390 struct aspm_latency *acceptable;
391 struct pcie_link_state *link;
392
393 link = endpoint->bus->self->link_state;
394 state &= link->aspm_support;
395 acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
396 431
397 while (link && state) { 432 pcie_aspm_check_latency(child);
398 if ((state & PCIE_LINK_STATE_L0S) &&
399 (link->latency.l0s > acceptable->l0s))
400 state &= ~PCIE_LINK_STATE_L0S;
401 if ((state & PCIE_LINK_STATE_L1) &&
402 (link->latency.l1 + l1_switch_latency > acceptable->l1))
403 state &= ~PCIE_LINK_STATE_L1;
404 link = link->parent;
405 /*
406 * Every switch on the path to root complex need 1
407 * more microsecond for L1. Spec doesn't mention L0s.
408 */
409 l1_switch_latency += 1000;
410 }
411 return state;
412}
413
414static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
415{
416 pci_power_t power_state;
417 struct pci_dev *child;
418 struct pci_bus *linkbus = link->pdev->subordinate;
419
420 /* If no child, ignore the link */
421 if (list_empty(&linkbus->devices))
422 return state;
423
424 list_for_each_entry(child, &linkbus->devices, bus_list) {
425 /*
426 * If downstream component of a link is pci bridge, we
427 * disable ASPM for now for the link
428 */
429 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
430 return 0;
431
432 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
433 child->pcie_type != PCI_EXP_TYPE_LEG_END))
434 continue;
435 /* Device not in D0 doesn't need check latency */
436 power_state = child->current_state;
437 if (power_state == PCI_D1 || power_state == PCI_D2 ||
438 power_state == PCI_D3hot || power_state == PCI_D3cold)
439 continue;
440 state = __pcie_aspm_check_state_one(child, state);
441 } 433 }
442 return state;
443} 434}
444 435
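
Two rules in the function above are easy to miss: a state only becomes supported on the link when both components advertise it, and the firmware-enabled L0s bits are recorded per direction, ASPM_STATE_L0S_UP from the downstream component's LNKCTL and ASPM_STATE_L0S_DW from the upstream one. A compilable sketch of the merge, assuming the ASPM_STATE_* encoding defined earlier in the file (L0S_UP = 1, L0S_DW = 2, L1 = 4):

#include <stdio.h>

#define ASPM_STATE_L0S_UP 1   /* transmit direction: downstream -> upstream */
#define ASPM_STATE_L0S_DW 2   /* transmit direction: upstream -> downstream */
#define ASPM_STATE_L0S    (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_L1     4

#define LNK_L0S 1             /* PCIE_LINK_STATE_L0S */
#define LNK_L1  2             /* PCIE_LINK_STATE_L1 */

int main(void)
{
        /* hypothetical register state read by pcie_get_aspm_reg() */
        unsigned up_support = LNK_L0S | LNK_L1, up_enabled = LNK_L0S;
        unsigned dw_support = LNK_L1,           dw_enabled = LNK_L1;
        unsigned support = 0, enabled = 0;

        if (up_support & dw_support & LNK_L0S)  /* both sides or nothing */
                support |= ASPM_STATE_L0S;
        if (up_support & dw_support & LNK_L1)
                support |= ASPM_STATE_L1;

        if (dw_enabled & LNK_L0S)               /* downstream dev drives UP */
                enabled |= ASPM_STATE_L0S_UP;
        if (up_enabled & LNK_L0S)               /* upstream dev drives DW */
                enabled |= ASPM_STATE_L0S_DW;
        if (up_enabled & dw_enabled & LNK_L1)
                enabled |= ASPM_STATE_L1;

        printf("support=%#x enabled=%#x\n", support, enabled);
        return 0;
}

With these inputs, enabled ends up holding an L0s bit that support lacks, the inconsistent-firmware case; pcie_config_aspm_link() only ever writes states inside aspm_capable, so such leftovers are cleaned up on the first reconfiguration.
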
445static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state) 436static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
446{ 437{
447 u16 reg16; 438 u16 reg16;
448 int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 439 int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
449 440
450 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 441 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
451 reg16 &= ~0x3; 442 reg16 &= ~0x3;
452 reg16 |= state; 443 reg16 |= val;
453 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 444 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
454} 445}
455 446
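
pcie_config_aspm_dev() is a plain read-modify-write of Link Control; the ~0x3 mask is the two-bit ASPM Control field (PCI_EXP_LNKCTL_ASPMC). A userspace sketch with a hypothetical starting value:

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_LNKCTL_ASPMC 0x0003     /* ASPM Control, bits 1:0 */

int main(void)
{
        uint16_t lnkctl = 0x0041;       /* hypothetical: L0s on, CCC set */
        uint16_t val = 0x2;             /* request L1 only */

        lnkctl = (lnkctl & ~PCI_EXP_LNKCTL_ASPMC) | val;
        printf("LNKCTL = %#06x\n", lnkctl);     /* prints 0x0042 */
        return 0;
}
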
456static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) 447static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
457{ 448{
449 u32 upstream = 0, dwstream = 0;
458 struct pci_dev *child, *parent = link->pdev; 450 struct pci_dev *child, *parent = link->pdev;
459 struct pci_bus *linkbus = parent->subordinate; 451 struct pci_bus *linkbus = parent->subordinate;
460 452
461 /* If no child, disable the link */ 453 /* Nothing to do if the link is already in the requested state */
462 if (list_empty(&linkbus->devices)) 454 state &= (link->aspm_capable & ~link->aspm_disable);
463 state = 0; 455 if (link->aspm_enabled == state)
464 /* 456 return;
465 * If the downstream component has pci bridge function, don't 457 /* Convert ASPM state to upstream/downstream ASPM register state */
466 * do ASPM now. 458 if (state & ASPM_STATE_L0S_UP)
467 */ 459 dwstream |= PCIE_LINK_STATE_L0S;
468 list_for_each_entry(child, &linkbus->devices, bus_list) { 460 if (state & ASPM_STATE_L0S_DW)
469 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 461 upstream |= PCIE_LINK_STATE_L0S;
470 return; 462 if (state & ASPM_STATE_L1) {
463 upstream |= PCIE_LINK_STATE_L1;
464 dwstream |= PCIE_LINK_STATE_L1;
471 } 465 }
472 /* 466 /*
473 * Spec 2.0 suggests all functions should be configured the 467 * Spec 2.0 suggests all functions should be configured the
@@ -475,67 +469,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
475 * upstream component first and then downstream, and vice 469 * upstream component first and then downstream, and vice
476 * versa for disabling ASPM L1. Spec doesn't mention L0S. 470 * versa for disabling ASPM L1. Spec doesn't mention L0S.
477 */ 471 */
478 if (state & PCIE_LINK_STATE_L1) 472 if (state & ASPM_STATE_L1)
479 __pcie_aspm_config_one_dev(parent, state); 473 pcie_config_aspm_dev(parent, upstream);
480
481 list_for_each_entry(child, &linkbus->devices, bus_list) 474 list_for_each_entry(child, &linkbus->devices, bus_list)
482 __pcie_aspm_config_one_dev(child, state); 475 pcie_config_aspm_dev(child, dwstream);
483 476 if (!(state & ASPM_STATE_L1))
484 if (!(state & PCIE_LINK_STATE_L1)) 477 pcie_config_aspm_dev(parent, upstream);
485 __pcie_aspm_config_one_dev(parent, state);
486 478
487 link->aspm_enabled = state; 479 link->aspm_enabled = state;
488} 480}
489 481
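
Two details above are worth spelling out. First, the direction flip: ASPM_STATE_L0S_UP is programmed into the downstream component (dwstream) and L0S_DW into the upstream one, because L0s is entered independently by the transmitter on each side of the link. Second, the write order for L1 follows the PCIe 2.0 suggestion quoted in the comment: parent before children when enabling, children before parent when disabling. A small sketch of the conversion, reusing the encodings from the earlier example:

#include <stdio.h>

#define ASPM_STATE_L0S_UP 1
#define ASPM_STATE_L0S_DW 2
#define ASPM_STATE_L1     4

#define LNK_L0S 1   /* PCIE_LINK_STATE_L0S, value written to LNKCTL */
#define LNK_L1  2   /* PCIE_LINK_STATE_L1 */

int main(void)
{
        unsigned state = ASPM_STATE_L0S_UP | ASPM_STATE_L1; /* hypothetical */
        unsigned upstream = 0, dwstream = 0;

        if (state & ASPM_STATE_L0S_UP)  /* downstream device transmits up */
                dwstream |= LNK_L0S;
        if (state & ASPM_STATE_L0S_DW)  /* upstream device transmits down */
                upstream |= LNK_L0S;
        if (state & ASPM_STATE_L1) {    /* L1 must be enabled on both */
                upstream |= LNK_L1;
                dwstream |= LNK_L1;
        }

        /* enabling L1: upstream LNKCTL first, then the downstream devices */
        printf("parent LNKCTL ASPMC <- %#x\n", upstream);
        printf("child  LNKCTL ASPMC <- %#x\n", dwstream);
        return 0;
}
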
490/* Check the whole hierarchy, and configure each link in the hierarchy */ 482static void pcie_config_aspm_path(struct pcie_link_state *link)
491static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
492 u32 state)
493{ 483{
494 struct pcie_link_state *leaf, *root = link->root; 484 while (link) {
495 485 pcie_config_aspm_link(link, policy_to_aspm_state(link));
496 state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 486 link = link->parent;
497
498 /* Check all links who have specific root port link */
499 list_for_each_entry(leaf, &link_list, sibling) {
500 if (!list_empty(&leaf->children) || (leaf->root != root))
501 continue;
502 state = pcie_aspm_check_state(leaf, state);
503 }
504 /* Check root port link too in case it hasn't children */
505 state = pcie_aspm_check_state(root, state);
506 if (link->aspm_enabled == state)
507 return;
508 /*
509 * We must change the hierarchy. See comments in
510 * __pcie_aspm_config_link for the order
511 **/
512 if (state & PCIE_LINK_STATE_L1) {
513 list_for_each_entry(leaf, &link_list, sibling) {
514 if (leaf->root == root)
515 __pcie_aspm_config_link(leaf, state);
516 }
517 } else {
518 list_for_each_entry_reverse(leaf, &link_list, sibling) {
519 if (leaf->root == root)
520 __pcie_aspm_config_link(leaf, state);
521 }
522 } 487 }
523} 488}
524 489
525/*
526 * pcie_aspm_configure_link_state: enable/disable PCI express link state
527 * @pdev: the root port or switch downstream port
528 */
529static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
530 u32 state)
531{
532 down_read(&pci_bus_sem);
533 mutex_lock(&aspm_lock);
534 __pcie_aspm_configure_link_state(link, state);
535 mutex_unlock(&aspm_lock);
536 up_read(&pci_bus_sem);
537}
538
539static void free_link_state(struct pcie_link_state *link) 490static void free_link_state(struct pcie_link_state *link)
540{ 491{
541 link->pdev->link_state = NULL; 492 link->pdev->link_state = NULL;
@@ -570,10 +521,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
570 return 0; 521 return 0;
571} 522}
572 523
573static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) 524static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
574{ 525{
575 struct pcie_link_state *link; 526 struct pcie_link_state *link;
576 int blacklist = !!pcie_aspm_sanity_check(pdev);
577 527
578 link = kzalloc(sizeof(*link), GFP_KERNEL); 528 link = kzalloc(sizeof(*link), GFP_KERNEL);
579 if (!link) 529 if (!link)
@@ -599,15 +549,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
599 link->root = link->parent->root; 549 link->root = link->parent->root;
600 550
601 list_add(&link->sibling, &link_list); 551 list_add(&link->sibling, &link_list);
602
603 pdev->link_state = link; 552 pdev->link_state = link;
604
605 /* Check ASPM capability */
606 pcie_aspm_cap_init(link, blacklist);
607
608 /* Check Clock PM capability */
609 pcie_clkpm_cap_init(link, blacklist);
610
611 return link; 553 return link;
612} 554}
613 555
@@ -618,8 +560,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
618 */ 560 */
619void pcie_aspm_init_link_state(struct pci_dev *pdev) 561void pcie_aspm_init_link_state(struct pci_dev *pdev)
620{ 562{
621 u32 state;
622 struct pcie_link_state *link; 563 struct pcie_link_state *link;
564 int blacklist = !!pcie_aspm_sanity_check(pdev);
623 565
624 if (aspm_disabled || !pdev->is_pcie || pdev->link_state) 566 if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
625 return; 567 return;
@@ -637,47 +579,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
637 goto out; 579 goto out;
638 580
639 mutex_lock(&aspm_lock); 581 mutex_lock(&aspm_lock);
640 link = pcie_aspm_setup_link_state(pdev); 582 link = alloc_pcie_link_state(pdev);
641 if (!link) 583 if (!link)
642 goto unlock; 584 goto unlock;
643 /* 585 /*
644 * Setup initial ASPM state 586 * Setup initial ASPM state. Note that we need to configure
 645 * 587 * upstream links also because their capable state can be
 646 * If link has switch, delay the link config. The leaf link 588 * updated through pcie_aspm_cap_init().
647 * initialization will config the whole hierarchy. But we must
648 * make sure BIOS doesn't set unsupported link state.
649 */ 589 */
650 if (pcie_aspm_downstream_has_switch(link)) { 590 pcie_aspm_cap_init(link, blacklist);
651 state = pcie_aspm_check_state(link, link->aspm_default); 591 pcie_config_aspm_path(link);
652 __pcie_aspm_config_link(link, state);
653 } else {
654 state = policy_to_aspm_state(link);
655 __pcie_aspm_configure_link_state(link, state);
656 }
657 592
658 /* Setup initial Clock PM state */ 593 /* Setup initial Clock PM state */
659 state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0; 594 pcie_clkpm_cap_init(link, blacklist);
660 pcie_set_clkpm(link, state); 595 pcie_set_clkpm(link, policy_to_clkpm_state(link));
661unlock: 596unlock:
662 mutex_unlock(&aspm_lock); 597 mutex_unlock(&aspm_lock);
663out: 598out:
664 up_read(&pci_bus_sem); 599 up_read(&pci_bus_sem);
665} 600}
666 601
602/* Recheck latencies and update aspm_capable for links under the root */
603static void pcie_update_aspm_capable(struct pcie_link_state *root)
604{
605 struct pcie_link_state *link;
606 BUG_ON(root->parent);
607 list_for_each_entry(link, &link_list, sibling) {
608 if (link->root != root)
609 continue;
610 link->aspm_capable = link->aspm_support;
611 }
612 list_for_each_entry(link, &link_list, sibling) {
613 struct pci_dev *child;
614 struct pci_bus *linkbus = link->pdev->subordinate;
615 if (link->root != root)
616 continue;
617 list_for_each_entry(child, &linkbus->devices, bus_list) {
618 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) &&
619 (child->pcie_type != PCI_EXP_TYPE_LEG_END))
620 continue;
621 pcie_aspm_check_latency(child);
622 }
623 }
624}
625
667/* @pdev: the endpoint device */ 626/* @pdev: the endpoint device */
668void pcie_aspm_exit_link_state(struct pci_dev *pdev) 627void pcie_aspm_exit_link_state(struct pci_dev *pdev)
669{ 628{
670 struct pci_dev *parent = pdev->bus->self; 629 struct pci_dev *parent = pdev->bus->self;
671 struct pcie_link_state *link_state = parent->link_state; 630 struct pcie_link_state *link, *root, *parent_link;
672 631
673 if (aspm_disabled || !pdev->is_pcie || !parent || !link_state) 632 if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
674 return; 633 return;
675 if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 634 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
676 parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 635 (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
677 return; 636 return;
637
678 down_read(&pci_bus_sem); 638 down_read(&pci_bus_sem);
679 mutex_lock(&aspm_lock); 639 mutex_lock(&aspm_lock);
680
681 /* 640 /*
 682 * All PCIe functions are in one slot; removing one function will remove 641
683 * the whole slot, so just wait until we are the last function left. 642 * the whole slot, so just wait until we are the last function left.
@@ -685,13 +644,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
685 if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) 644 if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
686 goto out; 645 goto out;
687 646
647 link = parent->link_state;
648 root = link->root;
649 parent_link = link->parent;
650
688 /* All functions are removed, so just disable ASPM for the link */ 651 /* All functions are removed, so just disable ASPM for the link */
689 __pcie_aspm_config_one_dev(parent, 0); 652 pcie_config_aspm_link(link, 0);
690 list_del(&link_state->sibling); 653 list_del(&link->sibling);
691 list_del(&link_state->link); 654 list_del(&link->link);
692 /* Clock PM is for endpoint device */ 655 /* Clock PM is for endpoint device */
656 free_link_state(link);
693 657
694 free_link_state(link_state); 658 /* Recheck latencies and configure upstream links */
659 pcie_update_aspm_capable(root);
660 pcie_config_aspm_path(parent_link);
695out: 661out:
696 mutex_unlock(&aspm_lock); 662 mutex_unlock(&aspm_lock);
697 up_read(&pci_bus_sem); 663 up_read(&pci_bus_sem);
@@ -700,18 +666,23 @@ out:
700/* @pdev: the root port or switch downstream port */ 666/* @pdev: the root port or switch downstream port */
701void pcie_aspm_pm_state_change(struct pci_dev *pdev) 667void pcie_aspm_pm_state_change(struct pci_dev *pdev)
702{ 668{
703 struct pcie_link_state *link_state = pdev->link_state; 669 struct pcie_link_state *link = pdev->link_state;
704 670
705 if (aspm_disabled || !pdev->is_pcie || !pdev->link_state) 671 if (aspm_disabled || !pdev->is_pcie || !link)
706 return; 672 return;
707 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 673 if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
708 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 674 (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
709 return; 675 return;
710 /* 676 /*
 711 * devices changed PM state, we should recheck if latency meets all 677 * Devices changed PM state, so recheck whether latency
 712 * functions' requirement 678 * still meets all functions' requirements
713 */ 679 */
714 pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); 680 down_read(&pci_bus_sem);
681 mutex_lock(&aspm_lock);
682 pcie_update_aspm_capable(link->root);
683 pcie_config_aspm_path(link);
684 mutex_unlock(&aspm_lock);
685 up_read(&pci_bus_sem);
715} 686}
716 687
717/* 688/*
@@ -721,7 +692,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
721void pci_disable_link_state(struct pci_dev *pdev, int state) 692void pci_disable_link_state(struct pci_dev *pdev, int state)
722{ 693{
723 struct pci_dev *parent = pdev->bus->self; 694 struct pci_dev *parent = pdev->bus->self;
724 struct pcie_link_state *link_state; 695 struct pcie_link_state *link;
725 696
726 if (aspm_disabled || !pdev->is_pcie) 697 if (aspm_disabled || !pdev->is_pcie)
727 return; 698 return;
@@ -733,12 +704,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
733 704
734 down_read(&pci_bus_sem); 705 down_read(&pci_bus_sem);
735 mutex_lock(&aspm_lock); 706 mutex_lock(&aspm_lock);
736 link_state = parent->link_state; 707 link = parent->link_state;
737 link_state->aspm_support &= ~state; 708 if (state & PCIE_LINK_STATE_L0S)
738 __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); 709 link->aspm_disable |= ASPM_STATE_L0S;
710 if (state & PCIE_LINK_STATE_L1)
711 link->aspm_disable |= ASPM_STATE_L1;
712 pcie_config_aspm_link(link, policy_to_aspm_state(link));
713
739 if (state & PCIE_LINK_STATE_CLKPM) { 714 if (state & PCIE_LINK_STATE_CLKPM) {
740 link_state->clkpm_capable = 0; 715 link->clkpm_capable = 0;
741 pcie_set_clkpm(link_state, 0); 716 pcie_set_clkpm(link, 0);
742 } 717 }
743 mutex_unlock(&aspm_lock); 718 mutex_unlock(&aspm_lock);
744 up_read(&pci_bus_sem); 719 up_read(&pci_bus_sem);
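
With the new aspm_disable mask, pci_disable_link_state() becomes sticky: a state a driver disables stays off regardless of later policy changes, instead of being clawed back by clearing aspm_support. A hedged sketch of the usual call site, a hypothetical driver's probe():

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* hypothetical driver that cannot tolerate L0s/L1 exit latency */
static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                     PCIE_LINK_STATE_L1 |
                                     PCIE_LINK_STATE_CLKPM);
        /* ... rest of probe elided ... */
        return 0;
}
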
@@ -748,7 +723,7 @@ EXPORT_SYMBOL(pci_disable_link_state);
748static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) 723static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
749{ 724{
750 int i; 725 int i;
751 struct pcie_link_state *link_state; 726 struct pcie_link_state *link;
752 727
753 for (i = 0; i < ARRAY_SIZE(policy_str); i++) 728 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
754 if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) 729 if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
@@ -761,10 +736,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
761 down_read(&pci_bus_sem); 736 down_read(&pci_bus_sem);
762 mutex_lock(&aspm_lock); 737 mutex_lock(&aspm_lock);
763 aspm_policy = i; 738 aspm_policy = i;
764 list_for_each_entry(link_state, &link_list, sibling) { 739 list_for_each_entry(link, &link_list, sibling) {
765 __pcie_aspm_configure_link_state(link_state, 740 pcie_config_aspm_link(link, policy_to_aspm_state(link));
766 policy_to_aspm_state(link_state)); 741 pcie_set_clkpm(link, policy_to_clkpm_state(link));
767 pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
768 } 742 }
769 mutex_unlock(&aspm_lock); 743 mutex_unlock(&aspm_lock);
770 up_read(&pci_bus_sem); 744 up_read(&pci_bus_sem);
@@ -802,18 +776,28 @@ static ssize_t link_state_store(struct device *dev,
802 size_t n) 776 size_t n)
803{ 777{
804 struct pci_dev *pdev = to_pci_dev(dev); 778 struct pci_dev *pdev = to_pci_dev(dev);
805 int state; 779 struct pcie_link_state *link, *root = pdev->link_state->root;
780 u32 val = buf[0] - '0', state = 0;
806 781
807 if (n < 1) 782 if (n < 1 || val > 3)
808 return -EINVAL; 783 return -EINVAL;
809 state = buf[0]-'0';
810 if (state >= 0 && state <= 3) {
811 /* setup link aspm state */
812 pcie_aspm_configure_link_state(pdev->link_state, state);
813 return n;
814 }
815 784
816 return -EINVAL; 785 /* Convert requested state to ASPM state */
786 if (val & PCIE_LINK_STATE_L0S)
787 state |= ASPM_STATE_L0S;
788 if (val & PCIE_LINK_STATE_L1)
789 state |= ASPM_STATE_L1;
790
791 down_read(&pci_bus_sem);
792 mutex_lock(&aspm_lock);
793 list_for_each_entry(link, &link_list, sibling) {
794 if (link->root != root)
795 continue;
796 pcie_config_aspm_link(link, state);
797 }
798 mutex_unlock(&aspm_lock);
799 up_read(&pci_bus_sem);
800 return n;
817} 801}
818 802
819static ssize_t clk_ctl_show(struct device *dev, 803static ssize_t clk_ctl_show(struct device *dev,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 13ffdc35ea0e..52f84fca9f7d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -187,14 +187,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
187 */ 187 */
188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
189{ 189{
190 struct pcie_port_data *port_data = pci_get_drvdata(dev);
191 int irq, interrupt_mode = PCIE_PORT_NO_IRQ; 190 int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
192 int i; 191 int i;
193 192
194 /* Check MSI quirk */
195 if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
196 goto Fallback;
197
198 /* Try to use MSI-X if supported */ 193 /* Try to use MSI-X if supported */
199 if (!pcie_port_enable_msix(dev, vectors, mask)) 194 if (!pcie_port_enable_msix(dev, vectors, mask))
200 return PCIE_PORT_MSIX_MODE; 195 return PCIE_PORT_MSIX_MODE;
@@ -203,7 +198,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
203 if (!pci_enable_msi(dev)) 198 if (!pci_enable_msi(dev))
204 interrupt_mode = PCIE_PORT_MSI_MODE; 199 interrupt_mode = PCIE_PORT_MSI_MODE;
205 200
206 Fallback:
207 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) 201 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
208 interrupt_mode = PCIE_PORT_INTx_MODE; 202 interrupt_mode = PCIE_PORT_INTx_MODE;
209 203
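
The removed quirk used to steer Intel MCH root ports straight to the legacy path; with quirk_pcie_mch() now setting dev->no_msi instead (see the quirks.c hunk below), pci_enable_msi()/pci_enable_msix() fail on their own and the generic fallback chain is enough. A condensed sketch of that chain, with illustrative names:

#include <linux/pci.h>

enum { PORT_NO_IRQ, PORT_INTX, PORT_MSI, PORT_MSIX }; /* illustrative */

static int pick_irq_mode(struct pci_dev *dev)
{
        struct msix_entry entry = { .entry = 0 };

        if (!pci_enable_msix(dev, &entry, 1))   /* returns 0 on success */
                return PORT_MSIX;
        if (!pci_enable_msi(dev))
                return PORT_MSI;
        if (dev->pin)                           /* legacy INTx wired up? */
                return PORT_INTX;
        return PORT_NO_IRQ;
}
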
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 091ce70051e0..6df5c984a791 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -205,6 +205,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
205 205
206 /* If fatal, restore cfg space for possible link reset at upstream */ 206 /* If fatal, restore cfg space for possible link reset at upstream */
207 if (dev->error_state == pci_channel_io_frozen) { 207 if (dev->error_state == pci_channel_io_frozen) {
208 dev->state_saved = true;
208 pci_restore_state(dev); 209 pci_restore_state(dev);
209 pcie_portdrv_restore_config(dev); 210 pcie_portdrv_restore_config(dev);
210 pci_enable_pcie_error_reporting(dev); 211 pci_enable_pcie_error_reporting(dev);
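
pci_restore_state() only restores when the saved-config buffer is marked valid, normally by an earlier pci_save_state(); after a fatal link error there may have been no such call, which is why the hunk forces state_saved before restoring. The normal pairing, for contrast, is a sketch like:

#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);           /* sets pdev->state_saved */
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int example_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);        /* consumes state_saved */
        return 0;
}
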
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 40e75f6a5056..8105e32117f6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
235 res->start = l64; 235 res->start = l64;
236 res->end = l64 + sz64; 236 res->end = l64 + sz64;
237 dev_printk(KERN_DEBUG, &dev->dev, 237 dev_printk(KERN_DEBUG, &dev->dev,
238 "reg %x 64bit mmio: %pR\n", pos, res); 238 "reg %x %s: %pR\n", pos,
239 (res->flags & IORESOURCE_PREFETCH) ?
240 "64bit mmio pref" : "64bit mmio",
241 res);
239 } 242 }
240 243
241 res->flags |= IORESOURCE_MEM_64; 244 res->flags |= IORESOURCE_MEM_64;
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
249 res->end = l + sz; 252 res->end = l + sz;
250 253
251 dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, 254 dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
252 (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio", 255 (res->flags & IORESOURCE_IO) ? "io port" :
256 ((res->flags & IORESOURCE_PREFETCH) ?
257 "32bit mmio pref" : "32bit mmio"),
253 res); 258 res);
254 } 259 }
255 260
@@ -692,6 +697,23 @@ static void set_pcie_port_type(struct pci_dev *pdev)
692 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 697 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
693} 698}
694 699
700static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
701{
702 int pos;
703 u16 reg16;
704 u32 reg32;
705
706 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
707 if (!pos)
708 return;
709 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
710 if (!(reg16 & PCI_EXP_FLAGS_SLOT))
711 return;
712 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
713 if (reg32 & PCI_EXP_SLTCAP_HPC)
714 pdev->is_hotplug_bridge = 1;
715}
716
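
set_pcie_hotplug_bridge() keys off two bits: PCI_EXP_FLAGS_SLOT (bit 8 of the PCIe capability flags, set when the slot register block is implemented) and PCI_EXP_SLTCAP_HPC (bit 6 of Slot Capabilities, Hot-Plug Capable). A userspace decode of hypothetical register values:

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_FLAGS_SLOT  0x0100      /* slot implemented */
#define PCI_EXP_SLTCAP_HPC  0x00000040  /* hot-plug capable */

int main(void)
{
        uint16_t flags  = 0x0142;       /* hypothetical PCIe cap flags */
        uint32_t sltcap = 0x00000060;   /* hypothetical slot capabilities */

        if ((flags & PCI_EXP_FLAGS_SLOT) && (sltcap & PCI_EXP_SLTCAP_HPC))
                printf("hotplug-capable bridge\n");
        return 0;
}
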
695#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 717#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
696 718
697/** 719/**
@@ -799,6 +821,7 @@ int pci_setup_device(struct pci_dev *dev)
799 pci_read_irq(dev); 821 pci_read_irq(dev);
800 dev->transparent = ((dev->class & 0xff) == 1); 822 dev->transparent = ((dev->class & 0xff) == 1);
801 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); 823 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
824 set_pcie_hotplug_bridge(dev);
802 break; 825 break;
803 826
804 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ 827 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
@@ -1009,6 +1032,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1009 /* Fix up broken headers */ 1032 /* Fix up broken headers */
1010 pci_fixup_device(pci_fixup_header, dev); 1033 pci_fixup_device(pci_fixup_header, dev);
1011 1034
1035 /* Clear the state_saved flag. */
1036 dev->state_saved = false;
1037
1012 /* Initialize various capabilities */ 1038 /* Initialize various capabilities */
1013 pci_init_capabilities(dev); 1039 pci_init_capabilities(dev);
1014 1040
@@ -1061,8 +1087,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1061 if (dev && !dev->is_added) /* new device? */ 1087 if (dev && !dev->is_added) /* new device? */
1062 nr++; 1088 nr++;
1063 1089
1064 if ((dev && dev->multifunction) || 1090 if (dev && dev->multifunction) {
1065 (!dev && pcibios_scan_all_fns(bus, devfn))) {
1066 for (fn = 1; fn < 8; fn++) { 1091 for (fn = 1; fn < 8; fn++) {
1067 dev = pci_scan_single_device(bus, devfn + fn); 1092 dev = pci_scan_single_device(bus, devfn + fn);
1068 if (dev) { 1093 if (dev) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 06b965623962..6099facecd79 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -31,8 +31,6 @@ int isa_dma_bridge_buggy;
31EXPORT_SYMBOL(isa_dma_bridge_buggy); 31EXPORT_SYMBOL(isa_dma_bridge_buggy);
32int pci_pci_problems; 32int pci_pci_problems;
33EXPORT_SYMBOL(pci_pci_problems); 33EXPORT_SYMBOL(pci_pci_problems);
34int pcie_mch_quirk;
35EXPORT_SYMBOL(pcie_mch_quirk);
36 34
37#ifdef CONFIG_PCI_QUIRKS 35#ifdef CONFIG_PCI_QUIRKS
38/* 36/*
@@ -992,7 +990,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
992 990
993static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) 991static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
994{ 992{
995 /* set sb600/sb700/sb800 sata to ahci mode */ 993 /* set SBX00 SATA in IDE mode to AHCI mode */
996 u8 tmp; 994 u8 tmp;
997 995
998 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); 996 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
@@ -1011,6 +1009,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 1009DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1010DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1013DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
1013DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
1014 1014
1015/* 1015/*
1016 * Serverworks CSB5 IDE does not fully support native mode 1016 * Serverworks CSB5 IDE does not fully support native mode
@@ -1201,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1201 switch(dev->subsystem_device) { 1201 switch(dev->subsystem_device) {
1202 case 0x00b8: /* Compaq Evo D510 CMT */ 1202 case 0x00b8: /* Compaq Evo D510 CMT */
1203 case 0x00b9: /* Compaq Evo D510 SFF */ 1203 case 0x00b9: /* Compaq Evo D510 SFF */
1204 case 0x00ba: /* Compaq Evo D510 USDT */
1204 /* Motherboard doesn't have Host bridge 1205 /* Motherboard doesn't have Host bridge
1205 * subvendor/subdevice IDs and on-board VGA 1206 * subvendor/subdevice IDs and on-board VGA
1206 * controller is disabled if an AGP card is 1207 * controller is disabled if an AGP card is
@@ -1499,7 +1500,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a
1499 1500
1500static void __devinit quirk_pcie_mch(struct pci_dev *pdev) 1501static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
1501{ 1502{
1502 pcie_mch_quirk = 1; 1503 pci_msi_off(pdev);
1504 pdev->no_msi = 1;
1503} 1505}
1504DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); 1506DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
1505DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); 1507DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
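
The reworked quirk_pcie_mch() shows the common shape of a final-stage quirk: a small function plus one DECLARE_PCI_FIXUP_* line per affected device ID. A hedged sketch with made-up IDs:

#include <linux/pci.h>

/* hypothetical device whose MSI implementation is broken */
static void quirk_no_msi_example(struct pci_dev *pdev)
{
        pci_msi_off(pdev);      /* clear the enable bits in config space */
        pdev->no_msi = 1;       /* and keep the core from setting them again */
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_no_msi_example);
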
@@ -1567,10 +1569,8 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1567 return; 1569 return;
1568 1570
1569 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; 1571 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1570 1572 dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
1571 printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", 1573 dev->vendor, dev->device);
1572 dev->vendor, dev->device);
1573 return;
1574} 1574}
1575DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); 1575DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1576DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); 1576DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
@@ -1612,8 +1612,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1612 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; 1612 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
1613 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); 1613 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
1614 1614
1615 printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", 1615 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1616 dev->vendor, dev->device); 1616 dev->vendor, dev->device);
1617} 1617}
1618DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1618DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1619DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1619DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
@@ -1645,8 +1645,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1645 1645
1646 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); 1646 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
1647 1647
1648 printk(KERN_INFO "disabled boot interrupts on PCI device" 1648 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1649 "0x%04x:0x%04x\n", dev->vendor, dev->device); 1649 dev->vendor, dev->device);
1650} 1650}
1651DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1651DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1652DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1652DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
@@ -1676,8 +1676,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1676 pci_config_dword &= ~AMD_813X_NOIOAMODE; 1676 pci_config_dword &= ~AMD_813X_NOIOAMODE;
1677 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); 1677 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
1678 1678
1679 printk(KERN_INFO "disabled boot interrupts on PCI device " 1679 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1680 "0x%04x:0x%04x\n", dev->vendor, dev->device); 1680 dev->vendor, dev->device);
1681} 1681}
1682DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); 1682DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1683DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); 1683DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
@@ -1693,14 +1693,13 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
1693 1693
1694 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); 1694 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
1695 if (!pci_config_word) { 1695 if (!pci_config_word) {
1696 printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " 1696 dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
1697 "already disabled\n", 1697 "already disabled\n", dev->vendor, dev->device);
1698 dev->vendor, dev->device);
1699 return; 1698 return;
1700 } 1699 }
1701 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); 1700 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
1702 printk(KERN_INFO "disabled boot interrupts on PCI device " 1701 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1703 "0x%04x:0x%04x\n", dev->vendor, dev->device); 1702 dev->vendor, dev->device);
1704} 1703}
1705DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); 1704DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
1706DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); 1705DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
@@ -2382,8 +2381,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2382} 2381}
2383 2382
2384DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); 2383DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2384DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2385 2385
2386DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); 2386DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2387DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2387 2388
2388static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 2389static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
2389{ 2390{
@@ -2492,6 +2493,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
2492DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); 2493DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2493DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); 2494DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2494DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); 2495DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2496DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
2495 2497
2496#endif /* CONFIG_PCI_IOV */ 2498#endif /* CONFIG_PCI_IOV */
2497 2499
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index e8cb5051c311..ec415352d9ba 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from)
113 return b; 113 return b;
114} 114}
115 115
116#ifdef CONFIG_PCI_LEGACY
117/**
118 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
119 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
120 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
121 * @from: Previous PCI device found in search, or %NULL for new search.
122 *
123 * Iterates through the list of known PCI devices. If a PCI device is found
124 * with a matching @vendor and @device, a pointer to its device structure is
125 * returned. Otherwise, %NULL is returned.
126 * A new search is initiated by passing %NULL as the @from argument.
127 * Otherwise if @from is not %NULL, searches continue from next device
128 * on the global list.
129 *
130 * NOTE: Do not use this function any more; use pci_get_device() instead, as
131 * the PCI device returned by this function can disappear at any moment in
132 * time.
133 */
134struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
135 struct pci_dev *from)
136{
137 struct pci_dev *pdev;
138
139 pci_dev_get(from);
140 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
141 pci_dev_put(pdev);
142 return pdev;
143}
144EXPORT_SYMBOL(pci_find_device);
145#endif /* CONFIG_PCI_LEGACY */
146
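
The removed kerneldoc already told callers to use the refcounted pci_get_device() instead of pci_find_device(). Its recommended pattern, as a sketch:

#include <linux/pci.h>

static void walk_matching_devices(unsigned int vendor, unsigned int device)
{
        struct pci_dev *pdev = NULL;

        while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
                /* pdev holds a reference here; safe against removal */
                dev_info(&pdev->dev, "found matching device\n");
        }
        /* pci_get_device() drops the previous reference on each
         * iteration and returns NULL at the end, so no explicit
         * pci_dev_put() is needed unless the loop exits early. */
}
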
147/** 116/**
148 * pci_get_slot - locate PCI device for a given PCI slot 117 * pci_get_slot - locate PCI device for a given PCI slot
149 * @bus: PCI bus on which desired PCI device resides 118 * @bus: PCI bus on which desired PCI device resides
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7c443b4583ab..cb1a027eb552 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -309,7 +309,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
309 since these windows have 4K granularity and the IO ranges 309 since these windows have 4K granularity and the IO ranges
310 of non-bridge PCI devices are limited to 256 bytes. 310 of non-bridge PCI devices are limited to 256 bytes.
311 We must be careful with the ISA aliasing though. */ 311 We must be careful with the ISA aliasing though. */
312static void pbus_size_io(struct pci_bus *bus) 312static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
313{ 313{
314 struct pci_dev *dev; 314 struct pci_dev *dev;
315 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 315 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
@@ -336,6 +336,8 @@ static void pbus_size_io(struct pci_bus *bus)
336 size1 += r_size; 336 size1 += r_size;
337 } 337 }
338 } 338 }
339 if (size < min_size)
340 size = min_size;
339/* To be fixed in 2.5: we should have sort of HAVE_ISA 341/* To be fixed in 2.5: we should have sort of HAVE_ISA
340 flag in the struct pci_bus. */ 342 flag in the struct pci_bus. */
341#if defined(CONFIG_ISA) || defined(CONFIG_EISA) 343#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
@@ -354,7 +356,8 @@ static void pbus_size_io(struct pci_bus *bus)
354 356
355/* Calculate the size of the bus and minimal alignment which 357/* Calculate the size of the bus and minimal alignment which
356 guarantees that all child resources fit in this size. */ 358 guarantees that all child resources fit in this size. */
357static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type) 359static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
360 unsigned long type, resource_size_t min_size)
358{ 361{
359 struct pci_dev *dev; 362 struct pci_dev *dev;
360 resource_size_t min_align, align, size; 363 resource_size_t min_align, align, size;
@@ -404,6 +407,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
404 mem64_mask &= r->flags & IORESOURCE_MEM_64; 407 mem64_mask &= r->flags & IORESOURCE_MEM_64;
405 } 408 }
406 } 409 }
410 if (size < min_size)
411 size = min_size;
407 412
408 align = 0; 413 align = 0;
409 min_align = 0; 414 min_align = 0;
@@ -483,6 +488,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
483{ 488{
484 struct pci_dev *dev; 489 struct pci_dev *dev;
485 unsigned long mask, prefmask; 490 unsigned long mask, prefmask;
491 resource_size_t min_mem_size = 0, min_io_size = 0;
486 492
487 list_for_each_entry(dev, &bus->devices, bus_list) { 493 list_for_each_entry(dev, &bus->devices, bus_list) {
488 struct pci_bus *b = dev->subordinate; 494 struct pci_bus *b = dev->subordinate;
@@ -512,8 +518,12 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
512 518
513 case PCI_CLASS_BRIDGE_PCI: 519 case PCI_CLASS_BRIDGE_PCI:
514 pci_bridge_check_ranges(bus); 520 pci_bridge_check_ranges(bus);
521 if (bus->self->is_hotplug_bridge) {
522 min_io_size = pci_hotplug_io_size;
523 min_mem_size = pci_hotplug_mem_size;
524 }
515 default: 525 default:
516 pbus_size_io(bus); 526 pbus_size_io(bus, min_io_size);
517 /* If the bridge supports prefetchable range, size it 527 /* If the bridge supports prefetchable range, size it
518 separately. If it doesn't, or its prefetchable window 528 separately. If it doesn't, or its prefetchable window
519 has already been allocated by arch code, try 529 has already been allocated by arch code, try
@@ -521,9 +531,11 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
521 resources. */ 531 resources. */
522 mask = IORESOURCE_MEM; 532 mask = IORESOURCE_MEM;
523 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; 533 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
524 if (pbus_size_mem(bus, prefmask, prefmask)) 534 if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
525 mask = prefmask; /* Success, size non-prefetch only. */ 535 mask = prefmask; /* Success, size non-prefetch only. */
526 pbus_size_mem(bus, mask, IORESOURCE_MEM); 536 else
537 min_mem_size += min_mem_size;
538 pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
527 break; 539 break;
528 } 540 }
529} 541}
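
The min_size plumbing means a hotplug bridge's windows are sized to at least pci_hotplug_io_size / pci_hotplug_mem_size even while the slot is empty, and the curious `min_mem_size += min_mem_size` doubles the non-prefetch minimum when the prefetchable window could not be sized separately, presumably because one window must then cover both kinds of BARs. A userspace sketch of the clamp with hypothetical minimums:

#include <stdio.h>
#include <stdint.h>

static uint64_t clamp_min(uint64_t computed, uint64_t min_size)
{
        return computed < min_size ? min_size : computed;
}

int main(void)
{
        uint64_t min_io = 4096, min_mem = 2 << 20;  /* hypothetical minimums */
        int pref_sized_separately = 0;              /* pretend sizing failed */

        if (!pref_sized_separately)
                min_mem += min_mem;     /* one window must serve both types */

        printf("io >= %llu bytes, mem >= %llu bytes\n",
               (unsigned long long)clamp_min(0, min_io),
               (unsigned long long)clamp_min(0, min_mem));
        return 0;
}
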
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 88cdd1a937d6..706f82d8111f 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
119 119
120 return err; 120 return err;
121} 121}
122EXPORT_SYMBOL(pci_claim_resource);
122 123
123#ifdef CONFIG_PCI_QUIRKS 124#ifdef CONFIG_PCI_QUIRKS
124void pci_disable_bridge_window(struct pci_dev *dev) 125void pci_disable_bridge_window(struct pci_dev *dev)
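
Exporting pci_claim_resource() makes it callable from modules. A hedged sketch of claiming every populated BAR of a device after its resources have been assigned:

#include <linux/pci.h>

static int claim_all_bars(struct pci_dev *dev)
{
        int i, ret;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                if (!dev->resource[i].flags)
                        continue;       /* BAR not implemented */
                ret = pci_claim_resource(dev, i);
                if (ret)
                        return ret;     /* window conflict or no parent */
        }
        return 0;
}
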