Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dmar.c                   73
-rw-r--r--  drivers/pci/hotplug/pciehp.h          2
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c     7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c     15
-rw-r--r--  drivers/pci/intr_remapping.c         21
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c   48
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c        2
-rw-r--r--  drivers/pci/quirks.c                122
8 files changed, 229 insertions(+), 61 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 519f5f91e765..5f333403c2ea 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -332,6 +332,14 @@ parse_dmar_table(void)
         entry_header = (struct acpi_dmar_header *)(dmar + 1);
         while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
+                /* Avoid looping forever on bad ACPI tables */
+                if (entry_header->length == 0) {
+                        printk(KERN_WARNING PREFIX
+                                "Invalid 0-length structure\n");
+                        ret = -EINVAL;
+                        break;
+                }
+
                 dmar_table_print_dmar_entry(entry_header);
 
                 switch (entry_header->type) {
@@ -494,7 +502,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         int map_size;
         u32 ver;
         static int iommu_allocated = 0;
-        int agaw;
+        int agaw = 0;
 
         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
         if (!iommu)
@@ -510,6 +518,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
         agaw = iommu_calculate_agaw(iommu);
         if (agaw < 0) {
                 printk(KERN_ERR
@@ -517,6 +526,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                         iommu->seq_id);
                 goto error;
         }
+#endif
         iommu->agaw = agaw;
 
         /* the registers might be more than one page */
@@ -574,19 +584,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
         }
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+        u32 fault;
+        int head;
+        struct q_inval *qi = iommu->qi;
+        int wait_index = (index + 1) % QI_LENGTH;
+
+        fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+        /*
+         * If IQE happens, the head points to the descriptor associated
+         * with the error. No new descriptors are fetched until the IQE
+         * is cleared.
+         */
+        if (fault & DMA_FSTS_IQE) {
+                head = readl(iommu->reg + DMAR_IQH_REG);
+                if ((head >> 4) == index) {
+                        memcpy(&qi->desc[index], &qi->desc[wait_index],
+                                        sizeof(struct qi_desc));
+                        __iommu_flush_cache(iommu, &qi->desc[index],
+                                        sizeof(struct qi_desc));
+                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+                        return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+        int rc = 0;
         struct q_inval *qi = iommu->qi;
         struct qi_desc *hw, wait_desc;
         int wait_index, index;
         unsigned long flags;
 
         if (!qi)
-                return;
+                return 0;
 
         hw = qi->desc;
 
@@ -604,7 +644,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
         hw[index] = *desc;
 
-        wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
         hw[wait_index] = wait_desc;
@@ -615,13 +656,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
         qi->free_cnt -= 2;
 
-        spin_lock(&iommu->register_lock);
         /*
          * update the HW tail register indicating the presence of
          * new descriptors.
          */
         writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-        spin_unlock(&iommu->register_lock);
 
         while (qi->desc_status[wait_index] != QI_DONE) {
                 /*
@@ -631,15 +670,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
                  * a deadlock where the interrupt context can wait indefinitely
                  * for free slots in the queue.
                  */
+                rc = qi_check_fault(iommu, index);
+                if (rc)
+                        goto out;
+
                 spin_unlock(&qi->q_lock);
                 cpu_relax();
                 spin_lock(&qi->q_lock);
         }
-
-        qi->desc_status[index] = QI_DONE;
+out:
+        qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
         reclaim_free_desc(qi);
         spin_unlock_irqrestore(&qi->q_lock, flags);
+
+        return rc;
 }
 
 /*
@@ -652,13 +697,13 @@ void qi_global_iec(struct intel_iommu *iommu)
         desc.low = QI_IEC_TYPE;
         desc.high = 0;
 
+        /* should never fail */
         qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type, int non_present_entry_flush)
 {
-
         struct qi_desc desc;
 
         if (non_present_entry_flush) {
@@ -672,10 +717,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                 | QI_CC_GRAN(type) | QI_CC_TYPE;
         desc.high = 0;
 
-        qi_submit_sync(&desc, iommu);
-
-        return 0;
-
+        return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -705,10 +747,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                 | QI_IOTLB_AM(size_order);
 
-        qi_submit_sync(&desc, iommu);
-
-        return 0;
-
+        return qi_submit_sync(&desc, iommu);
 }
 
 /*
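
Note: with this change qi_submit_sync() reports queued-invalidation failures (returning -EINVAL after an Invalidation Queue Error) instead of returning void, and qi_flush_context()/qi_flush_iotlb() simply forward that result. A minimal caller sketch, not part of the patch (the wrapper name and warning message are illustrative assumptions):

        /* Illustrative only: submit one descriptor and propagate any error. */
        static int example_qi_flush(struct intel_iommu *iommu, struct qi_desc *desc)
        {
                int rc = qi_submit_sync(desc, iommu);

                if (rc)
                        printk(KERN_WARNING "DMAR: queued invalidation failed (%d)\n", rc);
                return rc;
        }
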
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index db85284ffb62..39ae37589fda 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -111,6 +111,7 @@ struct controller {
         int cmd_busy;
         unsigned int no_cmd_complete:1;
         unsigned int link_active_reporting:1;
+        unsigned int notification_enabled:1;
 };
 
 #define INT_BUTTON_IGNORE       0
@@ -170,6 +171,7 @@ extern int pciehp_configure_device(struct slot *p_slot);
 extern int pciehp_unconfigure_device(struct slot *p_slot);
 extern void pciehp_queue_pushbutton_work(struct work_struct *work);
 struct controller *pcie_init(struct pcie_device *dev);
+int pcie_init_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
 int pcie_enable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c2485542f543..681e3912b821 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -434,6 +434,13 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
                 goto err_out_release_ctlr;
         }
 
+        /* Enable events after we have setup the data structures */
+        rc = pcie_init_notification(ctrl);
+        if (rc) {
+                ctrl_err(ctrl, "Notification initialization failed\n");
+                goto err_out_release_ctlr;
+        }
+
         /* Check if slot is occupied */
         t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
         t_slot->hpc_ops->get_adapter_status(t_slot, &value);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 71a8012886b0..7a16c6897bb9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -934,7 +934,7 @@ static void pcie_disable_notification(struct controller *ctrl)
                 ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
 
-static int pcie_init_notification(struct controller *ctrl)
+int pcie_init_notification(struct controller *ctrl)
 {
         if (pciehp_request_irq(ctrl))
                 return -1;
@@ -942,13 +942,17 @@ static int pcie_init_notification(struct controller *ctrl)
                 pciehp_free_irq(ctrl);
                 return -1;
         }
+        ctrl->notification_enabled = 1;
         return 0;
 }
 
 static void pcie_shutdown_notification(struct controller *ctrl)
 {
-        pcie_disable_notification(ctrl);
-        pciehp_free_irq(ctrl);
+        if (ctrl->notification_enabled) {
+                pcie_disable_notification(ctrl);
+                pciehp_free_irq(ctrl);
+                ctrl->notification_enabled = 0;
+        }
 }
 
 static int pcie_init_slot(struct controller *ctrl)
@@ -1110,13 +1114,8 @@ struct controller *pcie_init(struct pcie_device *dev)
         if (pcie_init_slot(ctrl))
                 goto abort_ctrl;
 
-        if (pcie_init_notification(ctrl))
-                goto abort_slot;
-
         return ctrl;
 
-abort_slot:
-        pcie_cleanup_slot(ctrl);
 abort_ctrl:
         kfree(ctrl);
 abort:
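
Note: after this reordering, event notification is armed from pciehp_probe() only once the controller and slot data structures exist, and torn down only if it actually came up. A condensed sketch of the resulting flow (illustrative; error handling abbreviated, not literal patch code):

        ctrl = pcie_init(dev);                  /* allocate ctrl, init slot; no IRQ yet */
        rc = pcie_init_notification(ctrl);      /* request IRQ, enable events,
                                                   set ctrl->notification_enabled = 1 */
        if (rc)
                goto err_out_release_ctlr;
        /* pcie_shutdown_notification() later frees the IRQ only when
           notification_enabled is set, so an aborted probe cannot double-free. */
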
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 5a57753ea9fc..8e44db040db7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -208,7 +208,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
         struct qi_desc desc;
 
@@ -216,7 +216,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
                 | QI_IEC_SELECTIVE;
         desc.high = 0;
 
-        qi_submit_sync(&desc, iommu);
+        return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -284,6 +284,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+        int rc;
         int index;
         struct irte *irte;
         struct intel_iommu *iommu;
@@ -304,14 +305,15 @@ int modify_irte(int irq, struct irte *irte_modified)
         set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
         __iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-        qi_flush_iec(iommu, index, 0);
-
+        rc = qi_flush_iec(iommu, index, 0);
         spin_unlock(&irq_2_ir_lock);
-        return 0;
+
+        return rc;
 }
 
 int flush_irte(int irq)
 {
+        int rc;
         int index;
         struct intel_iommu *iommu;
         struct irq_2_iommu *irq_iommu;
@@ -327,10 +329,10 @@ int flush_irte(int irq)
 
         index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         spin_unlock(&irq_2_ir_lock);
 
-        return 0;
+        return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -356,6 +358,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+        int rc = 0;
         int index, i;
         struct irte *irte;
         struct intel_iommu *iommu;
@@ -376,7 +379,7 @@ int free_irte(int irq)
         if (!irq_iommu->sub_handle) {
                 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                         set_64bit((unsigned long *)irte, 0);
-                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+                rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         }
 
         irq_iommu->iommu = NULL;
@@ -386,7 +389,7 @@ int free_irte(int irq)
 
         spin_unlock(&irq_2_ir_lock);
 
-        return 0;
+        return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aac7006949f1..d0c973685868 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -108,6 +108,34 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
 }
 #endif /* 0 */
 
+
+static void set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+        bool enable = *((bool *)data);
+
+        if (dev->pcie_type != PCIE_RC_PORT &&
+            dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
+            dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
+                return;
+
+        if (enable)
+                pci_enable_pcie_error_reporting(dev);
+        else
+                pci_disable_pcie_error_reporting(dev);
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+                                                   bool enable)
+{
+        set_device_error_reporting(dev, &enable);
+        pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
 static int find_device_iter(struct device *device, void *data)
 {
         struct pci_dev *dev;
@@ -525,15 +553,11 @@ void aer_enable_rootport(struct aer_rpc *rpc)
         pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
         pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
 
-        /* Enable Root Port device reporting error itself */
-        pci_read_config_word(pdev, pos+PCI_EXP_DEVCTL, &reg16);
-        reg16 = reg16 |
-                PCI_EXP_DEVCTL_CERE |
-                PCI_EXP_DEVCTL_NFERE |
-                PCI_EXP_DEVCTL_FERE |
-                PCI_EXP_DEVCTL_URRE;
-        pci_write_config_word(pdev, pos+PCI_EXP_DEVCTL,
-                reg16);
+        /*
+         * Enable error reporting for the root port device and downstream port
+         * devices.
+         */
+        set_downstream_devices_error_reporting(pdev, true);
 
         /* Enable Root Port's interrupt in response to error messages */
         pci_write_config_dword(pdev,
@@ -553,6 +577,12 @@ static void disable_root_aer(struct aer_rpc *rpc)
         u32 reg32;
         int pos;
 
+        /*
+         * Disable error reporting for the root port device and downstream port
+         * devices.
+         */
+        set_downstream_devices_error_reporting(pdev, false);
+
         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
         /* Disable Root's interrupt in response to error messages */
         pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9b874eaeb9f..248b4db91552 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -97,8 +97,6 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
 
         pcie_portdrv_save_config(dev);
 
-        pci_enable_pcie_error_reporting(dev);
-
         return 0;
 }
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index baad093aafe3..f20d55368edb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1584,6 +1584,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
  */
 #define AMD_813X_MISC           0x40
 #define AMD_813X_NOIOAMODE      (1<<0)
+#define AMD_813X_REV_B2         0x13
 
 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 {
@@ -1591,6 +1592,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 
         if (noioapicquirk)
                 return;
+        if (dev->revision == AMD_813X_REV_B2)
+                return;
 
         pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
         pci_config_dword &= ~AMD_813X_NOIOAMODE;
@@ -1981,7 +1984,6 @@ static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
                         quirk_msi_ht_cap);
 
-
 /* The nVidia CK804 chipset may have 2 HT MSI mappings.
  * MSI are supported if the MSI capability set in any of these mappings.
  */
@@ -2032,6 +2034,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
                          PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
                          ht_enable_msi_mapping);
 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
+                         ht_enable_msi_mapping);
+
 /* The P5N32-SLI Premium motherboard from Asus has a problem with msi
  * for the MCP55 NIC. It is not yet determined whether the msi problem
  * also affects other devices. As for now, turn off msi for this device.
@@ -2048,10 +2053,100 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
                         PCI_DEVICE_ID_NVIDIA_NVENET_15,
                         nvenet_msi_disable);
 
-static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
 {
         struct pci_dev *host_bridge;
+        int pos;
+        int i, dev_no;
+        int found = 0;
+
+        dev_no = dev->devfn >> 3;
+        for (i = dev_no; i >= 0; i--) {
+                host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
+                if (!host_bridge)
+                        continue;
+
+                pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
+                if (pos != 0) {
+                        found = 1;
+                        break;
+                }
+                pci_dev_put(host_bridge);
+        }
+
+        if (!found)
+                return;
+
+        /* root did that ! */
+        if (msi_ht_cap_enabled(host_bridge))
+                goto out;
+
+        ht_enable_msi_mapping(dev);
+
+out:
+        pci_dev_put(host_bridge);
+}
+
+static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
+{
+        int pos, ttl = 48;
+
+        pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+        while (pos && ttl--) {
+                u8 flags;
+
+                if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+                                         &flags) == 0) {
+                        dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+
+                        pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
+                                              flags & ~HT_MSI_FLAGS_ENABLE);
+                }
+                pos = pci_find_next_ht_capability(dev, pos,
+                                                  HT_CAPTYPE_MSI_MAPPING);
+        }
+}
+
+static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+{
         int pos, ttl = 48;
+        int found = 0;
+
+        /* check if there is HT MSI cap or enabled on this device */
+        pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+        while (pos && ttl--) {
+                u8 flags;
+
+                if (found < 1)
+                        found = 1;
+                if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+                                         &flags) == 0) {
+                        if (flags & HT_MSI_FLAGS_ENABLE) {
+                                if (found < 2) {
+                                        found = 2;
+                                        break;
+                                }
+                        }
+                }
+                pos = pci_find_next_ht_capability(dev, pos,
+                                                  HT_CAPTYPE_MSI_MAPPING);
+        }
+
+        return found;
+}
+
+static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+{
+        struct pci_dev *host_bridge;
+        int pos;
+        int found;
+
+        /* check if there is HT MSI cap or enabled on this device */
+        found = ht_check_msi_mapping(dev);
+
+        /* no HT MSI CAP */
+        if (found == 0)
+                return;
 
         /*
          * HT MSI mapping should be disabled on devices that are below
@@ -2067,24 +2162,19 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
         pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
         if (pos != 0) {
                 /* Host bridge is to HT */
-                ht_enable_msi_mapping(dev);
+                if (found == 1) {
+                        /* it is not enabled, try to enable it */
+                        nv_ht_enable_msi_mapping(dev);
+                }
                 return;
         }
 
-        /* Host bridge is not to HT, disable HT MSI mapping on this device */
-        pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
-        while (pos && ttl--) {
-                u8 flags;
+        /* HT MSI is not enabled */
+        if (found == 1)
+                return;
 
-                if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
-                                         &flags) == 0) {
-                        dev_info(&dev->dev, "Disabling HT MSI mapping");
-                        pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
-                                              flags & ~HT_MSI_FLAGS_ENABLE);
-                }
-                pos = pci_find_next_ht_capability(dev, pos,
-                                                  HT_CAPTYPE_MSI_MAPPING);
-        }
+        /* Host bridge is not to HT, disable HT MSI mapping on this device */
+        ht_disable_msi_mapping(dev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
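
Note: ht_check_msi_mapping() added above distinguishes three states that nv_msi_ht_cap_quirk() then acts on. A condensed restatement of that logic (illustrative only, mirroring the hunks above, not literal patch code):

        switch (ht_check_msi_mapping(dev)) {
        case 0: /* no HT MSI capability: the quirk does nothing */
                return;
        case 1: /* capability present but not enabled: enable it via
                   nv_ht_enable_msi_mapping() only when the host bridge
                   is an HT slave; otherwise leave it untouched */
                break;
        case 2: /* already enabled: keep it when the host bridge is HT,
                   otherwise disable it via ht_disable_msi_mapping() */
                break;
        }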