Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dmar.c                  |  73
-rw-r--r--  drivers/pci/hotplug/pciehp.h        |   2
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c   |   7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c    |  15
-rw-r--r--  drivers/pci/intel-iommu.c           |  30
-rw-r--r--  drivers/pci/intr_remapping.c        |  21
-rw-r--r--  drivers/pci/msi.c                   |  10
-rw-r--r--  drivers/pci/pci-driver.c            | 164
-rw-r--r--  drivers/pci/pci-sysfs.c             |   4
-rw-r--r--  drivers/pci/pci.c                   |  17
-rw-r--r--  drivers/pci/pci.h                   |  20
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c  |  48
-rw-r--r--  drivers/pci/pcie/aspm.c             |   4
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c      |  18
-rw-r--r--  drivers/pci/quirks.c                | 122
-rw-r--r--  drivers/pci/rom.c                   |   9
16 files changed, 396 insertions, 168 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a50acb..26c536b51c5a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
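
A minimal standalone sketch (not from the patch) of the two-slot ring arithmetic qi_submit_sync() relies on: every request descriptor is paired with a wait descriptor in the next slot, the free head advances by two, and qi_check_fault() compares the hardware head register (in 16-byte descriptor units) against the submitted index. The queue depth of 256 is assumed here purely for illustration.

#include <stdio.h>

#define QI_LENGTH 256	/* assumed queue depth for illustration */

int main(void)
{
	int free_head = 254;				/* next free slot */
	int index = free_head;				/* request descriptor */
	int wait_index = (index + 1) % QI_LENGTH;	/* paired wait descriptor */

	free_head = (free_head + 2) % QI_LENGTH;	/* both slots consumed */

	/* qi_check_fault() compares (DMAR_IQH_REG >> 4), i.e. the head in
	 * 16-byte descriptor units, against the submitted index */
	printf("index=%d wait_index=%d next free_head=%d\n",
	       index, wait_index, free_head);
	return 0;
}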
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index db85284ffb62..39ae37589fda 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -111,6 +111,7 @@ struct controller {
 	int cmd_busy;
 	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
+	unsigned int notification_enabled:1;
 };
 
 #define INT_BUTTON_IGNORE	0
@@ -170,6 +171,7 @@ extern int pciehp_configure_device(struct slot *p_slot);
 extern int pciehp_unconfigure_device(struct slot *p_slot);
 extern void pciehp_queue_pushbutton_work(struct work_struct *work);
 struct controller *pcie_init(struct pcie_device *dev);
+int pcie_init_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
 int pcie_enable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c2485542f543..681e3912b821 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -434,6 +434,13 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
 		goto err_out_release_ctlr;
 	}
 
+	/* Enable events after we have setup the data structures */
+	rc = pcie_init_notification(ctrl);
+	if (rc) {
+		ctrl_err(ctrl, "Notification initialization failed\n");
+		goto err_out_release_ctlr;
+	}
+
 	/* Check if slot is occupied */
 	t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
 	t_slot->hpc_ops->get_adapter_status(t_slot, &value);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 71a8012886b0..7a16c6897bb9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -934,7 +934,7 @@ static void pcie_disable_notification(struct controller *ctrl)
 		ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
 
-static int pcie_init_notification(struct controller *ctrl)
+int pcie_init_notification(struct controller *ctrl)
 {
 	if (pciehp_request_irq(ctrl))
 		return -1;
@@ -942,13 +942,17 @@ static int pcie_init_notification(struct controller *ctrl)
 		pciehp_free_irq(ctrl);
 		return -1;
 	}
+	ctrl->notification_enabled = 1;
 	return 0;
 }
 
 static void pcie_shutdown_notification(struct controller *ctrl)
 {
-	pcie_disable_notification(ctrl);
-	pciehp_free_irq(ctrl);
+	if (ctrl->notification_enabled) {
+		pcie_disable_notification(ctrl);
+		pciehp_free_irq(ctrl);
+		ctrl->notification_enabled = 0;
+	}
 }
 
 static int pcie_init_slot(struct controller *ctrl)
@@ -1110,13 +1114,8 @@ struct controller *pcie_init(struct pcie_device *dev)
 	if (pcie_init_slot(ctrl))
 		goto abort_ctrl;
 
-	if (pcie_init_notification(ctrl))
-		goto abort_slot;
-
 	return ctrl;
 
-abort_slot:
-	pcie_cleanup_slot(ctrl);
 abort_ctrl:
 	kfree(ctrl);
 abort:
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3dfecb20d5e7..f3f686581a90 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -61,6 +61,8 @@
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
+static int rwbf_quirk;
+
 /*
  * 0: Present
  * 1-11: Reserved
@@ -268,7 +270,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -284,9 +291,12 @@ static int __init intel_iommu_setup(char *str)
 	if (!str)
 		return -EINVAL;
 	while (*str) {
-		if (!strncmp(str, "off", 3)) {
+		if (!strncmp(str, "on", 2)) {
+			dmar_disabled = 0;
+			printk(KERN_INFO "Intel-IOMMU: enabled\n");
+		} else if (!strncmp(str, "off", 3)) {
 			dmar_disabled = 1;
-			printk(KERN_INFO"Intel-IOMMU: disabled\n");
+			printk(KERN_INFO "Intel-IOMMU: disabled\n");
 		} else if (!strncmp(str, "igfx_off", 8)) {
 			dmar_map_gfx = 0;
 			printk(KERN_INFO
@@ -777,7 +787,7 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 	u32 val;
 	unsigned long flag;
 
-	if (!cap_rwbf(iommu->cap))
+	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
 		return;
 	val = iommu->gcmd | DMA_GCMD_WBF;
 
@@ -3129,3 +3139,15 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap_range,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 };
+
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+	/*
+	 * Mobile 4 Series Chipset neglects to set RWBF capability,
+	 * but needs it:
+	 */
+	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+	rwbf_quirk = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b22529..45effc5726c0 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
-
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
-	return 0;
+
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 44f15ff70c1d..baba2eb5367d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -103,14 +103,12 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
 	}
 }
 
-/*
- * Essentially, this is ((1 << (1 << x)) - 1), but without the
- * undefinedness of a << 32.
- */
 static inline __attribute_const__ u32 msi_mask(unsigned x)
 {
-	static const u32 mask[] = { 1, 2, 4, 0xf, 0xff, 0xffff, 0xffffffff };
-	return mask[x];
+	/* Don't shift by >= width of type */
+	if (x >= 5)
+		return 0xffffffff;
+	return (1 << (1 << x)) - 1;
 }
 
 static void msix_flush_writes(struct irq_desc *desc)
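
The rewritten msi_mask() computes the per-vector mask for a multiple-message encoding x (which stands for 2^x vectors) directly, with an explicit guard so the shift never reaches the width of u32. A standalone sketch (not from the patch) that prints the values the new helper produces:

#include <stdio.h>
#include <stdint.h>

static uint32_t msi_mask(unsigned int x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

int main(void)
{
	unsigned int x;

	for (x = 0; x <= 5; x++)	/* x encodes 2^x MSI vectors */
		printf("x=%u vectors=%u mask=%#010x\n", x, 1u << x, msi_mask(x));
	return 0;
}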
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index ab1d615425a8..93eac1423585 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,6 +355,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 	int i = 0;
 
 	if (drv && drv->suspend) {
+		pci_power_t prev = pci_dev->current_state;
+
 		pci_dev->state_saved = false;
 
 		i = drv->suspend(pci_dev, state);
@@ -365,12 +367,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 		if (pci_dev->state_saved)
 			goto Fixup;
 
-		if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
+		if (pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: Device state not saved by %pF\n",
+				drv->suspend);
 			goto Fixup;
+		}
 	}
 
 	pci_save_state(pci_dev);
-	pci_dev->state_saved = true;
 	/*
 	 * This is for compatibility with existing code with legacy PM support.
 	 */
@@ -424,35 +430,20 @@ static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 }
 
-static int pci_pm_default_resume(struct pci_dev *pci_dev)
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
 {
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
 	if (!pci_is_bridge(pci_dev))
 		pci_enable_wake(pci_dev, PCI_D0, false);
-
-	return pci_pm_reenable_device(pci_dev);
-}
-
-static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev)
-{
-	/* If device is enabled at this point, disable it */
-	pci_disable_enabled_device(pci_dev);
-	/*
-	 * Save state with interrupts enabled, because in principle the bus the
-	 * device is on may be put into a low power state after this code runs.
-	 */
-	pci_save_state(pci_dev);
 }
 
 static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 {
-	pci_pm_default_suspend_generic(pci_dev);
-
+	/* Disable non-bridge devices without PM support */
 	if (!pci_is_bridge(pci_dev))
-		pci_prepare_to_sleep(pci_dev);
-
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
+		pci_disable_enabled_device(pci_dev);
+	pci_save_state(pci_dev);
 }
 
 static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -497,21 +488,49 @@ static void pci_pm_complete(struct device *dev)
 static int pci_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_SUSPEND);
 
-	if (drv && drv->pm && drv->pm->suspend) {
-		error = drv->pm->suspend(dev);
-		suspend_report_result(drv->pm->suspend, error);
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		goto Fixup;
 	}
 
-	if (!error)
-		pci_pm_default_suspend(pci_dev);
+	pci_dev->state_saved = false;
 
-	return error;
+	if (pm->suspend) {
+		pci_power_t prev = pci_dev->current_state;
+		int error;
+
+		error = pm->suspend(dev);
+		suspend_report_result(pm->suspend, error);
+		if (error)
+			return error;
+
+		if (pci_dev->state_saved)
+			goto Fixup;
+
+		if (pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: State of device not saved by %pF\n",
+				pm->suspend);
+			goto Fixup;
+		}
+	}
+
+	if (!pci_dev->state_saved) {
+		pci_save_state(pci_dev);
+		if (!pci_is_bridge(pci_dev))
+			pci_prepare_to_sleep(pci_dev);
+	}
+
+ Fixup:
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+	return 0;
 }
 
 static int pci_pm_suspend_noirq(struct device *dev)
@@ -554,7 +573,7 @@ static int pci_pm_resume_noirq(struct device *dev)
 static int pci_pm_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -567,12 +586,16 @@ static int pci_pm_resume(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	error = pci_pm_default_resume(pci_dev);
+	pci_pm_default_resume(pci_dev);
 
-	if (!error && drv && drv->pm && drv->pm->resume)
-		error = drv->pm->resume(dev);
+	if (pm) {
+		if (pm->resume)
+			error = pm->resume(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
-	return error;
+	return 0;
 }
 
 #else /* !CONFIG_SUSPEND */
@@ -589,21 +612,31 @@ static int pci_pm_resume(struct device *dev)
 static int pci_pm_freeze(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_FREEZE);
 
-	if (drv && drv->pm && drv->pm->freeze) {
-		error = drv->pm->freeze(dev);
-		suspend_report_result(drv->pm->freeze, error);
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		return 0;
 	}
 
-	if (!error)
-		pci_pm_default_suspend_generic(pci_dev);
+	pci_dev->state_saved = false;
 
-	return error;
+	if (pm->freeze) {
+		int error;
+
+		error = pm->freeze(dev);
+		suspend_report_result(pm->freeze, error);
+		if (error)
+			return error;
+	}
+
+	if (!pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
+	return 0;
 }
 
 static int pci_pm_freeze_noirq(struct device *dev)
@@ -646,16 +679,18 @@ static int pci_pm_thaw_noirq(struct device *dev)
 static int pci_pm_thaw(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	pci_pm_reenable_device(pci_dev);
-
-	if (drv && drv->pm && drv->pm->thaw)
-		error = drv->pm->thaw(dev);
+	if (pm) {
+		if (pm->thaw)
+			error = pm->thaw(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
 	return error;
 }
@@ -663,22 +698,29 @@ static int pci_pm_thaw(struct device *dev)
 static int pci_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_HIBERNATE);
 
-	if (!drv || !drv->pm)
-		return 0;
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		goto Fixup;
+	}
+
+	pci_dev->state_saved = false;
 
-	if (drv->pm->poweroff) {
-		error = drv->pm->poweroff(dev);
-		suspend_report_result(drv->pm->poweroff, error);
+	if (pm->poweroff) {
+		error = pm->poweroff(dev);
+		suspend_report_result(pm->poweroff, error);
 	}
 
-	if (!error)
-		pci_pm_default_suspend(pci_dev);
+	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+		pci_prepare_to_sleep(pci_dev);
+
+ Fixup:
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
 
 	return error;
 }
@@ -719,7 +761,7 @@ static int pci_pm_restore_noirq(struct device *dev)
 static int pci_pm_restore(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -732,10 +774,14 @@ static int pci_pm_restore(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	error = pci_pm_default_resume(pci_dev);
+	pci_pm_default_resume(pci_dev);
 
-	if (!error && drv && drv->pm && drv->pm->restore)
-		error = drv->pm->restore(dev);
+	if (pm) {
+		if (pm->restore)
+			error = pm->restore(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
 	return error;
 }
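
Condensed to plain booleans, the policy the reworked pci_pm_suspend() implements is: with no dev_pm_ops the core falls back to its default suspend; if the driver's ->suspend callback saved state itself the core leaves it alone; otherwise the core saves state and, for non-bridges, calls pci_prepare_to_sleep(). A standalone sketch (not from the patch) of that decision:

#include <stdio.h>
#include <stdbool.h>

static const char *core_action(bool has_pm_ops, bool driver_saved_state,
			       bool is_bridge)
{
	if (!has_pm_ops)
		return "default suspend: disable device (if not a bridge) and save state";
	if (driver_saved_state)
		return "driver saved state: core only runs the suspend fixups";
	if (is_bridge)
		return "core saves state, then runs the suspend fixups";
	return "core saves state, calls pci_prepare_to_sleep(), then runs fixups";
}

int main(void)
{
	printf("%s\n", core_action(false, false, false));
	printf("%s\n", core_action(true, true, false));
	printf("%s\n", core_action(true, false, true));
	printf("%s\n", core_action(true, false, false));
	return 0;
}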
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index db7ec14fa719..dfc4e0ddf241 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -768,8 +768,8 @@ pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EINVAL;
 
 	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
-	if (!rom)
-		return 0;
+	if (!rom || !size)
+		return -EIO;
 
 	if (off >= size)
 		count = 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 48807556b47a..6d6120007af4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1418,10 +1418,10 @@ int pci_restore_standard_config(struct pci_dev *dev)
 		break;
 	}
 
-	dev->current_state = PCI_D0;
+	pci_update_current_state(dev, PCI_D0);
 
 Restore:
-	return pci_restore_state(dev);
+	return dev->state_saved ? pci_restore_state(dev) : 0;
 }
 
 /**
@@ -1540,16 +1540,21 @@ void pci_release_region(struct pci_dev *pdev, int bar)
 }
 
 /**
- * pci_request_region - Reserved PCI I/O and memory resource
+ * __pci_request_region - Reserved PCI I/O and memory resource
  * @pdev: PCI device whose resources are to be reserved
  * @bar: BAR to be reserved
  * @res_name: Name to be associated with resource.
+ * @exclusive: whether the region access is exclusive or not
  *
 * Mark the PCI region associated with PCI device @pdev BR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
+ * If @exclusive is set, then the region is marked so that userspace
+ * is explicitly not allowed to map the resource via /dev/mem or
+ * sysfs MMIO access.
+ *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
@@ -1588,12 +1593,12 @@ err_out:
 }
 
 /**
- * pci_request_region - Reserved PCI I/O and memory resource
+ * pci_request_region - Reserve PCI I/O and memory resource
  * @pdev: PCI device whose resources are to be reserved
  * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @res_name: Name to be associated with resource
 *
- * Mark the PCI region associated with PCI device @pdev BR @bar as
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 26ddf78ac300..07c0aa5275e6 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,21 +16,21 @@ extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
 #endif
 
 /**
- * Firmware PM callbacks
+ * struct pci_platform_pm_ops - Firmware PM callbacks
 *
- * @is_manageable - returns 'true' if given device is power manageable by the
+ * @is_manageable: returns 'true' if given device is power manageable by the
 *                  platform firmware
 *
- * @set_state - invokes the platform firmware to set the device's power state
+ * @set_state: invokes the platform firmware to set the device's power state
 *
- * @choose_state - returns PCI power state of given device preferred by the
+ * @choose_state: returns PCI power state of given device preferred by the
 *                 platform; to be used during system-wide transitions from a
 *                 sleeping state to the working state and vice versa
 *
- * @can_wakeup - returns 'true' if given device is capable of waking up the
+ * @can_wakeup: returns 'true' if given device is capable of waking up the
 *               system from a sleeping state
 *
- * @sleep_wake - enables/disables the system wake up capability of given device
+ * @sleep_wake: enables/disables the system wake up capability of given device
 *
 * If given platform is generally capable of power managing PCI devices, all of
 * these callbacks are mandatory.
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aac7006949f1..d0c973685868 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -108,6 +108,34 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
 }
 #endif /* 0 */
 
+
+static void set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+	bool enable = *((bool *)data);
+
+	if (dev->pcie_type != PCIE_RC_PORT &&
+	    dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
+	    dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
+		return;
+
+	if (enable)
+		pci_enable_pcie_error_reporting(dev);
+	else
+		pci_disable_pcie_error_reporting(dev);
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+						   bool enable)
+{
+	set_device_error_reporting(dev, &enable);
+	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
 static int find_device_iter(struct device *device, void *data)
 {
 	struct pci_dev *dev;
@@ -525,15 +553,11 @@ void aer_enable_rootport(struct aer_rpc *rpc)
 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
 
-	/* Enable Root Port device reporting error itself */
-	pci_read_config_word(pdev, pos+PCI_EXP_DEVCTL, &reg16);
-	reg16 = reg16 |
-		PCI_EXP_DEVCTL_CERE |
-		PCI_EXP_DEVCTL_NFERE |
-		PCI_EXP_DEVCTL_FERE |
-		PCI_EXP_DEVCTL_URRE;
-	pci_write_config_word(pdev, pos+PCI_EXP_DEVCTL,
-			reg16);
+	/*
+	 * Enable error reporting for the root port device and downstream port
+	 * devices.
+	 */
+	set_downstream_devices_error_reporting(pdev, true);
 
 	/* Enable Root Port's interrupt in response to error messages */
 	pci_write_config_dword(pdev,
@@ -553,6 +577,12 @@ static void disable_root_aer(struct aer_rpc *rpc)
 	u32 reg32;
 	int pos;
 
+	/*
+	 * Disable error reporting for the root port device and downstream port
+	 * devices.
+	 */
+	set_downstream_devices_error_reporting(pdev, false);
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 	/* Disable Root's interrupt in response to error messages */
 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 586b6f75910d..b0367f168af4 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -718,9 +718,9 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 
 	/*
 	 * All PCIe functions are in one slot, remove one function will remove
-	 * the the whole slot, so just wait
+	 * the whole slot, so just wait until we are the last function left.
 	 */
-	if (!list_empty(&parent->subordinate->devices))
+	if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
 		goto out;
 
 	/* All functions are removed, so just disable ASPM for the link */
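
The switch from list_empty() to list_is_last() matters because the function being removed is still linked on the parent bus's device list at this point, so the list can never be empty here. A standalone sketch (not from the patch) with minimal stand-ins for the kernel's list helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static int list_is_last(const struct list_head *list, const struct list_head *head)
{
	return list->next == head;
}

int main(void)
{
	struct list_head bus = { &bus, &bus };
	struct list_head fn0;

	/* one remaining function, still linked on the bus list */
	fn0.next = &bus; fn0.prev = &bus;
	bus.next = &fn0; bus.prev = &fn0;

	printf("list_empty=%d list_is_last=%d\n",
	       list_empty(&bus), list_is_last(&fn0, &bus));
	return 0;
}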
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 99a914a027f8..248b4db91552 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -55,25 +55,13 @@ static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
 
 }
 
-static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state)
-{
-	return pci_save_state(dev);
-}
-
-static int pcie_portdrv_resume_early(struct pci_dev *dev)
-{
-	return pci_restore_state(dev);
-}
-
 static int pcie_portdrv_resume(struct pci_dev *dev)
 {
-	pcie_portdrv_restore_config(dev);
+	pci_set_master(dev);
 	return pcie_port_device_resume(dev);
 }
 #else
 #define pcie_portdrv_suspend NULL
-#define pcie_portdrv_suspend_late NULL
-#define pcie_portdrv_resume_early NULL
 #define pcie_portdrv_resume NULL
 #endif
 
@@ -109,8 +97,6 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
 
 	pcie_portdrv_save_config(dev);
 
-	pci_enable_pcie_error_reporting(dev);
-
 	return 0;
 }
 
@@ -292,8 +278,6 @@ static struct pci_driver pcie_portdriver = {
 	.remove		= pcie_portdrv_remove,
 
 	.suspend	= pcie_portdrv_suspend,
-	.suspend_late	= pcie_portdrv_suspend_late,
-	.resume_early	= pcie_portdrv_resume_early,
 	.resume		= pcie_portdrv_resume,
 
 	.err_handler	= &pcie_portdrv_err_handler,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index baad093aafe3..f20d55368edb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1584,6 +1584,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
 */
 #define AMD_813X_MISC			0x40
 #define AMD_813X_NOIOAMODE		(1<<0)
+#define AMD_813X_REV_B2			0x13
 
 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 {
@@ -1591,6 +1592,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 
 	if (noioapicquirk)
 		return;
+	if (dev->revision == AMD_813X_REV_B2)
+		return;
 
 	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
 	pci_config_dword &= ~AMD_813X_NOIOAMODE;
@@ -1981,7 +1984,6 @@ static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
 			quirk_msi_ht_cap);
 
-
 /* The nVidia CK804 chipset may have 2 HT MSI mappings.
  * MSI are supported if the MSI capability set in any of these mappings.
  */
@@ -2032,6 +2034,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
 			PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
 			ht_enable_msi_mapping);
 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
+			 ht_enable_msi_mapping);
+
 /* The P5N32-SLI Premium motherboard from Asus has a problem with msi
  * for the MCP55 NIC. It is not yet determined whether the msi problem
  * also affects other devices. As for now, turn off msi for this device.
@@ -2048,10 +2053,100 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
 			PCI_DEVICE_ID_NVIDIA_NVENET_15,
 			nvenet_msi_disable);
 
-static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
 {
 	struct pci_dev *host_bridge;
+	int pos;
+	int i, dev_no;
+	int found = 0;
+
+	dev_no = dev->devfn >> 3;
+	for (i = dev_no; i >= 0; i--) {
+		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
+		if (!host_bridge)
+			continue;
+
+		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
+		if (pos != 0) {
+			found = 1;
+			break;
+		}
+		pci_dev_put(host_bridge);
+	}
+
+	if (!found)
+		return;
+
+	/* root did that ! */
+	if (msi_ht_cap_enabled(host_bridge))
+		goto out;
+
+	ht_enable_msi_mapping(dev);
+
+out:
+	pci_dev_put(host_bridge);
+}
+
+static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
+{
+	int pos, ttl = 48;
+
+	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+	while (pos && ttl--) {
+		u8 flags;
+
+		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+					 &flags) == 0) {
+			dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+
+			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
+					      flags & ~HT_MSI_FLAGS_ENABLE);
+		}
+		pos = pci_find_next_ht_capability(dev, pos,
+						  HT_CAPTYPE_MSI_MAPPING);
+	}
+}
+
+static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+{
 	int pos, ttl = 48;
+	int found = 0;
+
+	/* check if there is HT MSI cap or enabled on this device */
+	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+	while (pos && ttl--) {
+		u8 flags;
+
+		if (found < 1)
+			found = 1;
+		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+					 &flags) == 0) {
+			if (flags & HT_MSI_FLAGS_ENABLE) {
+				if (found < 2) {
+					found = 2;
+					break;
+				}
+			}
+		}
+		pos = pci_find_next_ht_capability(dev, pos,
+						  HT_CAPTYPE_MSI_MAPPING);
+	}
+
+	return found;
+}
+
+static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+{
+	struct pci_dev *host_bridge;
+	int pos;
+	int found;
+
+	/* check if there is HT MSI cap or enabled on this device */
+	found = ht_check_msi_mapping(dev);
+
+	/* no HT MSI CAP */
+	if (found == 0)
+		return;
 
 	/*
 	 * HT MSI mapping should be disabled on devices that are below
@@ -2067,24 +2162,19 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
 	if (pos != 0) {
 		/* Host bridge is to HT */
-		ht_enable_msi_mapping(dev);
+		if (found == 1) {
+			/* it is not enabled, try to enable it */
+			nv_ht_enable_msi_mapping(dev);
+		}
 		return;
 	}
 
-	/* Host bridge is not to HT, disable HT MSI mapping on this device */
-	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
-	while (pos && ttl--) {
-		u8 flags;
+	/* HT MSI is not enabled */
+	if (found == 1)
+		return;
 
-		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
-				&flags) == 0) {
-			dev_info(&dev->dev, "Disabling HT MSI mapping");
-			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
-				flags & ~HT_MSI_FLAGS_ENABLE);
-		}
-		pos = pci_find_next_ht_capability(dev, pos,
-			HT_CAPTYPE_MSI_MAPPING);
-	}
+	/* Host bridge is not to HT, disable HT MSI mapping on this device */
+	ht_disable_msi_mapping(dev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
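
The reworked quirk first classifies the device with ht_check_msi_mapping() (0 = no HT MSI capability, 1 = capability present but not enabled, 2 = capability enabled) and only then decides whether to enable or disable the mapping, depending on whether the host bridge is an HT slave. A standalone sketch (not from the patch) of that decision table:

#include <stdio.h>

static const char *quirk_action(int found, int host_bridge_is_ht)
{
	if (found == 0)
		return "nothing to do (no HT MSI capability)";
	if (host_bridge_is_ht)
		return found == 1 ? "try to enable HT MSI mapping"
				  : "leave enabled mapping alone";
	return found == 1 ? "leave disabled mapping alone"
			  : "disable HT MSI mapping";
}

int main(void)
{
	int found, ht;

	for (found = 0; found <= 2; found++)
		for (ht = 0; ht <= 1; ht++)
			printf("found=%d host_bridge_ht=%d -> %s\n",
			       found, ht, quirk_action(found, ht));
	return 0;
}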
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 132a78159b60..36864a935d68 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -55,6 +55,7 @@ void pci_disable_rom(struct pci_dev *pdev)
 
 /**
 * pci_get_rom_size - obtain the actual size of the ROM image
+ * @pdev: target PCI device
 * @rom: kernel virtual pointer to image of ROM
 * @size: size of PCI window
 * return: size of actual ROM image
@@ -63,7 +64,7 @@ void pci_disable_rom(struct pci_dev *pdev)
 * The PCI window size could be much larger than the
 * actual image size.
 */
-size_t pci_get_rom_size(void __iomem *rom, size_t size)
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
 {
 	void __iomem *image;
 	int last_image;
@@ -72,8 +73,10 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
 	do {
 		void __iomem *pds;
 		/* Standard PCI ROMs start out with these bytes 55 AA */
-		if (readb(image) != 0x55)
+		if (readb(image) != 0x55) {
+			dev_err(&pdev->dev, "Invalid ROM contents\n");
 			break;
+		}
 		if (readb(image + 1) != 0xAA)
 			break;
 		/* get the PCI data structure and check its signature */
@@ -159,7 +162,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 	 * size is much larger than the actual size of the ROM.
 	 * True size is important if the ROM is going to be copied.
 	 */
-	*size = pci_get_rom_size(rom, *size);
+	*size = pci_get_rom_size(pdev, rom, *size);
 	return rom;
 }
 