author    Yu Zhao <yu.zhao@intel.com>    2009-01-04 03:28:52 -0500
committer David Woodhouse <David.Woodhouse@intel.com>    2009-02-09 06:03:17 -0500
commit    704126ad81b8cb7d3d70adb9ecb143f4d3fb38af (patch)
tree      e73c4d595799661757b7505cd67833addef0635e /drivers/pci/dmar.c
parent    43f7392ba9e2585bf34f21399b1ed78692b5d437 (diff)
VT-d: handle Invalidation Queue Error to avoid system hang
When hardware detects any error with a descriptor from the invalidation queue, it stops fetching new descriptors from the queue until software clears the Invalidation Queue Error (IQE) bit in the Fault Status register. The following fix handles the IQE so the kernel won't be trapped in an infinite loop.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
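For illustration only (not part of the patch), here is a minimal sketch of the recovery sequence described above. It reuses the DMAR_FSTS_REG and DMA_FSTS_IQE definitions relied on by the diff below; the helper name itself is hypothetical.

	/*
	 * Illustrative sketch, not part of this patch: check for an
	 * Invalidation Queue Error and clear it so hardware resumes
	 * fetching descriptors.  Helper name is hypothetical.
	 */
	static int example_clear_iqe(struct intel_iommu *iommu)
	{
		u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);

		if (!(fsts & DMA_FSTS_IQE))
			return 0;	/* no Invalidation Queue Error pending */

		/*
		 * Hardware has stopped fetching descriptors.  After software
		 * fixes or replaces the faulting descriptor, writing 1 to the
		 * IQE bit clears the error and lets the queue resume;
		 * otherwise code spinning on the wait descriptor status would
		 * hang forever.
		 */
		writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
		return -EINVAL;
	}

qi_check_fault() in the diff below performs the same sequence, but first copies the wait descriptor over the faulting descriptor so the queue can make forward progress before the IQE bit is cleared.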
Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--	drivers/pci/dmar.c	61
1 file changed, 45 insertions(+), 16 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 2b4162d9ca30..8d3e9c261061 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -573,19 +573,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
@@ -603,7 +633,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -614,13 +645,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -630,15 +659,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -651,13 +686,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -671,10 +706,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -704,10 +736,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*