Diffstat (limited to 'drivers/pci/dmar.c')
 -rw-r--r--   drivers/pci/dmar.c   73
 1 file changed, 56 insertions, 17 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 519f5f91e765..5f333403c2ea 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -332,6 +332,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
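
Note on the hunk above: parse_dmar_table() advances entry_header by each subtable's self-reported length, so a corrupt entry that reports a length of 0 would never advance and the loop would spin forever; the new check reports -EINVAL and breaks out instead. A minimal stand-alone sketch of the same walking pattern (user-space stand-ins, not the kernel's ACPI types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sub_header {		/* stand-in for struct acpi_dmar_header */
	uint16_t type;
	uint16_t length;	/* total size of this entry, in bytes */
};

/* Walk a buffer of variable-length entries; returns -1 on a bad table. */
static int walk_entries(const uint8_t *buf, size_t total)
{
	const uint8_t *p = buf;

	while (p < buf + total) {
		const struct sub_header *h = (const struct sub_header *)p;

		/* Avoid looping forever on bad tables (the fix above). */
		if (h->length == 0) {
			fprintf(stderr, "Invalid 0-length structure\n");
			return -1;
		}
		/* ... dispatch on h->type here ... */
		p += h->length;	/* advance by the entry's own length */
	}
	return 0;
}
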
@@ -494,7 +502,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -510,6 +518,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -517,6 +526,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
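
Together with the "int agaw = 0" initialization in the earlier hunk, the #ifdef above means the address-width calculation only runs when DMA remapping (CONFIG_DMAR) is built in, while interrupt-remapping-only configurations keep a harmless default rather than an uninitialized value. A compilable sketch of that guarded-default pattern (placeholder names; CONFIG_EXAMPLE_FEATURE and calculate_agaw() are hypothetical stand-ins):

/* Compute an optional capability only when the feature is compiled in,
 * otherwise keep a safe default. */
struct unit_sketch {
	int agaw;
};

static int setup_unit(struct unit_sketch *u)
{
	int agaw = 0;			/* default for builds without the feature */

#ifdef CONFIG_EXAMPLE_FEATURE
	agaw = calculate_agaw(u);	/* only referenced when the feature is on */
	if (agaw < 0)
		return -1;		/* mirrors the goto error path above */
#endif
	u->agaw = agaw;
	return 0;
}
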
@@ -574,19 +584,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
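Two details of qi_check_fault() above are easy to miss: struct qi_desc is two u64s, so the byte offset reported in the queue head register maps to a descriptor index by shifting right by 4, and the wait descriptor always sits in the slot immediately after the submitted one. An illustrative sketch of that index math (hypothetical helper names; a 256-entry queue is assumed to match QI_LENGTH in this driver):

#include <stdint.h>

#define QI_LENGTH_SKETCH	256	/* assumed queue size, matching QI_LENGTH */

struct qi_desc_sketch {			/* same 16-byte shape as struct qi_desc */
	uint64_t low;
	uint64_t high;
};

/* The queue head register reports a byte offset; with 16-byte descriptors
 * the descriptor index is that offset shifted right by 4 (head >> 4). */
static inline int head_to_index(uint32_t head_reg)
{
	return head_reg >> 4;
}

/* qi_submit_sync() places the wait descriptor right after the request,
 * which is why qi_check_fault() uses (index + 1) % QI_LENGTH. */
static inline int wait_index_of(int index)
{
	return (index + 1) % QI_LENGTH_SKETCH;
}
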
@@ -604,7 +644,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -615,13 +656,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -631,15 +670,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
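
The behavioural change in this hunk is that the completion poll now re-checks the fault status on every iteration, so a submission that triggers an Invalidation Queue Error returns -EINVAL instead of spinning forever on a completion that will never be written. A stripped-down sketch of that fault-aware wait pattern (generic names, not the driver's API):

/* Spin until *status reaches the done value, but abort as soon as the
 * supplied fault check reports an error; returns 0 or a negative errno. */
static int wait_done_or_fault(volatile int *status, int done,
			      int (*check_fault)(void))
{
	int rc = 0;

	while (*status != done) {
		rc = check_fault();	/* e.g. read a fault status register */
		if (rc)
			break;		/* give up instead of spinning forever */
		/* relax the CPU here (cpu_relax() in kernel context) */
	}
	return rc;
}
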
@@ -652,13 +697,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -672,10 +717,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -705,10 +747,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*