author     David S. Miller <davem@davemloft.net>   2009-03-02 00:35:16 -0500
committer  David S. Miller <davem@davemloft.net>   2009-03-02 00:35:16 -0500
commit     aa4abc9bcce0d2a7ec189e897f8f8c58ca04643b (patch)
tree       22ef88d84a2e06380bb6a853c3ba28657e4e5f92 /drivers/pci/dmar.c
parent     814c01dc7c533033b4e99981a2e24a6195bfb43c (diff)
parent     52c0326beaa3cb0049d0f1c51c6ad5d4a04e4430 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-tx.c
	net/8021q/vlan_core.c
	net/core/dev.c
Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--  drivers/pci/dmar.c  73
1 file changed, 56 insertions, 17 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a50acb..26c536b51c5a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
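Note on the hunk above: the table walk advances by each sub-table's self-declared length, so a corrupt entry reporting a zero length would never make progress and the while condition would hold forever. The standalone sketch below (illustrative only, not kernel code; names such as parse_entries are made up) shows the same length-prefixed walk and why the early bail-out is needed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct acpi_dmar_header: every entry begins with its type
 * and its own total length, and the parser steps forward by that length. */
struct entry_header {
	uint16_t type;
	uint16_t length;
};

static int parse_entries(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf;

	while (p + sizeof(struct entry_header) <= buf + len) {
		struct entry_header hdr;

		memcpy(&hdr, p, sizeof(hdr));
		if (hdr.length == 0) {
			/* mirrors the new "Invalid 0-length structure" bail-out */
			fprintf(stderr, "invalid 0-length structure\n");
			return -1;
		}
		printf("entry type %u, length %u\n",
		       (unsigned)hdr.type, (unsigned)hdr.length);
		p += hdr.length;	/* without the check above, p would never move */
	}
	return 0;
}

int main(void)
{
	/* two well-formed entries followed by a corrupt zero-length one
	 * (byte layout assumes a little-endian host) */
	uint8_t table[] = {
		0, 0, 8, 0,  0, 0, 0, 0,	/* type 0, length 8 */
		1, 0, 6, 0,  0, 0,		/* type 1, length 6 */
		2, 0, 0, 0,			/* type 2, length 0: corrupt */
	};

	return parse_entries(table, sizeof(table)) ? 1 : 0;
}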
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
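Note on the two hunks above: the agaw computation is now wrapped in #ifdef CONFIG_DMAR, and agaw gets a default of 0 at its declaration, presumably so alloc_iommu() still builds and runs when only interrupt remapping (and not DMA remapping) is configured. A reduced sketch of that pattern (illustrative only; CONFIG_DMAR and calculate_agaw() here are stand-ins, not the kernel's build system):

#include <stdio.h>

/* #define CONFIG_DMAR 1	-- define to model a CONFIG_DMAR=y build */

#ifdef CONFIG_DMAR
static int calculate_agaw(void)
{
	return 2;	/* placeholder for iommu_calculate_agaw() */
}
#endif

int main(void)
{
	int agaw = 0;	/* safe default when DMA remapping is compiled out */

#ifdef CONFIG_DMAR
	agaw = calculate_agaw();
	if (agaw < 0) {
		fprintf(stderr, "cannot get a valid agaw\n");
		return 1;
	}
#endif
	printf("agaw = %d\n", agaw);
	return 0;
}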
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
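Note on the new qi_check_fault() above: the >> 4 turns the byte offset reported in DMAR_IQH_REG into a ring index, since each invalidation descriptor is two 64-bit words (16 bytes), and the wait descriptor always occupies the slot right after the one being submitted. A standalone sketch of that arithmetic (illustrative only; the QI_LENGTH value of 256 is an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

#define QI_LENGTH 256	/* ring size assumed for this sketch */

/* Same shape as the driver's struct qi_desc: two 64-bit words = 16 bytes,
 * which is why hardware queue offsets convert to indices with >> 4. */
struct qi_desc {
	uint64_t low;
	uint64_t high;
};

static int head_reg_to_index(uint32_t head_reg)
{
	return (head_reg >> 4) % QI_LENGTH;
}

int main(void)
{
	int index = 42;					/* slot holding the submitted descriptor */
	int wait_index = (index + 1) % QI_LENGTH;	/* its paired wait descriptor */
	uint32_t head_reg = (uint32_t)index << 4;	/* what an IQE would leave in IQH */

	printf("descriptor size: %zu bytes\n", sizeof(struct qi_desc));
	printf("head register 0x%x -> index %d (wait slot %d)\n",
	       head_reg, head_reg_to_index(head_reg), wait_index);
	return 0;
}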
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
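Note on the hunk above: the wait loop used to spin unconditionally until the wait descriptor's status flipped to QI_DONE; it now checks for a queue fault on each pass and abandons the wait with an error instead of hanging. A reduced, single-threaded sketch of that control flow (illustrative only; the real code also drops and retakes qi->q_lock around cpu_relax()):

#include <stdio.h>

enum { QI_FREE, QI_IN_USE, QI_DONE };	/* same states the driver uses */

static int desc_status = QI_IN_USE;	/* stands in for qi->desc_status[wait_index] */
static int fault_pending = 1;		/* pretend the hardware flagged an IQE */

/* stands in for qi_check_fault(): nonzero means the submitted descriptor
 * faulted and will never be completed by hardware */
static int check_fault(void)
{
	return fault_pending ? -22 /* -EINVAL */ : 0;
}

static int wait_for_completion(void)
{
	int rc = 0;

	while (desc_status != QI_DONE) {
		rc = check_fault();	/* the new step: bail out instead of spinning forever */
		if (rc)
			goto out;
		/* cpu_relax() between a lock drop/retake would go here */
	}
out:
	desc_status = QI_DONE;		/* mark the slot reclaimable either way */
	return rc;
}

int main(void)
{
	printf("wait_for_completion() = %d\n", wait_for_completion());
	return 0;
}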
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
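Note on the last two hunks: since qi_submit_sync() now returns a status, qi_flush_context() and qi_flush_iotlb() propagate that status to their callers instead of discarding it and returning 0 unconditionally. A minimal sketch of the changed calling convention (illustrative only; the descriptor fields are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

struct qi_desc {
	uint64_t low;
	uint64_t high;
};

/* stand-in for qi_submit_sync(); pretend the queue reported a fault */
static int submit_sync(const struct qi_desc *desc)
{
	(void)desc;
	return -22;	/* -EINVAL, as qi_check_fault() returns on an IQE */
}

/* simplified stand-in for qi_flush_iotlb(): build a descriptor and
 * propagate the submit status instead of returning 0 unconditionally */
static int flush_iotlb(uint16_t did, uint64_t addr)
{
	struct qi_desc desc = {
		.low  = did,	/* the real code also ORs in granularity/type bits */
		.high = addr,
	};

	return submit_sync(&desc);
}

int main(void)
{
	printf("flush_iotlb() = %d\n", flush_iotlb(1, 0x1000));
	return 0;
}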