aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/intel-iommu.c
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2009-03-27 17:22:42 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2009-04-03 16:45:54 -0400
commitf59c7b69bcba31cd355ababe067202b9895d6102 (patch)
tree4f06ceb6ab9a135acd9b316c806aaa99c097b373 /drivers/pci/intel-iommu.c
parent8f912ba4d7cdaf7d31cf39fe5a9b7732308a256d (diff)
Intel IOMMU Suspend/Resume Support - DMAR
This patch implements the suspend and resume feature for Intel IOMMU DMAR. It hooks to kernel suspend and resume interface. When suspend happens, it saves necessary hardware registers. When resume happens, it restores the registers and restarts IOMMU by enabling translation, setting up root entry, and re-enabling queued invalidation. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Acked-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--drivers/pci/intel-iommu.c146
1 file changed, 146 insertions, 0 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..ef5795d0396b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
36#include <linux/iova.h> 36#include <linux/iova.h>
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/sysdev.h>
39#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
40#include <asm/iommu.h> 41#include <asm/iommu.h>
41#include "pci.h" 42#include "pci.h"
@@ -2597,6 +2598,150 @@ static void __init init_no_remapping_devices(void)
2597 } 2598 }
2598} 2599}
2599 2600
2601#ifdef CONFIG_SUSPEND
2602static int init_iommu_hw(void)
2603{
2604 struct dmar_drhd_unit *drhd;
2605 struct intel_iommu *iommu = NULL;
2606
2607 for_each_active_iommu(iommu, drhd)
2608 if (iommu->qi)
2609 dmar_reenable_qi(iommu);
2610
2611 for_each_active_iommu(iommu, drhd) {
2612 iommu_flush_write_buffer(iommu);
2613
2614 iommu_set_root_entry(iommu);
2615
2616 iommu->flush.flush_context(iommu, 0, 0, 0,
2617 DMA_CCMD_GLOBAL_INVL, 0);
2618 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2619 DMA_TLB_GLOBAL_FLUSH, 0);
2620 iommu_disable_protect_mem_regions(iommu);
2621 iommu_enable_translation(iommu);
2622 }
2623
2624 return 0;
2625}
2626
2627static void iommu_flush_all(void)
2628{
2629 struct dmar_drhd_unit *drhd;
2630 struct intel_iommu *iommu;
2631
2632 for_each_active_iommu(iommu, drhd) {
2633 iommu->flush.flush_context(iommu, 0, 0, 0,
2634 DMA_CCMD_GLOBAL_INVL, 0);
2635 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2636 DMA_TLB_GLOBAL_FLUSH, 0);
2637 }
2638}
2639
2640static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2641{
2642 struct dmar_drhd_unit *drhd;
2643 struct intel_iommu *iommu = NULL;
2644 unsigned long flag;
2645
2646 for_each_active_iommu(iommu, drhd) {
2647 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2648 GFP_ATOMIC);
2649 if (!iommu->iommu_state)
2650 goto nomem;
2651 }
2652
2653 iommu_flush_all();
2654
2655 for_each_active_iommu(iommu, drhd) {
2656 iommu_disable_translation(iommu);
2657
2658 spin_lock_irqsave(&iommu->register_lock, flag);
2659
2660 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2661 readl(iommu->reg + DMAR_FECTL_REG);
2662 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2663 readl(iommu->reg + DMAR_FEDATA_REG);
2664 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2665 readl(iommu->reg + DMAR_FEADDR_REG);
2666 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2667 readl(iommu->reg + DMAR_FEUADDR_REG);
2668
2669 spin_unlock_irqrestore(&iommu->register_lock, flag);
2670 }
2671 return 0;
2672
2673nomem:
2674 for_each_active_iommu(iommu, drhd)
2675 kfree(iommu->iommu_state);
2676
2677 return -ENOMEM;
2678}
2679
2680static int iommu_resume(struct sys_device *dev)
2681{
2682 struct dmar_drhd_unit *drhd;
2683 struct intel_iommu *iommu = NULL;
2684 unsigned long flag;
2685
2686 if (init_iommu_hw()) {
2687 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
2688 return -EIO;
2689 }
2690
2691 for_each_active_iommu(iommu, drhd) {
2692
2693 spin_lock_irqsave(&iommu->register_lock, flag);
2694
2695 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2696 iommu->reg + DMAR_FECTL_REG);
2697 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2698 iommu->reg + DMAR_FEDATA_REG);
2699 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2700 iommu->reg + DMAR_FEADDR_REG);
2701 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2702 iommu->reg + DMAR_FEUADDR_REG);
2703
2704 spin_unlock_irqrestore(&iommu->register_lock, flag);
2705 }
2706
2707 for_each_active_iommu(iommu, drhd)
2708 kfree(iommu->iommu_state);
2709
2710 return 0;
2711}
2712
2713static struct sysdev_class iommu_sysclass = {
2714 .name = "iommu",
2715 .resume = iommu_resume,
2716 .suspend = iommu_suspend,
2717};
2718
2719static struct sys_device device_iommu = {
2720 .cls = &iommu_sysclass,
2721};
2722
2723static int __init init_iommu_sysfs(void)
2724{
2725 int error;
2726
2727 error = sysdev_class_register(&iommu_sysclass);
2728 if (error)
2729 return error;
2730
2731 error = sysdev_register(&device_iommu);
2732 if (error)
2733 sysdev_class_unregister(&iommu_sysclass);
2734
2735 return error;
2736}
2737
2738#else
2739static int __init init_iommu_sysfs(void)
2740{
2741 return 0;
2742}
2743#endif /* CONFIG_PM */
2744
2600int __init intel_iommu_init(void) 2745int __init intel_iommu_init(void)
2601{ 2746{
2602 int ret = 0; 2747 int ret = 0;
@@ -2632,6 +2777,7 @@ int __init intel_iommu_init(void)
2632 init_timer(&unmap_timer); 2777 init_timer(&unmap_timer);
2633 force_iommu = 1; 2778 force_iommu = 1;
2634 dma_ops = &intel_dma_ops; 2779 dma_ops = &intel_dma_ops;
2780 init_iommu_sysfs();
2635 2781
2636 register_iommu(&intel_iommu_ops); 2782 register_iommu(&intel_iommu_ops);
2637 2783