aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2009-03-27 17:22:42 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2009-04-03 16:45:54 -0400
commitf59c7b69bcba31cd355ababe067202b9895d6102 (patch)
tree4f06ceb6ab9a135acd9b316c806aaa99c097b373
parent8f912ba4d7cdaf7d31cf39fe5a9b7732308a256d (diff)
Intel IOMMU Suspend/Resume Support - DMAR
This patch implements the suspend and resume feature for Intel IOMMU DMAR. It hooks to kernel suspend and resume interface. When suspend happens, it saves necessary hardware registers. When resume happens, it restores the registers and restarts IOMMU by enabling translation, setting up root entry, and re-enabling queued invalidation. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Acked-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--drivers/pci/intel-iommu.c146
-rw-r--r--include/linux/intel-iommu.h11
2 files changed, 157 insertions, 0 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..ef5795d0396b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
36#include <linux/iova.h> 36#include <linux/iova.h>
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/sysdev.h>
39#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
40#include <asm/iommu.h> 41#include <asm/iommu.h>
41#include "pci.h" 42#include "pci.h"
@@ -2597,6 +2598,150 @@ static void __init init_no_remapping_devices(void)
2597 } 2598 }
2598} 2599}
2599 2600
2601#ifdef CONFIG_SUSPEND
2602static int init_iommu_hw(void)
2603{
2604 struct dmar_drhd_unit *drhd;
2605 struct intel_iommu *iommu = NULL;
2606
2607 for_each_active_iommu(iommu, drhd)
2608 if (iommu->qi)
2609 dmar_reenable_qi(iommu);
2610
2611 for_each_active_iommu(iommu, drhd) {
2612 iommu_flush_write_buffer(iommu);
2613
2614 iommu_set_root_entry(iommu);
2615
2616 iommu->flush.flush_context(iommu, 0, 0, 0,
2617 DMA_CCMD_GLOBAL_INVL, 0);
2618 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2619 DMA_TLB_GLOBAL_FLUSH, 0);
2620 iommu_disable_protect_mem_regions(iommu);
2621 iommu_enable_translation(iommu);
2622 }
2623
2624 return 0;
2625}
2626
2627static void iommu_flush_all(void)
2628{
2629 struct dmar_drhd_unit *drhd;
2630 struct intel_iommu *iommu;
2631
2632 for_each_active_iommu(iommu, drhd) {
2633 iommu->flush.flush_context(iommu, 0, 0, 0,
2634 DMA_CCMD_GLOBAL_INVL, 0);
2635 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2636 DMA_TLB_GLOBAL_FLUSH, 0);
2637 }
2638}
2639
2640static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2641{
2642 struct dmar_drhd_unit *drhd;
2643 struct intel_iommu *iommu = NULL;
2644 unsigned long flag;
2645
2646 for_each_active_iommu(iommu, drhd) {
2647 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2648 GFP_ATOMIC);
2649 if (!iommu->iommu_state)
2650 goto nomem;
2651 }
2652
2653 iommu_flush_all();
2654
2655 for_each_active_iommu(iommu, drhd) {
2656 iommu_disable_translation(iommu);
2657
2658 spin_lock_irqsave(&iommu->register_lock, flag);
2659
2660 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2661 readl(iommu->reg + DMAR_FECTL_REG);
2662 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2663 readl(iommu->reg + DMAR_FEDATA_REG);
2664 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2665 readl(iommu->reg + DMAR_FEADDR_REG);
2666 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2667 readl(iommu->reg + DMAR_FEUADDR_REG);
2668
2669 spin_unlock_irqrestore(&iommu->register_lock, flag);
2670 }
2671 return 0;
2672
2673nomem:
2674 for_each_active_iommu(iommu, drhd)
2675 kfree(iommu->iommu_state);
2676
2677 return -ENOMEM;
2678}
2679
2680static int iommu_resume(struct sys_device *dev)
2681{
2682 struct dmar_drhd_unit *drhd;
2683 struct intel_iommu *iommu = NULL;
2684 unsigned long flag;
2685
2686 if (init_iommu_hw()) {
2687 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
2688 return -EIO;
2689 }
2690
2691 for_each_active_iommu(iommu, drhd) {
2692
2693 spin_lock_irqsave(&iommu->register_lock, flag);
2694
2695 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2696 iommu->reg + DMAR_FECTL_REG);
2697 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2698 iommu->reg + DMAR_FEDATA_REG);
2699 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2700 iommu->reg + DMAR_FEADDR_REG);
2701 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2702 iommu->reg + DMAR_FEUADDR_REG);
2703
2704 spin_unlock_irqrestore(&iommu->register_lock, flag);
2705 }
2706
2707 for_each_active_iommu(iommu, drhd)
2708 kfree(iommu->iommu_state);
2709
2710 return 0;
2711}
2712
2713static struct sysdev_class iommu_sysclass = {
2714 .name = "iommu",
2715 .resume = iommu_resume,
2716 .suspend = iommu_suspend,
2717};
2718
2719static struct sys_device device_iommu = {
2720 .cls = &iommu_sysclass,
2721};
2722
2723static int __init init_iommu_sysfs(void)
2724{
2725 int error;
2726
2727 error = sysdev_class_register(&iommu_sysclass);
2728 if (error)
2729 return error;
2730
2731 error = sysdev_register(&device_iommu);
2732 if (error)
2733 sysdev_class_unregister(&iommu_sysclass);
2734
2735 return error;
2736}
2737
2738#else
2739static int __init init_iommu_sysfs(void)
2740{
2741 return 0;
2742}
2743#endif /* CONFIG_PM */
2744
2600int __init intel_iommu_init(void) 2745int __init intel_iommu_init(void)
2601{ 2746{
2602 int ret = 0; 2747 int ret = 0;
@@ -2632,6 +2777,7 @@ int __init intel_iommu_init(void)
2632 init_timer(&unmap_timer); 2777 init_timer(&unmap_timer);
2633 force_iommu = 1; 2778 force_iommu = 1;
2634 dma_ops = &intel_dma_ops; 2779 dma_ops = &intel_dma_ops;
2780 init_iommu_sysfs();
2635 2781
2636 register_iommu(&intel_iommu_ops); 2782 register_iommu(&intel_iommu_ops);
2637 2783
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 77214ead1a36..3771cd1f876e 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -284,6 +284,14 @@ struct iommu_flush {
284 unsigned int size_order, u64 type, int non_present_entry_flush); 284 unsigned int size_order, u64 type, int non_present_entry_flush);
285}; 285};
286 286
/*
 * Indices into intel_iommu::iommu_state — one save slot per DMAR
 * register preserved across suspend/resume.
 */
enum {
	SR_DMAR_FECTL_REG,	/* save slot for DMAR_FECTL_REG */
	SR_DMAR_FEDATA_REG,	/* save slot for DMAR_FEDATA_REG */
	SR_DMAR_FEADDR_REG,	/* save slot for DMAR_FEADDR_REG */
	SR_DMAR_FEUADDR_REG,	/* save slot for DMAR_FEUADDR_REG */
	MAX_SR_DMAR_REGS	/* number of saved registers */
};
294
287struct intel_iommu { 295struct intel_iommu {
288 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 296 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
289 u64 cap; 297 u64 cap;
@@ -304,6 +312,8 @@ struct intel_iommu {
304 struct iommu_flush flush; 312 struct iommu_flush flush;
305#endif 313#endif
306 struct q_inval *qi; /* Queued invalidation info */ 314 struct q_inval *qi; /* Queued invalidation info */
315 u32 *iommu_state; /* Store iommu states between suspend and resume.*/
316
307#ifdef CONFIG_INTR_REMAP 317#ifdef CONFIG_INTR_REMAP
308 struct ir_table *ir_table; /* Interrupt remapping info */ 318 struct ir_table *ir_table; /* Interrupt remapping info */
309#endif 319#endif
@@ -322,6 +332,7 @@ extern int alloc_iommu(struct dmar_drhd_unit *drhd);
322extern void free_iommu(struct intel_iommu *iommu); 332extern void free_iommu(struct intel_iommu *iommu);
323extern int dmar_enable_qi(struct intel_iommu *iommu); 333extern int dmar_enable_qi(struct intel_iommu *iommu);
324extern void dmar_disable_qi(struct intel_iommu *iommu); 334extern void dmar_disable_qi(struct intel_iommu *iommu);
335extern int dmar_reenable_qi(struct intel_iommu *iommu);
325extern void qi_global_iec(struct intel_iommu *iommu); 336extern void qi_global_iec(struct intel_iommu *iommu);
326 337
327extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, 338extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,