 drivers/pci/intel-iommu.c   | 146
 include/linux/intel-iommu.h |  11
 2 files changed, 157 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..ef5795d0396b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
 #include <linux/iova.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/sysdev.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -2597,6 +2598,150 @@ static void __init init_no_remapping_devices(void)
 	}
 }
 
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	for_each_active_iommu(iommu, drhd)
+		if (iommu->qi)
+			dmar_reenable_qi(iommu);
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+
+		iommu_set_root_entry(iommu);
+
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+		iommu_disable_protect_mem_regions(iommu);
+		iommu_enable_translation(iommu);
+	}
+
+	return 0;
+}
+
+static void iommu_flush_all(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->flush.flush_context(iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+	}
+}
+
+static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+					     GFP_ATOMIC);
+		if (!iommu->iommu_state)
+			goto nomem;
+	}
+
+	iommu_flush_all();
+
+	for_each_active_iommu(iommu, drhd) {
+		iommu_disable_translation(iommu);
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		iommu->iommu_state[SR_DMAR_FECTL_REG] =
+			readl(iommu->reg + DMAR_FECTL_REG);
+		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+			readl(iommu->reg + DMAR_FEDATA_REG);
+		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+			readl(iommu->reg + DMAR_FEADDR_REG);
+		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+			readl(iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+	return 0;
+
+nomem:
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return -ENOMEM;
+}
+
+static int iommu_resume(struct sys_device *dev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	unsigned long flag;
+
+	if (init_iommu_hw()) {
+		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+		return -EIO;
+	}
+
+	for_each_active_iommu(iommu, drhd) {
+
+		spin_lock_irqsave(&iommu->register_lock, flag);
+
+		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+			iommu->reg + DMAR_FECTL_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+			iommu->reg + DMAR_FEDATA_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+			iommu->reg + DMAR_FEADDR_REG);
+		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+			iommu->reg + DMAR_FEUADDR_REG);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+	}
+
+	for_each_active_iommu(iommu, drhd)
+		kfree(iommu->iommu_state);
+
+	return 0;
+}
+
+static struct sysdev_class iommu_sysclass = {
+	.name		= "iommu",
+	.resume		= iommu_resume,
+	.suspend	= iommu_suspend,
+};
+
+static struct sys_device device_iommu = {
+	.cls	= &iommu_sysclass,
+};
+
+static int __init init_iommu_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&iommu_sysclass);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_iommu);
+	if (error)
+		sysdev_class_unregister(&iommu_sysclass);
+
+	return error;
+}
+
+#else
+static int __init init_iommu_sysfs(void)
+{
+	return 0;
+}
+#endif	/* CONFIG_PM */
+
 int __init intel_iommu_init(void)
 {
 	int ret = 0;
@@ -2632,6 +2777,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
 	dma_ops = &intel_dma_ops;
+	init_iommu_sysfs();
 
 	register_iommu(&intel_iommu_ops);
 
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 77214ead1a36..3771cd1f876e 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -284,6 +284,14 @@ struct iommu_flush {
 		unsigned int size_order, u64 type, int non_present_entry_flush);
 };
 
+enum {
+	SR_DMAR_FECTL_REG,
+	SR_DMAR_FEDATA_REG,
+	SR_DMAR_FEADDR_REG,
+	SR_DMAR_FEUADDR_REG,
+	MAX_SR_DMAR_REGS
+};
+
 struct intel_iommu {
 	void __iomem *reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
@@ -304,6 +312,8 @@ struct intel_iommu {
 	struct iommu_flush flush;
 #endif
 	struct q_inval *qi;		/* Queued invalidation info */
+	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
 #ifdef CONFIG_INTR_REMAP
 	struct ir_table *ir_table;	/* Interrupt remapping info */
 #endif
@@ -322,6 +332,7 @@ extern int alloc_iommu(struct dmar_drhd_unit *drhd);
 extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
 extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
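
The heart of the patch is the enum/array pairing declared in the header above: iommu_suspend() snapshots the four fault-event registers (FECTL, FEDATA, FEADDR, FEUADDR) into the per-IOMMU iommu_state array indexed by the SR_DMAR_* constants, and iommu_resume() writes them back only after init_iommu_hw() has re-enabled queued invalidation, reprogrammed the root entry, and turned translation back on. A minimal, self-contained C sketch of that save/restore pattern in isolation follows; the fake_* names, reg_read()/reg_write() accessors, and MAX_SR_REGS are stand-ins invented for illustration, not kernel APIs.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in indices, mirroring the SR_DMAR_* enum added by the patch. */
enum { SR_FECTL, SR_FEDATA, SR_FEADDR, SR_FEUADDR, MAX_SR_REGS };

/* Fake register file standing in for readl()/writel() on iommu->reg. */
static uint32_t fake_regs[MAX_SR_REGS];
static uint32_t reg_read(int off)              { return fake_regs[off]; }
static void     reg_write(int off, uint32_t v) { fake_regs[off] = v; }

struct fake_iommu {
	uint32_t *state;	/* plays the role of iommu->iommu_state */
};

/* Suspend: allocate the scratch array, then capture each register. */
static int fake_suspend(struct fake_iommu *iommu)
{
	iommu->state = calloc(MAX_SR_REGS, sizeof(uint32_t));
	if (!iommu->state)
		return -1;
	for (int i = 0; i < MAX_SR_REGS; i++)
		iommu->state[i] = reg_read(i);
	return 0;
}

/* Resume: write the captured values back, then release the scratch array. */
static void fake_resume(struct fake_iommu *iommu)
{
	for (int i = 0; i < MAX_SR_REGS; i++)
		reg_write(i, iommu->state[i]);
	free(iommu->state);
	iommu->state = NULL;
}

int main(void)
{
	struct fake_iommu iommu = { 0 };

	fake_regs[SR_FECTL] = 0xdeadbeef;		/* pretend programmed state */
	if (fake_suspend(&iommu))
		return 1;
	memset(fake_regs, 0, sizeof(fake_regs));	/* registers lost across suspend */
	fake_resume(&iommu);
	return fake_regs[SR_FECTL] == 0xdeadbeef ? 0 : 1;
}

The kernel code differs mainly in that the allocation uses GFP_ATOMIC (sysdev suspend callbacks of this era run late in the suspend path, with interrupts disabled) and in the ordering constraint that the restore happens only after the hardware has been re-initialised by init_iommu_hw().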
