Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 530
1 file changed, 267 insertions, 263 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3f7b81c065d2..a2692724b68f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@ | |||
18 | * Author: Ashok Raj <ashok.raj@intel.com> | 18 | * Author: Ashok Raj <ashok.raj@intel.com> |
19 | * Author: Shaohua Li <shaohua.li@intel.com> | 19 | * Author: Shaohua Li <shaohua.li@intel.com> |
20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
21 | * Author: Fenghua Yu <fenghua.yu@intel.com> | ||
21 | */ | 22 | */ |
22 | 23 | ||
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
@@ -33,13 +34,15 @@ | |||
33 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
34 | #include <linux/mempool.h> | 35 | #include <linux/mempool.h> |
35 | #include <linux/timer.h> | 36 | #include <linux/timer.h> |
36 | #include "iova.h" | 37 | #include <linux/iova.h> |
37 | #include "intel-iommu.h" | 38 | #include <linux/intel-iommu.h> |
38 | #include <asm/proto.h> /* force_iommu in this header in x86-64*/ | ||
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/gart.h> | 40 | #include <asm/iommu.h> |
41 | #include "pci.h" | 41 | #include "pci.h" |
42 | 42 | ||
43 | #define ROOT_SIZE VTD_PAGE_SIZE | ||
44 | #define CONTEXT_SIZE VTD_PAGE_SIZE | ||
45 | |||
43 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) | 46 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) |
44 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) | 47 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) |
45 | 48 | ||
@@ -49,8 +52,6 @@ | |||
49 | 52 | ||
50 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 | 53 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 |
51 | 54 | ||
52 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ | ||
53 | |||
54 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) | 55 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) |
55 | 56 | ||
56 | 57 | ||
@@ -58,8 +59,6 @@ static void flush_unmaps_timeout(unsigned long data); | |||
58 | 59 | ||
59 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); | 60 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); |
60 | 61 | ||
61 | static struct intel_iommu *g_iommus; | ||
62 | |||
63 | #define HIGH_WATER_MARK 250 | 62 | #define HIGH_WATER_MARK 250 |
64 | struct deferred_flush_tables { | 63 | struct deferred_flush_tables { |
65 | int next; | 64 | int next; |
@@ -80,7 +79,7 @@ static long list_size; | |||
80 | 79 | ||
81 | static void domain_remove_dev_info(struct dmar_domain *domain); | 80 | static void domain_remove_dev_info(struct dmar_domain *domain); |
82 | 81 | ||
83 | static int dmar_disabled; | 82 | int dmar_disabled; |
84 | static int __initdata dmar_map_gfx = 1; | 83 | static int __initdata dmar_map_gfx = 1; |
85 | static int dmar_forcedac; | 84 | static int dmar_forcedac; |
86 | static int intel_iommu_strict; | 85 | static int intel_iommu_strict; |
@@ -160,7 +159,7 @@ static inline void *alloc_domain_mem(void) | |||
160 | return iommu_kmem_cache_alloc(iommu_domain_cache); | 159 | return iommu_kmem_cache_alloc(iommu_domain_cache); |
161 | } | 160 | } |
162 | 161 | ||
163 | static inline void free_domain_mem(void *vaddr) | 162 | static void free_domain_mem(void *vaddr) |
164 | { | 163 | { |
165 | kmem_cache_free(iommu_domain_cache, vaddr); | 164 | kmem_cache_free(iommu_domain_cache, vaddr); |
166 | } | 165 | } |
@@ -185,13 +184,6 @@ void free_iova_mem(struct iova *iova) | |||
185 | kmem_cache_free(iommu_iova_cache, iova); | 184 | kmem_cache_free(iommu_iova_cache, iova); |
186 | } | 185 | } |
187 | 186 | ||
188 | static inline void __iommu_flush_cache( | ||
189 | struct intel_iommu *iommu, void *addr, int size) | ||
190 | { | ||
191 | if (!ecap_coherent(iommu->ecap)) | ||
192 | clflush_cache_range(addr, size); | ||
193 | } | ||
194 | |||
195 | /* Gets context entry for a given bus and devfn */ | 187 | /* Gets context entry for a given bus and devfn */ |
196 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | 188 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, |
197 | u8 bus, u8 devfn) | 189 | u8 bus, u8 devfn) |
@@ -210,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | |||
210 | spin_unlock_irqrestore(&iommu->lock, flags); | 202 | spin_unlock_irqrestore(&iommu->lock, flags); |
211 | return NULL; | 203 | return NULL; |
212 | } | 204 | } |
213 | __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); | 205 | __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); |
214 | phy_addr = virt_to_phys((void *)context); | 206 | phy_addr = virt_to_phys((void *)context); |
215 | set_root_value(root, phy_addr); | 207 | set_root_value(root, phy_addr); |
216 | set_root_present(root); | 208 | set_root_present(root); |
@@ -356,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | |||
356 | return NULL; | 348 | return NULL; |
357 | } | 349 | } |
358 | __iommu_flush_cache(domain->iommu, tmp_page, | 350 | __iommu_flush_cache(domain->iommu, tmp_page, |
359 | PAGE_SIZE_4K); | 351 | PAGE_SIZE); |
360 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); | 352 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); |
361 | /* | 353 | /* |
362 | * high level table always sets r/w, last level page | 354 | * high level table always sets r/w, last level page |
@@ -419,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) | |||
419 | start &= (((u64)1) << addr_width) - 1; | 411 | start &= (((u64)1) << addr_width) - 1; |
420 | end &= (((u64)1) << addr_width) - 1; | 412 | end &= (((u64)1) << addr_width) - 1; |
421 | /* in case it's partial page */ | 413 | /* in case it's partial page */ |
422 | start = PAGE_ALIGN_4K(start); | 414 | start = PAGE_ALIGN(start); |
423 | end &= PAGE_MASK_4K; | 415 | end &= PAGE_MASK; |
424 | 416 | ||
425 | /* we don't need lock here, nobody else touches the iova range */ | 417 | /* we don't need lock here, nobody else touches the iova range */ |
426 | while (start < end) { | 418 | while (start < end) { |
427 | dma_pte_clear_one(domain, start); | 419 | dma_pte_clear_one(domain, start); |
428 | start += PAGE_SIZE_4K; | 420 | start += VTD_PAGE_SIZE; |
429 | } | 421 | } |
430 | } | 422 | } |
431 | 423 | ||
@@ -479,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
479 | if (!root) | 471 | if (!root) |
480 | return -ENOMEM; | 472 | return -ENOMEM; |
481 | 473 | ||
482 | __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); | 474 | __iommu_flush_cache(iommu, root, ROOT_SIZE); |
483 | 475 | ||
484 | spin_lock_irqsave(&iommu->lock, flags); | 476 | spin_lock_irqsave(&iommu->lock, flags); |
485 | iommu->root_entry = root; | 477 | iommu->root_entry = root; |
@@ -488,19 +480,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
488 | return 0; | 480 | return 0; |
489 | } | 481 | } |
490 | 482 | ||
491 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | ||
492 | {\ | ||
493 | cycles_t start_time = get_cycles();\ | ||
494 | while (1) {\ | ||
495 | sts = op (iommu->reg + offset);\ | ||
496 | if (cond)\ | ||
497 | break;\ | ||
498 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ | ||
499 | panic("DMAR hardware is malfunctioning\n");\ | ||
500 | cpu_relax();\ | ||
501 | }\ | ||
502 | } | ||
503 | |||
504 | static void iommu_set_root_entry(struct intel_iommu *iommu) | 483 | static void iommu_set_root_entry(struct intel_iommu *iommu) |
505 | { | 484 | { |
506 | void *addr; | 485 | void *addr; |
@@ -587,31 +566,10 @@ static int __iommu_flush_context(struct intel_iommu *iommu, | |||
587 | 566 | ||
588 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 567 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
589 | 568 | ||
590 | /* flush context entry will implictly flush write buffer */ | 569 | /* flush context entry will implicitly flush write buffer */ |
591 | return 0; | 570 | return 0; |
592 | } | 571 | } |
593 | 572 | ||
594 | static int inline iommu_flush_context_global(struct intel_iommu *iommu, | ||
595 | int non_present_entry_flush) | ||
596 | { | ||
597 | return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, | ||
598 | non_present_entry_flush); | ||
599 | } | ||
600 | |||
601 | static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, | ||
602 | int non_present_entry_flush) | ||
603 | { | ||
604 | return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, | ||
605 | non_present_entry_flush); | ||
606 | } | ||
607 | |||
608 | static int inline iommu_flush_context_device(struct intel_iommu *iommu, | ||
609 | u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) | ||
610 | { | ||
611 | return __iommu_flush_context(iommu, did, source_id, function_mask, | ||
612 | DMA_CCMD_DEVICE_INVL, non_present_entry_flush); | ||
613 | } | ||
614 | |||
615 | /* return value determine if we need a write buffer flush */ | 573 | /* return value determine if we need a write buffer flush */ |
616 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | 574 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, |
617 | u64 addr, unsigned int size_order, u64 type, | 575 | u64 addr, unsigned int size_order, u64 type, |
@@ -679,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
679 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); | 637 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); |
680 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) | 638 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) |
681 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", | 639 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", |
682 | DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); | 640 | (unsigned long long)DMA_TLB_IIRG(type), |
683 | /* flush context entry will implictly flush write buffer */ | 641 | (unsigned long long)DMA_TLB_IAIG(val)); |
642 | /* flush iotlb entry will implicitly flush write buffer */ | ||
684 | return 0; | 643 | return 0; |
685 | } | 644 | } |
686 | 645 | ||
687 | static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, | ||
688 | int non_present_entry_flush) | ||
689 | { | ||
690 | return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | ||
691 | non_present_entry_flush); | ||
692 | } | ||
693 | |||
694 | static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, | ||
695 | int non_present_entry_flush) | ||
696 | { | ||
697 | return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, | ||
698 | non_present_entry_flush); | ||
699 | } | ||
700 | |||
701 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 646 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
702 | u64 addr, unsigned int pages, int non_present_entry_flush) | 647 | u64 addr, unsigned int pages, int non_present_entry_flush) |
703 | { | 648 | { |
704 | unsigned int mask; | 649 | unsigned int mask; |
705 | 650 | ||
706 | BUG_ON(addr & (~PAGE_MASK_4K)); | 651 | BUG_ON(addr & (~VTD_PAGE_MASK)); |
707 | BUG_ON(pages == 0); | 652 | BUG_ON(pages == 0); |
708 | 653 | ||
709 | /* Fallback to domain selective flush if no PSI support */ | 654 | /* Fallback to domain selective flush if no PSI support */ |
710 | if (!cap_pgsel_inv(iommu->cap)) | 655 | if (!cap_pgsel_inv(iommu->cap)) |
711 | return iommu_flush_iotlb_dsi(iommu, did, | 656 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
712 | non_present_entry_flush); | 657 | DMA_TLB_DSI_FLUSH, |
658 | non_present_entry_flush); | ||
713 | 659 | ||
714 | /* | 660 | /* |
715 | * PSI requires page size to be 2 ^ x, and the base address is naturally | 661 | * PSI requires page size to be 2 ^ x, and the base address is naturally |
@@ -718,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
718 | mask = ilog2(__roundup_pow_of_two(pages)); | 664 | mask = ilog2(__roundup_pow_of_two(pages)); |
719 | /* Fallback to domain selective flush if size is too big */ | 665 | /* Fallback to domain selective flush if size is too big */ |
720 | if (mask > cap_max_amask_val(iommu->cap)) | 666 | if (mask > cap_max_amask_val(iommu->cap)) |
721 | return iommu_flush_iotlb_dsi(iommu, did, | 667 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
722 | non_present_entry_flush); | 668 | DMA_TLB_DSI_FLUSH, non_present_entry_flush); |
723 | 669 | ||
724 | return __iommu_flush_iotlb(iommu, did, addr, mask, | 670 | return iommu->flush.flush_iotlb(iommu, did, addr, mask, |
725 | DMA_TLB_PSI_FLUSH, non_present_entry_flush); | 671 | DMA_TLB_PSI_FLUSH, |
672 | non_present_entry_flush); | ||
726 | } | 673 | } |
727 | 674 | ||
728 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | 675 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) |
@@ -855,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) | |||
855 | } | 802 | } |
856 | 803 | ||
857 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, | 804 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, |
858 | u8 fault_reason, u16 source_id, u64 addr) | 805 | u8 fault_reason, u16 source_id, unsigned long long addr) |
859 | { | 806 | { |
860 | const char *reason; | 807 | const char *reason; |
861 | 808 | ||
@@ -990,6 +937,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
990 | return -ENOMEM; | 937 | return -ENOMEM; |
991 | } | 938 | } |
992 | 939 | ||
940 | spin_lock_init(&iommu->lock); | ||
941 | |||
993 | /* | 942 | /* |
994 | * if Caching mode is set, then invalid translations are tagged | 943 | * if Caching mode is set, then invalid translations are tagged |
995 | * with domainid 0. Hence we need to pre-allocate it. | 944 | * with domainid 0. Hence we need to pre-allocate it. |
@@ -998,62 +947,15 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
998 | set_bit(0, iommu->domain_ids); | 947 | set_bit(0, iommu->domain_ids); |
999 | return 0; | 948 | return 0; |
1000 | } | 949 | } |
1001 | static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, | ||
1002 | struct dmar_drhd_unit *drhd) | ||
1003 | { | ||
1004 | int ret; | ||
1005 | int map_size; | ||
1006 | u32 ver; | ||
1007 | 950 | ||
1008 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | ||
1009 | if (!iommu->reg) { | ||
1010 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1011 | goto error; | ||
1012 | } | ||
1013 | iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); | ||
1014 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | ||
1015 | |||
1016 | /* the registers might be more than one page */ | ||
1017 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | ||
1018 | cap_max_fault_reg_offset(iommu->cap)); | ||
1019 | map_size = PAGE_ALIGN_4K(map_size); | ||
1020 | if (map_size > PAGE_SIZE_4K) { | ||
1021 | iounmap(iommu->reg); | ||
1022 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | ||
1023 | if (!iommu->reg) { | ||
1024 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1025 | goto error; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | ver = readl(iommu->reg + DMAR_VER_REG); | ||
1030 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | ||
1031 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | ||
1032 | iommu->cap, iommu->ecap); | ||
1033 | ret = iommu_init_domains(iommu); | ||
1034 | if (ret) | ||
1035 | goto error_unmap; | ||
1036 | spin_lock_init(&iommu->lock); | ||
1037 | spin_lock_init(&iommu->register_lock); | ||
1038 | |||
1039 | drhd->iommu = iommu; | ||
1040 | return iommu; | ||
1041 | error_unmap: | ||
1042 | iounmap(iommu->reg); | ||
1043 | error: | ||
1044 | kfree(iommu); | ||
1045 | return NULL; | ||
1046 | } | ||
1047 | 951 | ||
1048 | static void domain_exit(struct dmar_domain *domain); | 952 | static void domain_exit(struct dmar_domain *domain); |
1049 | static void free_iommu(struct intel_iommu *iommu) | 953 | |
954 | void free_dmar_iommu(struct intel_iommu *iommu) | ||
1050 | { | 955 | { |
1051 | struct dmar_domain *domain; | 956 | struct dmar_domain *domain; |
1052 | int i; | 957 | int i; |
1053 | 958 | ||
1054 | if (!iommu) | ||
1055 | return; | ||
1056 | |||
1057 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); | 959 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); |
1058 | for (; i < cap_ndoms(iommu->cap); ) { | 960 | for (; i < cap_ndoms(iommu->cap); ) { |
1059 | domain = iommu->domains[i]; | 961 | domain = iommu->domains[i]; |
@@ -1078,10 +980,6 @@ static void free_iommu(struct intel_iommu *iommu) | |||
1078 | 980 | ||
1079 | /* free context mapping */ | 981 | /* free context mapping */ |
1080 | free_context_table(iommu); | 982 | free_context_table(iommu); |
1081 | |||
1082 | if (iommu->reg) | ||
1083 | iounmap(iommu->reg); | ||
1084 | kfree(iommu); | ||
1085 | } | 983 | } |
1086 | 984 | ||
1087 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 985 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) |
@@ -1157,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) | |||
1157 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) | 1055 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
1158 | continue; | 1056 | continue; |
1159 | addr = r->start; | 1057 | addr = r->start; |
1160 | addr &= PAGE_MASK_4K; | 1058 | addr &= PAGE_MASK; |
1161 | size = r->end - addr; | 1059 | size = r->end - addr; |
1162 | size = PAGE_ALIGN_4K(size); | 1060 | size = PAGE_ALIGN(size); |
1163 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), | 1061 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), |
1164 | IOVA_PFN(size + addr) - 1); | 1062 | IOVA_PFN(size + addr) - 1); |
1165 | if (!iova) | 1063 | if (!iova) |
@@ -1221,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1221 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 1119 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
1222 | if (!domain->pgd) | 1120 | if (!domain->pgd) |
1223 | return -ENOMEM; | 1121 | return -ENOMEM; |
1224 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); | 1122 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
1225 | return 0; | 1123 | return 0; |
1226 | } | 1124 | } |
1227 | 1125 | ||
@@ -1237,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) | |||
1237 | /* destroy iovas */ | 1135 | /* destroy iovas */ |
1238 | put_iova_domain(&domain->iovad); | 1136 | put_iova_domain(&domain->iovad); |
1239 | end = DOMAIN_MAX_ADDR(domain->gaw); | 1137 | end = DOMAIN_MAX_ADDR(domain->gaw); |
1240 | end = end & (~PAGE_MASK_4K); | 1138 | end = end & (~PAGE_MASK); |
1241 | 1139 | ||
1242 | /* clear ptes */ | 1140 | /* clear ptes */ |
1243 | dma_pte_clear_range(domain, 0, end); | 1141 | dma_pte_clear_range(domain, 0, end); |
@@ -1277,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1277 | __iommu_flush_cache(iommu, context, sizeof(*context)); | 1175 | __iommu_flush_cache(iommu, context, sizeof(*context)); |
1278 | 1176 | ||
1279 | /* it's a non-present to present mapping */ | 1177 | /* it's a non-present to present mapping */ |
1280 | if (iommu_flush_context_device(iommu, domain->id, | 1178 | if (iommu->flush.flush_context(iommu, domain->id, |
1281 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) | 1179 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, |
1180 | DMA_CCMD_DEVICE_INVL, 1)) | ||
1282 | iommu_flush_write_buffer(iommu); | 1181 | iommu_flush_write_buffer(iommu); |
1283 | else | 1182 | else |
1284 | iommu_flush_iotlb_dsi(iommu, 0, 0); | 1183 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); |
1184 | |||
1285 | spin_unlock_irqrestore(&iommu->lock, flags); | 1185 | spin_unlock_irqrestore(&iommu->lock, flags); |
1286 | return 0; | 1186 | return 0; |
1287 | } | 1187 | } |
@@ -1356,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1356 | u64 start_pfn, end_pfn; | 1256 | u64 start_pfn, end_pfn; |
1357 | struct dma_pte *pte; | 1257 | struct dma_pte *pte; |
1358 | int index; | 1258 | int index; |
1259 | int addr_width = agaw_to_width(domain->agaw); | ||
1260 | |||
1261 | hpa &= (((u64)1) << addr_width) - 1; | ||
1359 | 1262 | ||
1360 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) | 1263 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
1361 | return -EINVAL; | 1264 | return -EINVAL; |
1362 | iova &= PAGE_MASK_4K; | 1265 | iova &= PAGE_MASK; |
1363 | start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; | 1266 | start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; |
1364 | end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; | 1267 | end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; |
1365 | index = 0; | 1268 | index = 0; |
1366 | while (start_pfn < end_pfn) { | 1269 | while (start_pfn < end_pfn) { |
1367 | pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); | 1270 | pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); |
1368 | if (!pte) | 1271 | if (!pte) |
1369 | return -ENOMEM; | 1272 | return -ENOMEM; |
1370 | /* We don't need lock here, nobody else | 1273 | /* We don't need lock here, nobody else |
1371 | * touches the iova range | 1274 | * touches the iova range |
1372 | */ | 1275 | */ |
1373 | BUG_ON(dma_pte_addr(*pte)); | 1276 | BUG_ON(dma_pte_addr(*pte)); |
1374 | dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); | 1277 | dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); |
1375 | dma_set_pte_prot(*pte, prot); | 1278 | dma_set_pte_prot(*pte, prot); |
1376 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 1279 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); |
1377 | start_pfn++; | 1280 | start_pfn++; |
@@ -1383,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1383 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 1286 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) |
1384 | { | 1287 | { |
1385 | clear_context_table(domain->iommu, bus, devfn); | 1288 | clear_context_table(domain->iommu, bus, devfn); |
1386 | iommu_flush_context_global(domain->iommu, 0); | 1289 | domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, |
1387 | iommu_flush_iotlb_global(domain->iommu, 0); | 1290 | DMA_CCMD_GLOBAL_INVL, 0); |
1291 | domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, | ||
1292 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
1388 | } | 1293 | } |
1389 | 1294 | ||
1390 | static void domain_remove_dev_info(struct dmar_domain *domain) | 1295 | static void domain_remove_dev_info(struct dmar_domain *domain) |
@@ -1414,7 +1319,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
1414 | * find_domain | 1319 | * find_domain |
1415 | * Note: we use struct pci_dev->dev.archdata.iommu stores the info | 1320 | * Note: we use struct pci_dev->dev.archdata.iommu stores the info |
1416 | */ | 1321 | */ |
1417 | struct dmar_domain * | 1322 | static struct dmar_domain * |
1418 | find_domain(struct pci_dev *pdev) | 1323 | find_domain(struct pci_dev *pdev) |
1419 | { | 1324 | { |
1420 | struct device_domain_info *info; | 1325 | struct device_domain_info *info; |
@@ -1426,37 +1331,6 @@ find_domain(struct pci_dev *pdev) | |||
1426 | return NULL; | 1331 | return NULL; |
1427 | } | 1332 | } |
1428 | 1333 | ||
1429 | static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | ||
1430 | struct pci_dev *dev) | ||
1431 | { | ||
1432 | int index; | ||
1433 | |||
1434 | while (dev) { | ||
1435 | for (index = 0; index < cnt; index++) | ||
1436 | if (dev == devices[index]) | ||
1437 | return 1; | ||
1438 | |||
1439 | /* Check our parent */ | ||
1440 | dev = dev->bus->self; | ||
1441 | } | ||
1442 | |||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static struct dmar_drhd_unit * | ||
1447 | dmar_find_matched_drhd_unit(struct pci_dev *dev) | ||
1448 | { | ||
1449 | struct dmar_drhd_unit *drhd = NULL; | ||
1450 | |||
1451 | list_for_each_entry(drhd, &dmar_drhd_units, list) { | ||
1452 | if (drhd->include_all || dmar_pci_device_match(drhd->devices, | ||
1453 | drhd->devices_cnt, dev)) | ||
1454 | return drhd; | ||
1455 | } | ||
1456 | |||
1457 | return NULL; | ||
1458 | } | ||
1459 | |||
1460 | /* domain is initialized */ | 1334 | /* domain is initialized */ |
1461 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | 1335 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) |
1462 | { | 1336 | { |
@@ -1578,11 +1452,13 @@ error: | |||
1578 | return find_domain(pdev); | 1452 | return find_domain(pdev); |
1579 | } | 1453 | } |
1580 | 1454 | ||
1581 | static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | 1455 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
1456 | unsigned long long start, | ||
1457 | unsigned long long end) | ||
1582 | { | 1458 | { |
1583 | struct dmar_domain *domain; | 1459 | struct dmar_domain *domain; |
1584 | unsigned long size; | 1460 | unsigned long size; |
1585 | u64 base; | 1461 | unsigned long long base; |
1586 | int ret; | 1462 | int ret; |
1587 | 1463 | ||
1588 | printk(KERN_INFO | 1464 | printk(KERN_INFO |
@@ -1594,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | |||
1594 | return -ENOMEM; | 1470 | return -ENOMEM; |
1595 | 1471 | ||
1596 | /* The address might not be aligned */ | 1472 | /* The address might not be aligned */ |
1597 | base = start & PAGE_MASK_4K; | 1473 | base = start & PAGE_MASK; |
1598 | size = end - base; | 1474 | size = end - base; |
1599 | size = PAGE_ALIGN_4K(size); | 1475 | size = PAGE_ALIGN(size); |
1600 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), | 1476 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), |
1601 | IOVA_PFN(base + size) - 1)) { | 1477 | IOVA_PFN(base + size) - 1)) { |
1602 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); | 1478 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); |
@@ -1729,8 +1605,6 @@ int __init init_dmars(void) | |||
1729 | * endfor | 1605 | * endfor |
1730 | */ | 1606 | */ |
1731 | for_each_drhd_unit(drhd) { | 1607 | for_each_drhd_unit(drhd) { |
1732 | if (drhd->ignored) | ||
1733 | continue; | ||
1734 | g_num_of_iommus++; | 1608 | g_num_of_iommus++; |
1735 | /* | 1609 | /* |
1736 | * lock not needed as this is only incremented in the single | 1610 | * lock not needed as this is only incremented in the single |
@@ -1739,12 +1613,6 @@ int __init init_dmars(void) | |||
1739 | */ | 1613 | */ |
1740 | } | 1614 | } |
1741 | 1615 | ||
1742 | g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL); | ||
1743 | if (!g_iommus) { | ||
1744 | ret = -ENOMEM; | ||
1745 | goto error; | ||
1746 | } | ||
1747 | |||
1748 | deferred_flush = kzalloc(g_num_of_iommus * | 1616 | deferred_flush = kzalloc(g_num_of_iommus * |
1749 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 1617 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
1750 | if (!deferred_flush) { | 1618 | if (!deferred_flush) { |
@@ -1752,16 +1620,15 @@ int __init init_dmars(void) | |||
1752 | goto error; | 1620 | goto error; |
1753 | } | 1621 | } |
1754 | 1622 | ||
1755 | i = 0; | ||
1756 | for_each_drhd_unit(drhd) { | 1623 | for_each_drhd_unit(drhd) { |
1757 | if (drhd->ignored) | 1624 | if (drhd->ignored) |
1758 | continue; | 1625 | continue; |
1759 | iommu = alloc_iommu(&g_iommus[i], drhd); | 1626 | |
1760 | i++; | 1627 | iommu = drhd->iommu; |
1761 | if (!iommu) { | 1628 | |
1762 | ret = -ENOMEM; | 1629 | ret = iommu_init_domains(iommu); |
1630 | if (ret) | ||
1763 | goto error; | 1631 | goto error; |
1764 | } | ||
1765 | 1632 | ||
1766 | /* | 1633 | /* |
1767 | * TBD: | 1634 | * TBD: |
@@ -1775,6 +1642,28 @@ int __init init_dmars(void) | |||
1775 | } | 1642 | } |
1776 | } | 1643 | } |
1777 | 1644 | ||
1645 | for_each_drhd_unit(drhd) { | ||
1646 | if (drhd->ignored) | ||
1647 | continue; | ||
1648 | |||
1649 | iommu = drhd->iommu; | ||
1650 | if (dmar_enable_qi(iommu)) { | ||
1651 | /* | ||
1652 | * Queued Invalidate not enabled, use Register Based | ||
1653 | * Invalidate | ||
1654 | */ | ||
1655 | iommu->flush.flush_context = __iommu_flush_context; | ||
1656 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | ||
1657 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " | ||
1658 | "invalidation\n", drhd->reg_base_addr); | ||
1659 | } else { | ||
1660 | iommu->flush.flush_context = qi_flush_context; | ||
1661 | iommu->flush.flush_iotlb = qi_flush_iotlb; | ||
1662 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " | ||
1663 | "invalidation\n", drhd->reg_base_addr); | ||
1664 | } | ||
1665 | } | ||
1666 | |||
1778 | /* | 1667 | /* |
1779 | * For each rmrr | 1668 | * For each rmrr |
1780 | * for each dev attached to rmrr | 1669 | * for each dev attached to rmrr |
@@ -1827,9 +1716,10 @@ int __init init_dmars(void) | |||
1827 | 1716 | ||
1828 | iommu_set_root_entry(iommu); | 1717 | iommu_set_root_entry(iommu); |
1829 | 1718 | ||
1830 | iommu_flush_context_global(iommu, 0); | 1719 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, |
1831 | iommu_flush_iotlb_global(iommu, 0); | 1720 | 0); |
1832 | 1721 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | |
1722 | 0); | ||
1833 | iommu_disable_protect_mem_regions(iommu); | 1723 | iommu_disable_protect_mem_regions(iommu); |
1834 | 1724 | ||
1835 | ret = iommu_enable_translation(iommu); | 1725 | ret = iommu_enable_translation(iommu); |
@@ -1845,15 +1735,14 @@ error: | |||
1845 | iommu = drhd->iommu; | 1735 | iommu = drhd->iommu; |
1846 | free_iommu(iommu); | 1736 | free_iommu(iommu); |
1847 | } | 1737 | } |
1848 | kfree(g_iommus); | ||
1849 | return ret; | 1738 | return ret; |
1850 | } | 1739 | } |
1851 | 1740 | ||
1852 | static inline u64 aligned_size(u64 host_addr, size_t size) | 1741 | static inline u64 aligned_size(u64 host_addr, size_t size) |
1853 | { | 1742 | { |
1854 | u64 addr; | 1743 | u64 addr; |
1855 | addr = (host_addr & (~PAGE_MASK_4K)) + size; | 1744 | addr = (host_addr & (~PAGE_MASK)) + size; |
1856 | return PAGE_ALIGN_4K(addr); | 1745 | return PAGE_ALIGN(addr); |
1857 | } | 1746 | } |
1858 | 1747 | ||
1859 | struct iova * | 1748 | struct iova * |
@@ -1867,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) | |||
1867 | return NULL; | 1756 | return NULL; |
1868 | 1757 | ||
1869 | piova = alloc_iova(&domain->iovad, | 1758 | piova = alloc_iova(&domain->iovad, |
1870 | size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); | 1759 | size >> PAGE_SHIFT, IOVA_PFN(end), 1); |
1871 | return piova; | 1760 | return piova; |
1872 | } | 1761 | } |
1873 | 1762 | ||
1874 | static struct iova * | 1763 | static struct iova * |
1875 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | 1764 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, |
1876 | size_t size) | 1765 | size_t size, u64 dma_mask) |
1877 | { | 1766 | { |
1878 | struct pci_dev *pdev = to_pci_dev(dev); | 1767 | struct pci_dev *pdev = to_pci_dev(dev); |
1879 | struct iova *iova = NULL; | 1768 | struct iova *iova = NULL; |
1880 | 1769 | ||
1881 | if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { | 1770 | if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) |
1882 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1771 | iova = iommu_alloc_iova(domain, size, dma_mask); |
1883 | } else { | 1772 | else { |
1884 | /* | 1773 | /* |
1885 | * First try to allocate an io virtual address in | 1774 | * First try to allocate an io virtual address in |
1886 | * DMA_32BIT_MASK and if that fails then try allocating | 1775 | * DMA_32BIT_MASK and if that fails then try allocating |
@@ -1888,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | |||
1888 | */ | 1777 | */ |
1889 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); | 1778 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); |
1890 | if (!iova) | 1779 | if (!iova) |
1891 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1780 | iova = iommu_alloc_iova(domain, size, dma_mask); |
1892 | } | 1781 | } |
1893 | 1782 | ||
1894 | if (!iova) { | 1783 | if (!iova) { |
@@ -1927,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1927 | return domain; | 1816 | return domain; |
1928 | } | 1817 | } |
1929 | 1818 | ||
1930 | static dma_addr_t | 1819 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
1931 | intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | 1820 | size_t size, int dir, u64 dma_mask) |
1932 | { | 1821 | { |
1933 | struct pci_dev *pdev = to_pci_dev(hwdev); | 1822 | struct pci_dev *pdev = to_pci_dev(hwdev); |
1934 | struct dmar_domain *domain; | 1823 | struct dmar_domain *domain; |
1935 | unsigned long start_paddr; | 1824 | phys_addr_t start_paddr; |
1936 | struct iova *iova; | 1825 | struct iova *iova; |
1937 | int prot = 0; | 1826 | int prot = 0; |
1938 | int ret; | 1827 | int ret; |
@@ -1947,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
1947 | 1836 | ||
1948 | size = aligned_size((u64)paddr, size); | 1837 | size = aligned_size((u64)paddr, size); |
1949 | 1838 | ||
1950 | iova = __intel_alloc_iova(hwdev, domain, size); | 1839 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
1951 | if (!iova) | 1840 | if (!iova) |
1952 | goto error; | 1841 | goto error; |
1953 | 1842 | ||
1954 | start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; | 1843 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
1955 | 1844 | ||
1956 | /* | 1845 | /* |
1957 | * Check if DMAR supports zero-length reads on write only | 1846 | * Check if DMAR supports zero-length reads on write only |
@@ -1969,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
1969 | * is not a big problem | 1858 | * is not a big problem |
1970 | */ | 1859 | */ |
1971 | ret = domain_page_mapping(domain, start_paddr, | 1860 | ret = domain_page_mapping(domain, start_paddr, |
1972 | ((u64)paddr) & PAGE_MASK_4K, size, prot); | 1861 | ((u64)paddr) & PAGE_MASK, size, prot); |
1973 | if (ret) | 1862 | if (ret) |
1974 | goto error; | 1863 | goto error; |
1975 | 1864 | ||
1976 | pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", | ||
1977 | pci_name(pdev), size, (u64)paddr, | ||
1978 | size, (u64)start_paddr, dir); | ||
1979 | |||
1980 | /* it's a non-present to present mapping */ | 1865 | /* it's a non-present to present mapping */ |
1981 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, | 1866 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, |
1982 | start_paddr, size >> PAGE_SHIFT_4K, 1); | 1867 | start_paddr, size >> VTD_PAGE_SHIFT, 1); |
1983 | if (ret) | 1868 | if (ret) |
1984 | iommu_flush_write_buffer(domain->iommu); | 1869 | iommu_flush_write_buffer(domain->iommu); |
1985 | 1870 | ||
1986 | return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); | 1871 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); |
1987 | 1872 | ||
1988 | error: | 1873 | error: |
1989 | if (iova) | 1874 | if (iova) |
1990 | __free_iova(&domain->iovad, iova); | 1875 | __free_iova(&domain->iovad, iova); |
1991 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", | 1876 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", |
1992 | pci_name(pdev), size, (u64)paddr, dir); | 1877 | pci_name(pdev), size, (unsigned long long)paddr, dir); |
1993 | return 0; | 1878 | return 0; |
1994 | } | 1879 | } |
1995 | 1880 | ||
1881 | dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, | ||
1882 | size_t size, int dir) | ||
1883 | { | ||
1884 | return __intel_map_single(hwdev, paddr, size, dir, | ||
1885 | to_pci_dev(hwdev)->dma_mask); | ||
1886 | } | ||
1887 | |||
1996 | static void flush_unmaps(void) | 1888 | static void flush_unmaps(void) |
1997 | { | 1889 | { |
1998 | int i, j; | 1890 | int i, j; |
@@ -2002,7 +1894,11 @@ static void flush_unmaps(void) | |||
2002 | /* just flush them all */ | 1894 | /* just flush them all */ |
2003 | for (i = 0; i < g_num_of_iommus; i++) { | 1895 | for (i = 0; i < g_num_of_iommus; i++) { |
2004 | if (deferred_flush[i].next) { | 1896 | if (deferred_flush[i].next) { |
2005 | iommu_flush_iotlb_global(&g_iommus[i], 0); | 1897 | struct intel_iommu *iommu = |
1898 | deferred_flush[i].domain[0]->iommu; | ||
1899 | |||
1900 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | ||
1901 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
2006 | for (j = 0; j < deferred_flush[i].next; j++) { | 1902 | for (j = 0; j < deferred_flush[i].next; j++) { |
2007 | __free_iova(&deferred_flush[i].domain[j]->iovad, | 1903 | __free_iova(&deferred_flush[i].domain[j]->iovad, |
2008 | deferred_flush[i].iova[j]); | 1904 | deferred_flush[i].iova[j]); |
@@ -2032,7 +1928,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2032 | if (list_size == HIGH_WATER_MARK) | 1928 | if (list_size == HIGH_WATER_MARK) |
2033 | flush_unmaps(); | 1929 | flush_unmaps(); |
2034 | 1930 | ||
2035 | iommu_id = dom->iommu - g_iommus; | 1931 | iommu_id = dom->iommu->seq_id; |
1932 | |||
2036 | next = deferred_flush[iommu_id].next; | 1933 | next = deferred_flush[iommu_id].next; |
2037 | deferred_flush[iommu_id].domain[next] = dom; | 1934 | deferred_flush[iommu_id].domain[next] = dom; |
2038 | deferred_flush[iommu_id].iova[next] = iova; | 1935 | deferred_flush[iommu_id].iova[next] = iova; |
@@ -2046,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2046 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 1943 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
2047 | } | 1944 | } |
2048 | 1945 | ||
2049 | static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | 1946 | void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, |
2050 | size_t size, int dir) | 1947 | int dir) |
2051 | { | 1948 | { |
2052 | struct pci_dev *pdev = to_pci_dev(dev); | 1949 | struct pci_dev *pdev = to_pci_dev(dev); |
2053 | struct dmar_domain *domain; | 1950 | struct dmar_domain *domain; |
@@ -2063,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
2063 | if (!iova) | 1960 | if (!iova) |
2064 | return; | 1961 | return; |
2065 | 1962 | ||
2066 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 1963 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
2067 | size = aligned_size((u64)dev_addr, size); | 1964 | size = aligned_size((u64)dev_addr, size); |
2068 | 1965 | ||
2069 | pr_debug("Device %s unmapping: %lx@%llx\n", | 1966 | pr_debug("Device %s unmapping: %lx@%llx\n", |
2070 | pci_name(pdev), size, (u64)start_addr); | 1967 | pci_name(pdev), size, (unsigned long long)start_addr); |
2071 | 1968 | ||
2072 | /* clear the whole page */ | 1969 | /* clear the whole page */ |
2073 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 1970 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
@@ -2075,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
2075 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 1972 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
2076 | if (intel_iommu_strict) { | 1973 | if (intel_iommu_strict) { |
2077 | if (iommu_flush_iotlb_psi(domain->iommu, | 1974 | if (iommu_flush_iotlb_psi(domain->iommu, |
2078 | domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) | 1975 | domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) |
2079 | iommu_flush_write_buffer(domain->iommu); | 1976 | iommu_flush_write_buffer(domain->iommu); |
2080 | /* free iova */ | 1977 | /* free iova */ |
2081 | __free_iova(&domain->iovad, iova); | 1978 | __free_iova(&domain->iovad, iova); |
@@ -2088,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
2088 | } | 1985 | } |
2089 | } | 1986 | } |
2090 | 1987 | ||
2091 | static void * intel_alloc_coherent(struct device *hwdev, size_t size, | 1988 | void *intel_alloc_coherent(struct device *hwdev, size_t size, |
2092 | dma_addr_t *dma_handle, gfp_t flags) | 1989 | dma_addr_t *dma_handle, gfp_t flags) |
2093 | { | 1990 | { |
2094 | void *vaddr; | 1991 | void *vaddr; |
2095 | int order; | 1992 | int order; |
2096 | 1993 | ||
2097 | size = PAGE_ALIGN_4K(size); | 1994 | size = PAGE_ALIGN(size); |
2098 | order = get_order(size); | 1995 | order = get_order(size); |
2099 | flags &= ~(GFP_DMA | GFP_DMA32); | 1996 | flags &= ~(GFP_DMA | GFP_DMA32); |
2100 | 1997 | ||
@@ -2103,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, | |||
2103 | return NULL; | 2000 | return NULL; |
2104 | memset(vaddr, 0, size); | 2001 | memset(vaddr, 0, size); |
2105 | 2002 | ||
2106 | *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); | 2003 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, |
2004 | DMA_BIDIRECTIONAL, | ||
2005 | hwdev->coherent_dma_mask); | ||
2107 | if (*dma_handle) | 2006 | if (*dma_handle) |
2108 | return vaddr; | 2007 | return vaddr; |
2109 | free_pages((unsigned long)vaddr, order); | 2008 | free_pages((unsigned long)vaddr, order); |
2110 | return NULL; | 2009 | return NULL; |
2111 | } | 2010 | } |
2112 | 2011 | ||
2113 | static void intel_free_coherent(struct device *hwdev, size_t size, | 2012 | void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
2114 | void *vaddr, dma_addr_t dma_handle) | 2013 | dma_addr_t dma_handle) |
2115 | { | 2014 | { |
2116 | int order; | 2015 | int order; |
2117 | 2016 | ||
2118 | size = PAGE_ALIGN_4K(size); | 2017 | size = PAGE_ALIGN(size); |
2119 | order = get_order(size); | 2018 | order = get_order(size); |
2120 | 2019 | ||
2121 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); | 2020 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); |
@@ -2123,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, | |||
2123 | } | 2022 | } |
2124 | 2023 | ||
2125 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) | 2024 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) |
2126 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 2025 | |
2127 | int nelems, int dir) | 2026 | void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
2027 | int nelems, int dir) | ||
2128 | { | 2028 | { |
2129 | int i; | 2029 | int i; |
2130 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2030 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -2148,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2148 | size += aligned_size((u64)addr, sg->length); | 2048 | size += aligned_size((u64)addr, sg->length); |
2149 | } | 2049 | } |
2150 | 2050 | ||
2151 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2051 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
2152 | 2052 | ||
2153 | /* clear the whole page */ | 2053 | /* clear the whole page */ |
2154 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 2054 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
@@ -2156,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2156 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2056 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
2157 | 2057 | ||
2158 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, | 2058 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, |
2159 | size >> PAGE_SHIFT_4K, 0)) | 2059 | size >> VTD_PAGE_SHIFT, 0)) |
2160 | iommu_flush_write_buffer(domain->iommu); | 2060 | iommu_flush_write_buffer(domain->iommu); |
2161 | 2061 | ||
2162 | /* free iova */ | 2062 | /* free iova */ |
@@ -2177,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
2177 | return nelems; | 2077 | return nelems; |
2178 | } | 2078 | } |
2179 | 2079 | ||
2180 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | 2080 | int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
2181 | int nelems, int dir) | 2081 | int dir) |
2182 | { | 2082 | { |
2183 | void *addr; | 2083 | void *addr; |
2184 | int i; | 2084 | int i; |
@@ -2206,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2206 | size += aligned_size((u64)addr, sg->length); | 2106 | size += aligned_size((u64)addr, sg->length); |
2207 | } | 2107 | } |
2208 | 2108 | ||
2209 | iova = __intel_alloc_iova(hwdev, domain, size); | 2109 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
2210 | if (!iova) { | 2110 | if (!iova) { |
2211 | sglist->dma_length = 0; | 2111 | sglist->dma_length = 0; |
2212 | return 0; | 2112 | return 0; |
@@ -2222,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2222 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2122 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
2223 | prot |= DMA_PTE_WRITE; | 2123 | prot |= DMA_PTE_WRITE; |
2224 | 2124 | ||
2225 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2125 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
2226 | offset = 0; | 2126 | offset = 0; |
2227 | for_each_sg(sglist, sg, nelems, i) { | 2127 | for_each_sg(sglist, sg, nelems, i) { |
2228 | addr = SG_ENT_VIRT_ADDRESS(sg); | 2128 | addr = SG_ENT_VIRT_ADDRESS(sg); |
2229 | addr = (void *)virt_to_phys(addr); | 2129 | addr = (void *)virt_to_phys(addr); |
2230 | size = aligned_size((u64)addr, sg->length); | 2130 | size = aligned_size((u64)addr, sg->length); |
2231 | ret = domain_page_mapping(domain, start_addr + offset, | 2131 | ret = domain_page_mapping(domain, start_addr + offset, |
2232 | ((u64)addr) & PAGE_MASK_4K, | 2132 | ((u64)addr) & PAGE_MASK, |
2233 | size, prot); | 2133 | size, prot); |
2234 | if (ret) { | 2134 | if (ret) { |
2235 | /* clear the page */ | 2135 | /* clear the page */ |
@@ -2243,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2243 | return 0; | 2143 | return 0; |
2244 | } | 2144 | } |
2245 | sg->dma_address = start_addr + offset + | 2145 | sg->dma_address = start_addr + offset + |
2246 | ((u64)addr & (~PAGE_MASK_4K)); | 2146 | ((u64)addr & (~PAGE_MASK)); |
2247 | sg->dma_length = sg->length; | 2147 | sg->dma_length = sg->length; |
2248 | offset += size; | 2148 | offset += size; |
2249 | } | 2149 | } |
2250 | 2150 | ||
2251 | /* it's a non-present to present mapping */ | 2151 | /* it's a non-present to present mapping */ |
2252 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, | 2152 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, |
2253 | start_addr, offset >> PAGE_SHIFT_4K, 1)) | 2153 | start_addr, offset >> VTD_PAGE_SHIFT, 1)) |
2254 | iommu_flush_write_buffer(domain->iommu); | 2154 | iommu_flush_write_buffer(domain->iommu); |
2255 | return nelems; | 2155 | return nelems; |
2256 | } | 2156 | } |
@@ -2290,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void) | |||
2290 | sizeof(struct device_domain_info), | 2190 | sizeof(struct device_domain_info), |
2291 | 0, | 2191 | 0, |
2292 | SLAB_HWCACHE_ALIGN, | 2192 | SLAB_HWCACHE_ALIGN, |
2293 | |||
2294 | NULL); | 2193 | NULL); |
2295 | if (!iommu_devinfo_cache) { | 2194 | if (!iommu_devinfo_cache) { |
2296 | printk(KERN_ERR "Couldn't create devinfo cache\n"); | 2195 | printk(KERN_ERR "Couldn't create devinfo cache\n"); |
@@ -2308,7 +2207,6 @@ static inline int iommu_iova_cache_init(void) | |||
2308 | sizeof(struct iova), | 2207 | sizeof(struct iova), |
2309 | 0, | 2208 | 0, |
2310 | SLAB_HWCACHE_ALIGN, | 2209 | SLAB_HWCACHE_ALIGN, |
2311 | |||
2312 | NULL); | 2210 | NULL); |
2313 | if (!iommu_iova_cache) { | 2211 | if (!iommu_iova_cache) { |
2314 | printk(KERN_ERR "Couldn't create iova cache\n"); | 2212 | printk(KERN_ERR "Couldn't create iova cache\n"); |
@@ -2348,15 +2246,6 @@ static void __init iommu_exit_mempool(void) | |||
2348 | 2246 | ||
2349 | } | 2247 | } |
2350 | 2248 | ||
2351 | void __init detect_intel_iommu(void) | ||
2352 | { | ||
2353 | if (swiotlb || no_iommu || iommu_detected || dmar_disabled) | ||
2354 | return; | ||
2355 | if (early_dmar_detect()) { | ||
2356 | iommu_detected = 1; | ||
2357 | } | ||
2358 | } | ||
2359 | |||
2360 | static void __init init_no_remapping_devices(void) | 2249 | static void __init init_no_remapping_devices(void) |
2361 | { | 2250 | { |
2362 | struct dmar_drhd_unit *drhd; | 2251 | struct dmar_drhd_unit *drhd; |
@@ -2403,12 +2292,19 @@ int __init intel_iommu_init(void) | |||
2403 | { | 2292 | { |
2404 | int ret = 0; | 2293 | int ret = 0; |
2405 | 2294 | ||
2406 | if (no_iommu || swiotlb || dmar_disabled) | ||
2407 | return -ENODEV; | ||
2408 | |||
2409 | if (dmar_table_init()) | 2295 | if (dmar_table_init()) |
2410 | return -ENODEV; | 2296 | return -ENODEV; |
2411 | 2297 | ||
2298 | if (dmar_dev_scope_init()) | ||
2299 | return -ENODEV; | ||
2300 | |||
2301 | /* | ||
2302 | * Check the need for DMA-remapping initialization now. | ||
2303 | * Above initialization will also be used by Interrupt-remapping. | ||
2304 | */ | ||
2305 | if (no_iommu || swiotlb || dmar_disabled) | ||
2306 | return -ENODEV; | ||
2307 | |||
2412 | iommu_init_mempool(); | 2308 | iommu_init_mempool(); |
2413 | dmar_init_reserved_ranges(); | 2309 | dmar_init_reserved_ranges(); |
2414 | 2310 | ||
@@ -2430,3 +2326,111 @@ int __init intel_iommu_init(void) | |||
2430 | return 0; | 2326 | return 0; |
2431 | } | 2327 | } |
2432 | 2328 | ||
2329 | void intel_iommu_domain_exit(struct dmar_domain *domain) | ||
2330 | { | ||
2331 | u64 end; | ||
2332 | |||
2333 | /* Domain 0 is reserved, so dont process it */ | ||
2334 | if (!domain) | ||
2335 | return; | ||
2336 | |||
2337 | end = DOMAIN_MAX_ADDR(domain->gaw); | ||
2338 | end = end & (~VTD_PAGE_MASK); | ||
2339 | |||
2340 | /* clear ptes */ | ||
2341 | dma_pte_clear_range(domain, 0, end); | ||
2342 | |||
2343 | /* free page tables */ | ||
2344 | dma_pte_free_pagetable(domain, 0, end); | ||
2345 | |||
2346 | iommu_free_domain(domain); | ||
2347 | free_domain_mem(domain); | ||
2348 | } | ||
2349 | EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); | ||
2350 | |||
2351 | struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) | ||
2352 | { | ||
2353 | struct dmar_drhd_unit *drhd; | ||
2354 | struct dmar_domain *domain; | ||
2355 | struct intel_iommu *iommu; | ||
2356 | |||
2357 | drhd = dmar_find_matched_drhd_unit(pdev); | ||
2358 | if (!drhd) { | ||
2359 | printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); | ||
2360 | return NULL; | ||
2361 | } | ||
2362 | |||
2363 | iommu = drhd->iommu; | ||
2364 | if (!iommu) { | ||
2365 | printk(KERN_ERR | ||
2366 | "intel_iommu_domain_alloc: iommu == NULL\n"); | ||
2367 | return NULL; | ||
2368 | } | ||
2369 | domain = iommu_alloc_domain(iommu); | ||
2370 | if (!domain) { | ||
2371 | printk(KERN_ERR | ||
2372 | "intel_iommu_domain_alloc: domain == NULL\n"); | ||
2373 | return NULL; | ||
2374 | } | ||
2375 | if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | ||
2376 | printk(KERN_ERR | ||
2377 | "intel_iommu_domain_alloc: domain_init() failed\n"); | ||
2378 | intel_iommu_domain_exit(domain); | ||
2379 | return NULL; | ||
2380 | } | ||
2381 | return domain; | ||
2382 | } | ||
2383 | EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); | ||
2384 | |||
2385 | int intel_iommu_context_mapping( | ||
2386 | struct dmar_domain *domain, struct pci_dev *pdev) | ||
2387 | { | ||
2388 | int rc; | ||
2389 | rc = domain_context_mapping(domain, pdev); | ||
2390 | return rc; | ||
2391 | } | ||
2392 | EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); | ||
2393 | |||
2394 | int intel_iommu_page_mapping( | ||
2395 | struct dmar_domain *domain, dma_addr_t iova, | ||
2396 | u64 hpa, size_t size, int prot) | ||
2397 | { | ||
2398 | int rc; | ||
2399 | rc = domain_page_mapping(domain, iova, hpa, size, prot); | ||
2400 | return rc; | ||
2401 | } | ||
2402 | EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); | ||
2403 | |||
2404 | void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | ||
2405 | { | ||
2406 | detach_domain_for_dev(domain, bus, devfn); | ||
2407 | } | ||
2408 | EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); | ||
2409 | |||
2410 | struct dmar_domain * | ||
2411 | intel_iommu_find_domain(struct pci_dev *pdev) | ||
2412 | { | ||
2413 | return find_domain(pdev); | ||
2414 | } | ||
2415 | EXPORT_SYMBOL_GPL(intel_iommu_find_domain); | ||
2416 | |||
2417 | int intel_iommu_found(void) | ||
2418 | { | ||
2419 | return g_num_of_iommus; | ||
2420 | } | ||
2421 | EXPORT_SYMBOL_GPL(intel_iommu_found); | ||
2422 | |||
2423 | u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | ||
2424 | { | ||
2425 | struct dma_pte *pte; | ||
2426 | u64 pfn; | ||
2427 | |||
2428 | pfn = 0; | ||
2429 | pte = addr_to_dma_pte(domain, iova); | ||
2430 | |||
2431 | if (pte) | ||
2432 | pfn = dma_pte_addr(*pte); | ||
2433 | |||
2434 | return pfn >> VTD_PAGE_SHIFT; | ||
2435 | } | ||
2436 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | ||
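
The tail of this diff exports a small intel_iommu_* API (domain alloc/exit, context and page mapping, detach, found, iova-to-pfn) for use by other kernel code. As a rough, hedged illustration only — not part of this commit — the sketch below shows how a hypothetical in-kernel consumer might chain those exported calls. example_assign_device(), its arguments, and the error handling are invented for illustration; the intel_iommu_* functions, DMA_PTE_* flags and VTD_PAGE_SIZE are taken from the code above.

/*
 * Illustrative sketch only -- not part of this diff.  Gives one PCI
 * device a private DMA-remapping domain and maps a single page into
 * it, using the helpers exported above.  A real caller would also
 * use intel_iommu_detach_dev() when tearing the mapping down.
 */
#include <linux/pci.h>
#include <linux/intel-iommu.h>

static int example_assign_device(struct pci_dev *pdev, dma_addr_t iova, u64 hpa)
{
        struct dmar_domain *domain;
        int ret;

        if (!intel_iommu_found())
                return -ENODEV;         /* no DMAR units on this system */

        domain = intel_iommu_domain_alloc(pdev);
        if (!domain)
                return -ENOMEM;

        /* point the device's context entry at the new domain */
        ret = intel_iommu_context_mapping(domain, pdev);
        if (ret)
                goto out_free;

        /* map one page: device-visible iova -> host physical hpa, r/w */
        ret = intel_iommu_page_mapping(domain, iova, hpa, VTD_PAGE_SIZE,
                                       DMA_PTE_READ | DMA_PTE_WRITE);
        if (ret)
                goto out_free;

        return 0;

out_free:
        intel_iommu_domain_exit(domain);
        return ret;
}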