| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2009-09-24 06:59:11 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-24 06:59:18 -0400 |
| commit | d2ff6de537c61a0f05731c6679f3e1abc2d95e68 (patch) | |
| tree | 821aa38121f57a9d5419388ef10ea6f3aa445d4d /drivers/pci | |
| parent | e23a8b6a8f319c0f08b6ccef2dccbb37e7603dc2 (diff) | |
| parent | a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff) | |
Merge branch 'linus' into x86/urgent
Merge reason: Queueing up dependent early-printk fix.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/pci')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/pci/dmar.c | 41 |
| -rw-r--r-- | drivers/pci/hotplug/acpiphp_ibm.c | 12 |
| -rw-r--r-- | drivers/pci/intel-iommu.c | 323 |
| -rw-r--r-- | drivers/pci/intr_remapping.c | 8 |
| -rw-r--r-- | drivers/pci/iova.c | 16 |

5 files changed, 209 insertions, 191 deletions
```diff
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index ab99783dccec..14bbaa17e2ca 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -34,9 +34,9 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 
-#undef PREFIX
-#define PREFIX "DMAR:"
+#define PREFIX "DMAR: "
 
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
@@ -577,9 +577,6 @@ int __init dmar_table_init(void)
 	printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
 	return 0;
 }
 
@@ -639,20 +636,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;
@@ -672,7 +680,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,
@@ -682,7 +690,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	drhd->iommu = iommu;
 	return 0;
-error:
+
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
@@ -1219,7 +1230,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);
 
 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
@@ -1312,3 +1323,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
```
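The new all-ones check in alloc_iommu() exploits a property of dead MMIO: reads from an unbacked or misreported register window float high, so both capability registers coming back as ~0 is a strong "this DMAR unit does not actually exist" signal. A minimal userspace sketch of the same test — read_reg() and the values are stand-ins, not kernel API:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for dmar_readq(): an unbacked MMIO window typically
 * reads back as all ones. */
static uint64_t read_reg(const volatile uint64_t *reg)
{
        return *reg;
}

int main(void)
{
        /* Simulated capability registers of a DRHD whose BIOS-reported
         * base address points at nothing. */
        uint64_t cap = (uint64_t)-1, ecap = (uint64_t)-1;

        if (read_reg(&cap) == (uint64_t)-1 && read_reg(&ecap) == (uint64_t)-1)
                puts("register window unbacked; unmap and bail out");
        return 0;
}
```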
```diff
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 5befa7e379b7..a9d926b7d805 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -398,23 +398,21 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
 	acpi_handle *phandle = (acpi_handle *)context;
 	acpi_status status;
 	struct acpi_device_info *info;
-	struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	int retval = 0;
 
-	status = acpi_get_object_info(handle, &info_buffer);
+	status = acpi_get_object_info(handle, &info);
 	if (ACPI_FAILURE(status)) {
 		err("%s: Failed to get device information status=0x%x\n",
 			__func__, status);
 		return retval;
 	}
-	info = info_buffer.pointer;
-	info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0';
+	info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0';
 
 	if (info->current_status && (info->valid & ACPI_VALID_HID) &&
-	    (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) ||
-	     !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) {
+	    (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
+	     !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
 		dbg("found hardware: %s, handle: %p\n",
-			info->hardware_id.value, handle);
+			info->hardware_id.string, handle);
 		*phandle = handle;
 		/* returning non-zero causes the search to stop
 		 * and returns this value to the caller of
```
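For context, this driver change tracks ACPICA's revised acpi_get_object_info(), which now allocates the acpi_device_info buffer itself instead of filling a caller-supplied struct acpi_buffer, and renames hardware_id.value to hardware_id.string. A hedged kernel-side sketch of the new calling convention, assuming (as in-tree users do) that the caller releases the buffer with kfree():

```c
/* Kernel-context sketch, not a standalone program. */
#include <linux/acpi.h>
#include <linux/slab.h>

static void print_hid(acpi_handle handle)
{
        struct acpi_device_info *info;
        acpi_status status;

        /* ACPICA now allocates *info on our behalf... */
        status = acpi_get_object_info(handle, &info);
        if (ACPI_FAILURE(status))
                return;

        if (info->valid & ACPI_VALID_HID)
                pr_info("HID: %s\n", info->hardware_id.string);

        /* ...and the caller frees it when done. */
        kfree(info);
}
```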
```diff
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 562221e11917..855dd7ca47f3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -38,6 +38,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -56,8 +57,14 @@
 
 #define MAX_AGAW_WIDTH 64
 
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
+				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
```
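The macro split above guards a real 32-bit hazard: __DOMAIN_MAX_PFN() of a 48-bit guest address width needs 36 bits, which silently truncates when stored in a 32-bit unsigned long. Clamping to ULONG_MAX keeps every PFN comparison safe. A small standalone illustration of the same clamp — domain_max_pfn() here is a stand-in for the kernel macro:

```c
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)

/* Clamp the 64-bit PFN bound to what unsigned long can hold, as the
 * new DOMAIN_MAX_PFN() does with min_t(). */
static unsigned long domain_max_pfn(int gaw)
{
        uint64_t pfn = __DOMAIN_MAX_PFN(gaw);

        return pfn < (uint64_t)(unsigned long)-1 ? (unsigned long)pfn
                                                 : (unsigned long)-1;
}

int main(void)
{
        /* On a 32-bit build this prints 4294967295 (clamped);
         * on 64-bit, the full 2^36 - 1. */
        printf("max pfn for gaw=48: %lu\n", domain_max_pfn(48));
        return 0;
}
```

The intel-iommu.c hunks continue below.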
```diff
@@ -252,7 +259,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -728,7 +736,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			return NULL;
 
 		domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-		pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+		pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 		if (cmpxchg64(&pte->val, 0ULL, pteval)) {
 			/* Someone else set it while we were thinking; use theirs. */
 			free_pgtable_page(tmp_page);
@@ -778,9 +786,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
-	while (start_pfn <= last_pfn) {
+	do {
 		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
 		if (!pte) {
 			start_pfn = align_to_level(start_pfn + 1, 2);
@@ -794,7 +803,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
-	}
+
+	} while (start_pfn && start_pfn <= last_pfn);
 }
 
 /* free page table pages. last level pte should already be cleared */
@@ -810,6 +820,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 
 	/* We don't need lock here; nobody else touches the iova range */
 	level = 2;
@@ -820,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 		if (tmp + level_size(level) - 1 > last_pfn)
 			return;
 
-		while (tmp + level_size(level) - 1 <= last_pfn) {
+		do {
 			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
 			if (!pte) {
 				tmp = align_to_level(tmp + 1, level + 1);
@@ -839,7 +850,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
 
-		}
+		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
 		level++;
 	}
 	/* free pgd */
```
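The switch from while to do/while in the two loops above is not cosmetic. Once last_pfn can legitimately be ULONG_MAX (the clamped DOMAIN_MAX_PFN), the old test `start_pfn <= last_pfn` can never become false; instead start_pfn wraps to zero after the final chunk, which the added `start_pfn &&` term catches. A toy loop showing the wrap — the increment stands in for advancing past a processed chunk:

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long start_pfn = ULONG_MAX - 2;
        unsigned long last_pfn = ULONG_MAX;     /* clamped upper bound */

        do {
                printf("processing pfn %lu\n", start_pfn);
                start_pfn++;    /* wraps to 0 after ULONG_MAX */
        } while (start_pfn && start_pfn <= last_pfn);

        return 0;
}
```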
```diff
@@ -1158,6 +1169,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	pr_debug("Number of Domains supportd <%ld>\n", ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
+	spin_lock_init(&iommu->lock);
+
 	/* TBD: there might be 64K domains,
 	 * consider other allocation for future chip
 	 */
@@ -1170,12 +1183,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 			GFP_KERNEL);
 	if (!iommu->domains) {
 		printk(KERN_ERR "Allocating domain array failed\n");
-		kfree(iommu->domain_ids);
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&iommu->lock);
-
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.
@@ -1195,22 +1205,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 	int i;
 	unsigned long flags;
 
-	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-	for (; i < cap_ndoms(iommu->cap); ) {
-		domain = iommu->domains[i];
-		clear_bit(i, iommu->domain_ids);
-
-		spin_lock_irqsave(&domain->iommu_lock, flags);
-		if (--domain->iommu_count == 0) {
-			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-				vm_domain_exit(domain);
-			else
-				domain_exit(domain);
-		}
-		spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-		i = find_next_bit(iommu->domain_ids,
-				  cap_ndoms(iommu->cap), i+1);
-	}
+	if ((iommu->domains) && (iommu->domain_ids)) {
+		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+		for (; i < cap_ndoms(iommu->cap); ) {
+			domain = iommu->domains[i];
+			clear_bit(i, iommu->domain_ids);
+
+			spin_lock_irqsave(&domain->iommu_lock, flags);
+			if (--domain->iommu_count == 0) {
+				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+					vm_domain_exit(domain);
+				else
+					domain_exit(domain);
+			}
+			spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+			i = find_next_bit(iommu->domain_ids,
+					  cap_ndoms(iommu->cap), i+1);
+		}
+	}
 
 	if (iommu->gcmd & DMA_GCMD_TE)
@@ -1310,7 +1322,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1321,8 +1332,6 @@ static void dmar_init_reserved_ranges(void)
 
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-		&reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
 
@@ -1959,14 +1968,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;
 
-	printk(KERN_INFO
-	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-	       pci_name(pdev), start, end);
-
 	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain)
 		return -ENOMEM;
 
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
+
+	printk(KERN_INFO
+	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+	       pci_name(pdev), start, end);
+
+	if (end >> agaw_to_width(domain->agaw)) {
+		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     agaw_to_width(domain->agaw),
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		ret = -EIO;
+		goto error;
+	}
+
 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
 		goto error;
@@ -2017,23 +2047,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2048,7 +2061,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 
 }
 
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
@@ -2075,6 +2088,9 @@
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+	if (hw)
+		return 0;
+
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)
@@ -2101,15 +2117,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }
 
 static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct pci_dev *pdev)
+			       struct pci_dev *pdev,
+			       int translation)
 {
 	struct device_domain_info *info;
 	unsigned long flags;
+	int ret;
 
 	info = alloc_devinfo_mem();
 	if (!info)
 		return -ENOMEM;
 
+	ret = domain_context_mapping(domain, pdev, translation);
+	if (ret) {
+		free_devinfo_mem(info);
+		return ret;
+	}
+
 	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
@@ -2166,27 +2190,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;
 
-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));
 
-			ret = domain_context_mapping(si_domain, pdev,
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw ? CONTEXT_TT_PASS_THROUGH :
 						     CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
-			ret = domain_add_dev_info(si_domain, pdev);
-			if (ret)
-				return ret;
 		}
 	}
 
```
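A theme of the hunks above is folding domain_context_mapping() into domain_add_dev_info() so a device is never left half-registered: the hardware-visible context mapping happens before the info structure is linked in, and a mapping failure frees the info again. A generic sketch of that claim-then-commit shape — all names here are hypothetical, not the driver's API:

```c
#include <stdio.h>
#include <stdlib.h>

struct dev_info { int bus, devfn; };

static int context_map(int fail)  /* stand-in for domain_context_mapping() */
{
        return fail ? -1 : 0;
}

/* Allocate bookkeeping, attempt the hardware-visible step, and only
 * commit the bookkeeping if that step succeeded. */
static int add_dev_info(int bus, int devfn, int fail)
{
        struct dev_info *info = malloc(sizeof(*info));

        if (!info)
                return -1;
        if (context_map(fail)) {
                free(info);             /* unwind: nothing half-registered */
                return -1;
        }
        info->bus = bus;
        info->devfn = devfn;
        printf("registered %02x:%02x\n", bus, devfn);
        free(info);                     /* demo only; the kernel keeps it linked */
        return 0;
}

int main(void)
{
        add_dev_info(0, 0x10, 0);       /* succeeds */
        add_dev_info(0, 0x18, 1);       /* fails, leaves no state behind */
        return 0;
}
```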
```diff
@@ -2200,14 +2222,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;
 
 	/*
 	 * for each drhd
@@ -2235,7 +2249,6 @@ int __init init_dmars(void)
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}
@@ -2262,14 +2275,8 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
+			hw_pass_through = 0;
 	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
 
 	/*
 	 * Start from the sane iommu hardware state.
@@ -2324,64 +2331,57 @@
 		}
 	}
 
-	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
-	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
-		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
-		}
-	}
-
-	/*
-	 * If pass through is not set or not enabled, setup context entries for
-	 * identity mappings for rmrr, gfx, and isa and may fall back to static
-	 * identity mapping if iommu_identity_mapping is set.
-	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
-		/*
-		 * For each rmrr
-		 *   for each dev attached to rmrr
-		 *   do
-		 *     locate drhd for dev, alloc domain for dev
-		 *     allocate free domain
-		 *     allocate page table entries for rmrr
-		 *     if context not allocated for bus
-		 *           allocate and init context
-		 *           set present in root table for this bus
-		 *     init context with domain, translation etc
-		 *    endfor
-		 * endfor
-		 */
-		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-		for_each_rmrr_units(rmrr) {
-			for (i = 0; i < rmrr->devices_cnt; i++) {
-				pdev = rmrr->devices[i];
-				/*
-				 * some BIOS lists non-exist devices in DMAR
-				 * table.
-				 */
-				if (!pdev)
-					continue;
-				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-				if (ret)
-					printk(KERN_ERR
-					       "IOMMU: mapping reserved region failed\n");
-			}
-		}
-
-		iommu_prepare_isa();
-	}
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
+	/*
+	 * If pass through is not set or not enabled, setup context entries for
+	 * identity mappings for rmrr, gfx, and isa and may fall back to static
+	 * identity mapping if iommu_identity_mapping is set.
+	 */
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+		if (ret) {
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
+		}
+	}
+	/*
+	 * For each rmrr
+	 *   for each dev attached to rmrr
+	 *   do
+	 *     locate drhd for dev, alloc domain for dev
+	 *     allocate free domain
+	 *     allocate page table entries for rmrr
+	 *     if context not allocated for bus
+	 *           allocate and init context
+	 *           set present in root table for this bus
+	 *     init context with domain, translation etc
+	 *    endfor
+	 * endfor
+	 */
+	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			pdev = rmrr->devices[i];
+			/*
+			 * some BIOS lists non-exist devices in DMAR
+			 * table.
+			 */
+			if (!pdev)
+				continue;
+			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+			if (ret)
+				printk(KERN_ERR
+				       "IOMMU: mapping reserved region failed\n");
+		}
+	}
+
+	iommu_prepare_isa();
 
 	/*
 	 * for each drhd
 	 *   enable fault log
@@ -2404,11 +2404,12 @@
 
 		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-		iommu_disable_protect_mem_regions(iommu);
 
 		ret = iommu_enable_translation(iommu);
 		if (ret)
 			goto error;
+
+		iommu_disable_protect_mem_regions(iommu);
 	}
 
 	return 0;
@@ -2455,8 +2456,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	return iova;
 }
 
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
 	struct dmar_domain *domain;
 	int ret;
@@ -2484,6 +2484,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+	struct device_domain_info *info;
+
+	/* No lock here, assumes no domain exit in normal case */
+	info = dev->dev.archdata.iommu;
+	if (likely(info))
+		return info->domain;
+
+	return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
 	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2526,10 +2538,10 @@ static int iommu_no_mapping(struct device *dev)
 	 */
 	if (iommu_should_identity_map(pdev, 0)) {
 		int ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return 0;
-		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_add_dev_info(si_domain, pdev,
+					  hw_pass_through ?
+					  CONTEXT_TT_PASS_THROUGH :
+					  CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -2638,10 +2650,9 @@ static void flush_unmaps(void)
 			unsigned long mask;
 			struct iova *iova = deferred_flush[i].iova[j];
 
-			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
-			mask = ilog2(mask >> VTD_PAGE_SHIFT);
+			mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
 			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-					iova->pfn_lo << PAGE_SHIFT, mask);
+					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
 			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
 		}
 		deferred_flush[i].next = 0;
@@ -2734,12 +2745,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			       int dir)
-{
-	intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2772,7 +2777,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }
 
@@ -2808,11 +2813,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-			      (last_pfn - start_pfn + 1));
-
-	/* free iova */
-	__free_iova(&domain->iovad, iova);
+	if (intel_iommu_strict) {
+		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+				      last_pfn - start_pfn + 1);
+		/* free iova */
+		__free_iova(&domain->iovad, iova);
+	} else {
+		add_unmap(domain, iova);
+		/*
+		 * queue up the release of the unmap to save the 1/6th of the
+		 * cpu used up by the iotlb flush operation...
+		 */
+	}
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
```
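The intel_unmap_sg() hunk above mirrors what the page-unmap path already did: under intel_iommu_strict the IOTLB is flushed synchronously, otherwise the IOVA is queued and a batched flush reclaims many entries with one invalidation (the in-diff comment puts the saving at roughly 1/6th of a CPU). A minimal sketch of that batching idea — the queue, threshold, and flush_all() are illustrative, not the driver's API:

```c
#include <stdio.h>

#define BATCH 4

static unsigned long pending[BATCH];
static int npending;

static void flush_all(void)             /* one global invalidation */
{
        printf("flush: reclaiming %d IOVAs at once\n", npending);
        npending = 0;
}

static void unmap(unsigned long iova, int strict)
{
        if (strict) {                   /* flush immediately, once per unmap */
                printf("flush: reclaiming IOVA %lu\n", iova);
                return;
        }
        pending[npending++] = iova;     /* defer: amortize the flush cost */
        if (npending == BATCH)
                flush_all();
}

int main(void)
{
        for (unsigned long i = 0; i < 8; i++)
                unmap(i, 0);            /* lazy mode: two flushes for eight unmaps */
        return 0;
}
```

The remaining intel-iommu.c hunks follow.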
```diff
@@ -3056,8 +3068,8 @@ static int init_iommu_hw(void)
 					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH);
-		iommu_disable_protect_mem_regions(iommu);
 		iommu_enable_translation(iommu);
+		iommu_disable_protect_mem_regions(iommu);
 	}
 
 	return 0;
@@ -3205,7 +3217,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();
@@ -3227,14 +3239,7 @@ int __init intel_iommu_init(void)
 
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-
-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;
 
 	init_iommu_sysfs();
 
@@ -3517,7 +3522,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 	struct intel_iommu *iommu;
 	int addr_width;
 	u64 end;
-	int ret;
 
 	/* normally pdev is not mapped */
 	if (unlikely(domain_context_mapped(pdev))) {
@@ -3549,12 +3553,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		return -EFAULT;
 	}
 
-	ret = domain_add_dev_info(dmar_domain, pdev);
-	if (ret)
-		return ret;
-
-	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-	return ret;
+	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
```
```diff
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 44803644ca05..0ed78a764ded 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
 	if (disable_intremap)
 		return 0;
 
+	if (!dmar_ir_support())
+		return 0;
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
 
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	if (parse_ioapics_under_ir() != 1) {
+		printk(KERN_INFO "Not enable interrupt remapping\n");
+		return -1;
+	}
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
 
```
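The early return added to intr_remapping_supported() relies on the new dmar_ir_support() from dmar.c above, which simply tests bit 0 of the ACPI DMAR table's flags field — the bit the spec uses to advertise interrupt remapping — so the probe can bail out before touching any hardware unit. A tiny standalone version of that test; the struct is a stand-in for the real acpi_table_dmar:

```c
#include <stdint.h>
#include <stdio.h>

struct dmar_table { uint8_t flags; };   /* stand-in for acpi_table_dmar */

static int dmar_ir_support(const struct dmar_table *dmar)
{
        return dmar->flags & 0x1;        /* bit 0: interrupt remapping */
}

int main(void)
{
        struct dmar_table with = { .flags = 0x1 }, without = { .flags = 0x0 };

        printf("ir support: %d vs %d\n",
               dmar_ir_support(&with), dmar_ir_support(&without));
        return 0;
}
```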
```diff
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
 
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
 
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 		printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 			iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
```
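The iova.c hunks all make the same move: iova_alloc_lock was only ever taken as an outer wrapper around iova_rbtree_lock, so every path now takes the rbtree lock directly with spin_lock_irqsave(). A userspace analogue of the before/after, with a mutex standing in for the spinlock:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rbtree_lock = PTHREAD_MUTEX_INITIALIZER;
static int nranges;

static void reserve_range(void)
{
        /* Before: lock(alloc_lock); lock(rbtree_lock); ... unlock both.
         * After: the rbtree lock alone covers the walk and the insert,
         * since no caller relied on the outer lock for anything else. */
        pthread_mutex_lock(&rbtree_lock);
        nranges++;
        pthread_mutex_unlock(&rbtree_lock);
}

int main(void)
{
        reserve_range();
        printf("reserved ranges: %d\n", nranges);
        return 0;
}
```

Dropping the redundant outer lock removes a lock-ordering constraint (and the lockdep class that annotated it) without changing what is actually serialized.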
