Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c | 1610
1 file changed, 926 insertions(+), 684 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a22c86c867fa..69fa7da5e48b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -10,15 +10,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * Author: Fenghua Yu <fenghua.yu@intel.com>
+ * Authors: David Woodhouse <dwmw2@infradead.org>,
+ *          Ashok Raj <ashok.raj@intel.com>,
+ *          Shaohua Li <shaohua.li@intel.com>,
+ *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
+ *          Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -33,6 +29,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -372,14 +369,36 @@ struct dmar_domain {
 struct device_domain_info {
 	struct list_head link;	/* link to domain siblings */
 	struct list_head global; /* link to global list */
-	int segment;		/* PCI domain */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
-	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
+	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
 };
 
+struct dmar_rmrr_unit {
+	struct list_head list;		/* list of rmrr units	*/
+	struct acpi_dmar_header *hdr;	/* ACPI header		*/
+	u64	base_address;		/* reserved base address*/
+	u64	end_address;		/* reserved end address */
+	struct dmar_dev_scope *devices;	/* target devices */
+	int	devices_cnt;		/* target device count */
+};
+
+struct dmar_atsr_unit {
+	struct list_head list;		/* list of ATSR units */
+	struct acpi_dmar_header *hdr;	/* ACPI header */
+	struct dmar_dev_scope *devices;	/* target devices */
+	int devices_cnt;		/* target device count */
+	u8 include_all:1;		/* include all ports */
+};
+
+static LIST_HEAD(dmar_atsr_units);
+static LIST_HEAD(dmar_rmrr_units);
+
+#define for_each_rmrr_units(rmrr) \
+	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
 static void flush_unmaps_timeout(unsigned long data);
 
 static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
@@ -389,6 +408,7 @@ struct deferred_flush_tables {
 	int next;
 	struct iova *iova[HIGH_WATER_MARK];
 	struct dmar_domain *domain[HIGH_WATER_MARK];
+	struct page *freelist[HIGH_WATER_MARK];
 };
 
 static struct deferred_flush_tables *deferred_flush;
@@ -402,7 +422,12 @@ static LIST_HEAD(unmaps_to_do);
 static int timer_on;
 static long list_size;
 
+static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+				       struct device *dev);
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+					   struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -566,18 +591,31 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
-	int i;
-
-	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
-
-	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
-
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	int i, found = 0;
+
+	domain->iommu_coherency = 1;
+
 	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+		found = 1;
 		if (!ecap_coherent(g_iommus[i]->ecap)) {
 			domain->iommu_coherency = 0;
 			break;
 		}
 	}
+	if (found)
+		return;
+
+	/* No hardware attached; use lowest common denominator */
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!ecap_coherent(iommu->ecap)) {
+			domain->iommu_coherency = 0;
+			break;
+		}
+	}
+	rcu_read_unlock();
 }
 
 static void domain_update_iommu_snooping(struct dmar_domain *domain)
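
The rewritten domain_update_iommu_coherency() above sets the pattern the superpage and snooping updates share: a domain capability is the AND of that capability over every IOMMU the domain is attached to, falling back to every active IOMMU (the lowest common denominator) when the domain is not attached anywhere yet. A minimal self-contained sketch of that reduction in plain C; the type and function names here are illustrative, not the kernel's:

	#include <stdbool.h>
	#include <stddef.h>

	struct hw_unit { bool coherent; };	/* stands in for struct intel_iommu */

	/* AND-reduce a capability over the attached set; if that set is
	 * empty, reduce over all known units instead. */
	static bool domain_coherency(const struct hw_unit *attached, size_t n_attached,
				     const struct hw_unit *all, size_t n_all)
	{
		const struct hw_unit *set = n_attached ? attached : all;
		size_t i, n = n_attached ? n_attached : n_all;

		for (i = 0; i < n; i++)
			if (!set[i].coherent)
				return false;	/* one incoherent unit taints the domain */
		return true;			/* capability defaults to "on" */
	}
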
@@ -606,12 +644,15 @@ static void domain_update_iommu_superpage(struct dmar_domain *domain)
 	}
 
 	/* set iommu_superpage to the smallest common denominator */
+	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
 		mask &= cap_super_page_val(iommu->cap);
 		if (!mask) {
 			break;
 		}
 	}
+	rcu_read_unlock();
+
 	domain->iommu_superpage = fls(mask);
 }
 
@@ -623,32 +664,56 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 	domain_update_iommu_superpage(domain);
 }
 
-static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
+static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
+	struct intel_iommu *iommu;
+	struct device *tmp;
+	struct pci_dev *ptmp, *pdev = NULL;
+	u16 segment;
 	int i;
 
-	for_each_active_drhd_unit(drhd) {
-		if (segment != drhd->segment)
+	if (dev_is_pci(dev)) {
+		pdev = to_pci_dev(dev);
+		segment = pci_domain_nr(pdev->bus);
+	} else if (ACPI_COMPANION(dev))
+		dev = &ACPI_COMPANION(dev)->dev;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (pdev && segment != drhd->segment)
 			continue;
 
-		for (i = 0; i < drhd->devices_cnt; i++) {
-			if (drhd->devices[i] &&
-			    drhd->devices[i]->bus->number == bus &&
-			    drhd->devices[i]->devfn == devfn)
-				return drhd->iommu;
-			if (drhd->devices[i] &&
-			    drhd->devices[i]->subordinate &&
-			    drhd->devices[i]->subordinate->number <= bus &&
-			    drhd->devices[i]->subordinate->busn_res.end >= bus)
-				return drhd->iommu;
+		for_each_active_dev_scope(drhd->devices,
+					  drhd->devices_cnt, i, tmp) {
+			if (tmp == dev) {
+				*bus = drhd->devices[i].bus;
+				*devfn = drhd->devices[i].devfn;
+				goto out;
+			}
+
+			if (!pdev || !dev_is_pci(tmp))
+				continue;
+
+			ptmp = to_pci_dev(tmp);
+			if (ptmp->subordinate &&
+			    ptmp->subordinate->number <= pdev->bus->number &&
+			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
+				goto got_pdev;
 		}
 
-		if (drhd->include_all)
-			return drhd->iommu;
+		if (pdev && drhd->include_all) {
+		got_pdev:
+			*bus = pdev->bus->number;
+			*devfn = pdev->devfn;
+			goto out;
+		}
 	}
+	iommu = NULL;
+ out:
+	rcu_read_unlock();
 
-	return NULL;
+	return iommu;
 }
 
 static void domain_flush_cache(struct dmar_domain *domain,
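
device_to_iommu() now resolves a bare struct device to the DMAR unit whose scope list claims it, reporting the bus/devfn to program through out-parameters; a unit flagged include_all catches PCI devices not matched by any explicit scope. A compact sketch of that lookup shape, with hypothetical types standing in for the DRHD structures:

	#include <stdbool.h>
	#include <stddef.h>

	struct scope { const void *dev; unsigned char bus, devfn; };
	struct unit {
		bool include_all;
		const struct scope *scopes;
		size_t n_scopes;
	};

	/* An explicit scope match wins; a catch-all unit claims anything left. */
	static const struct unit *unit_for_device(const struct unit *units, size_t n,
						  const void *dev,
						  unsigned char *bus, unsigned char *devfn)
	{
		const struct unit *catch_all = NULL;
		size_t i, j;

		for (i = 0; i < n; i++) {
			for (j = 0; j < units[i].n_scopes; j++)
				if (units[i].scopes[j].dev == dev) {
					*bus = units[i].scopes[j].bus;
					*devfn = units[i].scopes[j].devfn;
					return &units[i];
				}
			if (units[i].include_all)
				catch_all = &units[i];
		}
		return catch_all;	/* caller derives *bus/*devfn itself here */
	}
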
@@ -748,7 +813,7 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int target_level)
+				      unsigned long pfn, int *target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
@@ -763,14 +828,14 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
 	parent = domain->pgd;
 
-	while (level > 0) {
+	while (1) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
+		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
-		if (level == target_level)
+		if (level == *target_level)
 			break;
 
 		if (!dma_pte_present(pte)) {
@@ -791,10 +856,16 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 				domain_flush_cache(domain, pte, sizeof(*pte));
 			}
 		}
+		if (level == 1)
+			break;
+
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
 	}
 
+	if (!*target_level)
+		*target_level = level;
+
 	return pte;
 }
 
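
The signature change above turns target_level into an in/out parameter: the caller passes the level it wants (0 meaning "whatever already exists"), and the walk writes back the level it actually stopped at, so __domain_mapping() can learn which superpage size the existing tables dictate. A minimal sketch of that contract on a generic multi-level tree; types and names are illustrative, and allocation of missing levels is omitted:

	struct node { struct node *child[512]; int is_leaf; };

	static struct node *walk(struct node *root, int top_level,
				 const int *index_per_level, int *target_level)
	{
		struct node *n = root;
		int level = top_level;

		while (1) {
			if (!*target_level && n->is_leaf)
				break;		/* 0 requested: take what exists */
			if (level == *target_level)
				break;		/* reached the requested level */
			if (level == 1 || !n->child[index_per_level[level]])
				break;		/* bottom, or nothing to descend into */
			n = n->child[index_per_level[level]];
			level--;
		}
		if (!*target_level)
			*target_level = level;	/* report the level actually used */
		return n;
	}
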
@@ -832,7 +903,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static int dma_pte_clear_range(struct dmar_domain *domain,
+static void dma_pte_clear_range(struct dmar_domain *domain,
 			       unsigned long start_pfn,
 			       unsigned long last_pfn)
 {
@@ -862,8 +933,6 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 				   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
-
-	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -921,6 +990,123 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	}
 }
 
+/* When a page at a given level is being unlinked from its parent, we don't
+   need to *modify* it at all. All we need to do is make a list of all the
+   pages which can be freed just as soon as we've flushed the IOTLB and we
+   know the hardware page-walk will no longer touch them.
+   The 'pte' argument is the *parent* PTE, pointing to the page that is to
+   be freed. */
+static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
+					    int level, struct dma_pte *pte,
+					    struct page *freelist)
+{
+	struct page *pg;
+
+	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
+	pg->freelist = freelist;
+	freelist = pg;
+
+	if (level == 1)
+		return freelist;
+
+	for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
+			freelist = dma_pte_list_pagetables(domain, level - 1,
+							   pte, freelist);
+	}
+
+	return freelist;
+}
+
+static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
+					struct dma_pte *pte, unsigned long pfn,
+					unsigned long start_pfn,
+					unsigned long last_pfn,
+					struct page *freelist)
+{
+	struct dma_pte *first_pte = NULL, *last_pte = NULL;
+
+	pfn = max(start_pfn, pfn);
+	pte = &pte[pfn_level_offset(pfn, level)];
+
+	do {
+		unsigned long level_pfn;
+
+		if (!dma_pte_present(pte))
+			goto next;
+
+		level_pfn = pfn & level_mask(level);
+
+		/* If range covers entire pagetable, free it */
+		if (start_pfn <= level_pfn &&
+		    last_pfn >= level_pfn + level_size(level) - 1) {
+			/* These subordinate page tables are going away entirely. Don't
+			   bother to clear them; we're just going to *free* them. */
+			if (level > 1 && !dma_pte_superpage(pte))
+				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+
+			dma_clear_pte(pte);
+			if (!first_pte)
+				first_pte = pte;
+			last_pte = pte;
+		} else if (level > 1) {
+			/* Recurse down into a level that isn't *entirely* obsolete */
+			freelist = dma_pte_clear_level(domain, level - 1,
+						       phys_to_virt(dma_pte_addr(pte)),
+						       level_pfn, start_pfn, last_pfn,
+						       freelist);
+		}
+next:
+		pfn += level_size(level);
+	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+
+	if (first_pte)
+		domain_flush_cache(domain, first_pte,
+				   (void *)++last_pte - (void *)first_pte);
+
+	return freelist;
+}
+
+/* We can't just free the pages because the IOMMU may still be walking
+   the page tables, and may have cached the intermediate levels. The
+   pages can only be freed after the IOTLB flush has been done. */
+struct page *domain_unmap(struct dmar_domain *domain,
+			  unsigned long start_pfn,
+			  unsigned long last_pfn)
+{
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+	struct page *freelist = NULL;
+
+	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
+
+	/* we don't need lock here; nobody else touches the iova range */
+	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+				       domain->pgd, 0, start_pfn, last_pfn, NULL);
+
+	/* free pgd */
+	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+		struct page *pgd_page = virt_to_page(domain->pgd);
+		pgd_page->freelist = freelist;
+		freelist = pgd_page;
+
+		domain->pgd = NULL;
+	}
+
+	return freelist;
+}
+
+void dma_free_pagelist(struct page *freelist)
+{
+	struct page *pg;
+
+	while ((pg = freelist)) {
+		freelist = pg->freelist;
+		free_pgtable_page(page_address(pg));
+	}
+}
+
 /* iommu handling */
 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
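
The core idea of the hunk above is deferred freeing: unlinked page-table pages are chained into an intrusive singly linked list through page->freelist (no allocation needed) and only released after the IOTLB flush guarantees the hardware page-walk can no longer reach them. A self-contained sketch of the same pattern in plain C, with illustrative names in place of the kernel's struct page machinery:

	#include <stdlib.h>

	struct pt_page {
		struct pt_page *next;		/* plays the role of page->freelist */
		unsigned long entries[512];
	};

	/* Unlinking costs one pointer store: push the page onto the list. */
	static struct pt_page *list_page(struct pt_page *pg, struct pt_page *freelist)
	{
		pg->next = freelist;
		return pg;
	}

	static void free_pagelist(struct pt_page *freelist)
	{
		struct pt_page *pg;

		while ((pg = freelist)) {
			freelist = pg->next;
			free(pg);
		}
	}

	/* Usage: collect during unmap, flush, then release.
	 *
	 *	freelist = list_page(old_table, freelist);
	 *	flush_iotlb();			// hypothetical; hardware stops walking
	 *	free_pagelist(freelist);
	 */
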
@@ -1030,7 +1216,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 		break;
 	case DMA_TLB_PSI_FLUSH:
 		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-		/* Note: always flush non-leaf currently */
+		/* IH bit is passed in as part of address */
 		val_iva = size_order | addr;
 		break;
 	default:
@@ -1069,13 +1255,14 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 			(unsigned long long)DMA_TLB_IAIG(val));
 }
 
-static struct device_domain_info *iommu_support_dev_iotlb(
-	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
+static struct device_domain_info *
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+			u8 bus, u8 devfn)
 {
 	int found = 0;
 	unsigned long flags;
 	struct device_domain_info *info;
-	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+	struct pci_dev *pdev;
 
 	if (!ecap_dev_iotlb_support(iommu->ecap))
 		return NULL;
@@ -1091,34 +1278,35 @@ static struct device_domain_info *iommu_support_dev_iotlb(
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
-	if (!found || !info->dev)
+	if (!found || !info->dev || !dev_is_pci(info->dev))
 		return NULL;
 
-	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
-		return NULL;
+	pdev = to_pci_dev(info->dev);
 
-	if (!dmar_find_matched_atsr_unit(info->dev))
+	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
 		return NULL;
 
-	info->iommu = iommu;
+	if (!dmar_find_matched_atsr_unit(pdev))
+		return NULL;
 
 	return info;
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info)
+	if (!info || !dev_is_pci(info->dev))
 		return;
 
-	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->dev || !pci_ats_enabled(info->dev))
+	if (!info->dev || !dev_is_pci(info->dev) ||
+	    !pci_ats_enabled(to_pci_dev(info->dev)))
 		return;
 
-	pci_disable_ats(info->dev);
+	pci_disable_ats(to_pci_dev(info->dev));
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1130,24 +1318,31 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->dev || !pci_ats_enabled(info->dev))
+		struct pci_dev *pdev;
+		if (!info->dev || !dev_is_pci(info->dev))
+			continue;
+
+		pdev = to_pci_dev(info->dev);
+		if (!pci_ats_enabled(pdev))
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = pci_ats_queue_depth(info->dev);
+		qdep = pci_ats_queue_depth(pdev);
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int map)
+				  unsigned long pfn, unsigned int pages, int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
 
 	BUG_ON(pages == 0);
 
+	if (ih)
+		ih = 1 << 6;
 	/*
 	 * Fallback to domain selective flush if no PSI support or the size is
 	 * too big.
@@ -1158,7 +1353,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 		iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						DMA_TLB_DSI_FLUSH);
 	else
-		iommu->flush.flush_iotlb(iommu, did, addr, mask,
+		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
 						DMA_TLB_PSI_FLUSH);
 
 	/*
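
iommu_flush_iotlb_psi() now threads an invalidation hint to the hardware. The VT-d IOTLB invalidate address field packs everything into one 64-bit value: the page address in the high bits, the invalidation hint (IH) in bit 6, and the address mask / size order in the low bits, which is why ih becomes 1 << 6 and is simply OR-ed into addr. A sketch of the packing; the constant and function names are illustrative:

	#include <stdint.h>

	#define IH_BIT		(1ULL << 6)	/* invalidation hint */
	#define PAGE_SHIFT_4K	12		/* VTD_PAGE_SHIFT */

	static uint64_t psi_val_iva(uint64_t pfn, unsigned int size_order, int ih)
	{
		uint64_t addr = pfn << PAGE_SHIFT_4K;	/* page-aligned, bits 12+ */

		/* size_order occupies the low address-mask bits (5:0) */
		return addr | (ih ? IH_BIT : 0) | size_order;
	}

When IH is set, cached leaf entries are flushed but the hardware may keep intermediate (non-leaf) paging-structure caches, which is safe when only leaf mappings changed.
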
@@ -1261,10 +1456,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	return 0;
 }
 
-
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
@@ -1273,18 +1464,21 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 	if ((iommu->domains) && (iommu->domain_ids)) {
 		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
+			/*
+			 * Domain id 0 is reserved for invalid translation
+			 * if hardware supports caching mode.
+			 */
+			if (cap_caching_mode(iommu->cap) && i == 0)
+				continue;
+
 			domain = iommu->domains[i];
 			clear_bit(i, iommu->domain_ids);
 
 			spin_lock_irqsave(&domain->iommu_lock, flags);
 			count = --domain->iommu_count;
 			spin_unlock_irqrestore(&domain->iommu_lock, flags);
-			if (count == 0) {
-				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-					vm_domain_exit(domain);
-				else
-					domain_exit(domain);
-			}
+			if (count == 0)
+				domain_exit(domain);
 		}
 	}
 
@@ -1298,21 +1492,14 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 	g_iommus[iommu->seq_id] = NULL;
 
-	/* if all iommus are freed, free g_iommus */
-	for (i = 0; i < g_num_of_iommus; i++) {
-		if (g_iommus[i])
-			break;
-	}
-
-	if (i == g_num_of_iommus)
-		kfree(g_iommus);
-
 	/* free context mapping */
 	free_context_table(iommu);
 }
 
-static struct dmar_domain *alloc_domain(void)
+static struct dmar_domain *alloc_domain(bool vm)
 {
+	/* domain id for virtual machine, it won't be set in context */
+	static atomic_t vm_domid = ATOMIC_INIT(0);
 	struct dmar_domain *domain;
 
 	domain = alloc_domain_mem();
@@ -1320,8 +1507,15 @@ static struct dmar_domain *alloc_domain(void)
 		return NULL;
 
 	domain->nid = -1;
+	domain->iommu_count = 0;
 	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
 	domain->flags = 0;
+	spin_lock_init(&domain->iommu_lock);
+	INIT_LIST_HEAD(&domain->devices);
+	if (vm) {
+		domain->id = atomic_inc_return(&vm_domid);
+		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+	}
 
 	return domain;
 }
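
alloc_domain(bool vm) gives virtual-machine domains their ids from a private atomic counter rather than a per-IOMMU id bitmap, since a VM domain's id is never written into a hardware context entry. The same allocation pattern in standalone C11 atomics (the kernel uses atomic_inc_return on an atomic_t; names here are illustrative):

	#include <stdatomic.h>

	static atomic_int vm_domid;	/* a function-local static in the patch */

	static int alloc_vm_domain_id(void)
	{
		/* yields 1, 2, 3, ...; uniqueness needs no lock, and the ids
		 * can never collide with hardware-assigned domain ids because
		 * they are never programmed into a context entry */
		return atomic_fetch_add(&vm_domid, 1) + 1;
	}
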
@@ -1345,6 +1539,7 @@ static int iommu_attach_domain(struct dmar_domain *domain,
 	}
 
 	domain->id = num;
+	domain->iommu_count++;
 	set_bit(num, iommu->domain_ids);
 	set_bit(iommu->seq_id, domain->iommu_bmp);
 	iommu->domains[num] = domain;
@@ -1358,22 +1553,16 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 {
 	unsigned long flags;
 	int num, ndomains;
-	int found = 0;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ndomains = cap_ndoms(iommu->cap);
 	for_each_set_bit(num, iommu->domain_ids, ndomains) {
 		if (iommu->domains[num] == domain) {
-			found = 1;
+			clear_bit(num, iommu->domain_ids);
+			iommu->domains[num] = NULL;
 			break;
 		}
 	}
-
-	if (found) {
-		clear_bit(num, iommu->domain_ids);
-		clear_bit(iommu->seq_id, domain->iommu_bmp);
-		iommu->domains[num] = NULL;
-	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -1445,8 +1634,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	unsigned long sagaw;
 
 	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-	spin_lock_init(&domain->iommu_lock);
-
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
@@ -1465,7 +1652,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 		return -ENODEV;
 	}
 	domain->agaw = agaw;
-	INIT_LIST_HEAD(&domain->devices);
 
 	if (ecap_coherent(iommu->ecap))
 		domain->iommu_coherency = 1;
@@ -1477,8 +1663,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	else
 		domain->iommu_snooping = 0;
 
-	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-	domain->iommu_count = 1;
+	if (intel_iommu_superpage)
+		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+	else
+		domain->iommu_superpage = 0;
+
 	domain->nid = iommu->node;
 
 	/* always allocate the top pgd */
@@ -1493,6 +1682,7 @@ static void domain_exit(struct dmar_domain *domain)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
+	struct page *freelist = NULL;
 
 	/* Domain 0 is reserved, so dont process it */
 	if (!domain)
@@ -1502,29 +1692,33 @@ static void domain_exit(struct dmar_domain *domain)
 	if (!intel_iommu_strict)
 		flush_unmaps_timeout(0);
 
+	/* remove associated devices */
 	domain_remove_dev_info(domain);
+
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 
-	/* clear ptes */
-	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-	/* free page tables */
-	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
+	/* clear attached or cached domains */
+	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd)
-		if (test_bit(iommu->seq_id, domain->iommu_bmp))
+		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+		    test_bit(iommu->seq_id, domain->iommu_bmp))
 			iommu_detach_domain(domain, iommu);
+	rcu_read_unlock();
+
+	dma_free_pagelist(freelist);
 
 	free_domain_mem(domain);
 }
 
-static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
-				      u8 bus, u8 devfn, int translation)
+static int domain_context_mapping_one(struct dmar_domain *domain,
+				      struct intel_iommu *iommu,
+				      u8 bus, u8 devfn, int translation)
 {
 	struct context_entry *context;
 	unsigned long flags;
-	struct intel_iommu *iommu;
 	struct dma_pte *pgd;
 	unsigned long num;
 	unsigned long ndomains;
@@ -1539,10 +1733,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
 	       translation != CONTEXT_TT_MULTI_LEVEL);
 
-	iommu = device_to_iommu(segment, bus, devfn);
-	if (!iommu)
-		return -ENODEV;
-
 	context = device_to_context_entry(iommu, bus, devfn);
 	if (!context)
 		return -ENOMEM;
@@ -1600,7 +1790,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	context_set_domain_id(context, id);
 
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
-		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		translation = info ? CONTEXT_TT_DEV_IOTLB :
 				     CONTEXT_TT_MULTI_LEVEL;
 	}
@@ -1650,27 +1840,32 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+domain_context_mapping(struct dmar_domain *domain, struct device *dev,
 		       int translation)
 {
 	int ret;
-	struct pci_dev *tmp, *parent;
+	struct pci_dev *pdev, *tmp, *parent;
+	struct intel_iommu *iommu;
+	u8 bus, devfn;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return -ENODEV;
 
-	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
-					 pdev->bus->number, pdev->devfn,
+	ret = domain_context_mapping_one(domain, iommu, bus, devfn,
 					 translation);
-	if (ret)
+	if (ret || !dev_is_pci(dev))
 		return ret;
 
 	/* dependent device mapping */
+	pdev = to_pci_dev(dev);
 	tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (!tmp)
 		return 0;
 	/* Secondary interface's bus number and devfn 0 */
 	parent = pdev->bus->self;
 	while (parent != tmp) {
-		ret = domain_context_mapping_one(domain,
-						 pci_domain_nr(parent->bus),
+		ret = domain_context_mapping_one(domain, iommu,
 						 parent->bus->number,
 						 parent->devfn, translation);
 		if (ret)
@@ -1678,33 +1873,33 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
 		parent = parent->bus->self;
 	}
 	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-		return domain_context_mapping_one(domain,
-					pci_domain_nr(tmp->subordinate),
+		return domain_context_mapping_one(domain, iommu,
 					tmp->subordinate->number, 0,
 					translation);
 	else /* this is a legacy PCI bridge */
-		return domain_context_mapping_one(domain,
-					pci_domain_nr(tmp->bus),
+		return domain_context_mapping_one(domain, iommu,
 					tmp->bus->number,
 					tmp->devfn,
 					translation);
 }
 
-static int domain_context_mapped(struct pci_dev *pdev)
+static int domain_context_mapped(struct device *dev)
 {
 	int ret;
-	struct pci_dev *tmp, *parent;
+	struct pci_dev *pdev, *tmp, *parent;
 	struct intel_iommu *iommu;
+	u8 bus, devfn;
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
-	if (!ret)
+	ret = device_context_mapped(iommu, bus, devfn);
+	if (!ret || !dev_is_pci(dev))
 		return ret;
+
 	/* dependent device mapping */
+	pdev = to_pci_dev(dev);
 	tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (!tmp)
 		return ret;
@@ -1800,7 +1995,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		if (!pte) {
 			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
 
-			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
+			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
 			if (!pte)
 				return -ENOMEM;
 			/* It is large page*/
@@ -1899,14 +2094,13 @@ static inline void unlink_domain_info(struct device_domain_info *info)
 	list_del(&info->link);
 	list_del(&info->global);
 	if (info->dev)
-		info->dev->dev.archdata.iommu = NULL;
+		info->dev->archdata.iommu = NULL;
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
-	unsigned long flags;
-	struct intel_iommu *iommu;
+	unsigned long flags, flags2;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	while (!list_empty(&domain->devices)) {
@@ -1916,10 +2110,23 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
 		iommu_disable_dev_iotlb(info);
-		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
-		iommu_detach_dev(iommu, info->bus, info->devfn);
-		free_devinfo_mem(info);
+		iommu_detach_dev(info->iommu, info->bus, info->devfn);
 
+		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+			iommu_detach_dependent_devices(info->iommu, info->dev);
+			/* clear this iommu in iommu_bmp, update iommu count
+			 * and capabilities
+			 */
+			spin_lock_irqsave(&domain->iommu_lock, flags2);
+			if (test_and_clear_bit(info->iommu->seq_id,
+					       domain->iommu_bmp)) {
+				domain->iommu_count--;
+				domain_update_iommu_cap(domain);
+			}
+			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+		}
+
+		free_devinfo_mem(info);
 		spin_lock_irqsave(&device_domain_lock, flags);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1927,155 +2134,151 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 
 /*
  * find_domain
- * Note: we use struct pci_dev->dev.archdata.iommu stores the info
+ * Note: we use struct device->archdata.iommu to store the info
  */
-static struct dmar_domain *
-find_domain(struct pci_dev *pdev)
+static struct dmar_domain *find_domain(struct device *dev)
 {
 	struct device_domain_info *info;
 
 	/* No lock here, assumes no domain exit in normal case */
-	info = pdev->dev.archdata.iommu;
+	info = dev->archdata.iommu;
 	if (info)
 		return info->domain;
 	return NULL;
 }
 
+static inline struct device_domain_info *
+dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
+{
+	struct device_domain_info *info;
+
+	list_for_each_entry(info, &device_domain_list, global)
+		if (info->iommu->segment == segment && info->bus == bus &&
+		    info->devfn == devfn)
+			return info;
+
+	return NULL;
+}
+
+static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
+						int bus, int devfn,
+						struct device *dev,
+						struct dmar_domain *domain)
+{
+	struct dmar_domain *found = NULL;
+	struct device_domain_info *info;
+	unsigned long flags;
+
+	info = alloc_devinfo_mem();
+	if (!info)
+		return NULL;
+
+	info->bus = bus;
+	info->devfn = devfn;
+	info->dev = dev;
+	info->domain = domain;
+	info->iommu = iommu;
+	if (!dev)
+		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	if (dev)
+		found = find_domain(dev);
+	else {
+		struct device_domain_info *info2;
+		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+		if (info2)
+			found = info2->domain;
+	}
+	if (found) {
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+		free_devinfo_mem(info);
+		/* Caller must free the original domain */
+		return found;
+	}
+
+	list_add(&info->link, &domain->devices);
+	list_add(&info->global, &device_domain_list);
+	if (dev)
+		dev->archdata.iommu = info;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return domain;
+}
+
 /* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
-	struct dmar_domain *domain, *found = NULL;
-	struct intel_iommu *iommu;
-	struct dmar_drhd_unit *drhd;
-	struct device_domain_info *info, *tmp;
-	struct pci_dev *dev_tmp;
+	struct dmar_domain *domain, *free = NULL;
+	struct intel_iommu *iommu = NULL;
+	struct device_domain_info *info;
+	struct pci_dev *dev_tmp = NULL;
 	unsigned long flags;
-	int bus = 0, devfn = 0;
-	int segment;
-	int ret;
+	u8 bus, devfn, bridge_bus, bridge_devfn;
 
-	domain = find_domain(pdev);
+	domain = find_domain(dev);
 	if (domain)
 		return domain;
 
-	segment = pci_domain_nr(pdev->bus);
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		u16 segment;
 
-	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
-	if (dev_tmp) {
-		if (pci_is_pcie(dev_tmp)) {
-			bus = dev_tmp->subordinate->number;
-			devfn = 0;
-		} else {
-			bus = dev_tmp->bus->number;
-			devfn = dev_tmp->devfn;
-		}
-		spin_lock_irqsave(&device_domain_lock, flags);
-		list_for_each_entry(info, &device_domain_list, global) {
-			if (info->segment == segment &&
-			    info->bus == bus && info->devfn == devfn) {
-				found = info->domain;
-				break;
-			}
+		segment = pci_domain_nr(pdev->bus);
+		dev_tmp = pci_find_upstream_pcie_bridge(pdev);
+		if (dev_tmp) {
+			if (pci_is_pcie(dev_tmp)) {
+				bridge_bus = dev_tmp->subordinate->number;
+				bridge_devfn = 0;
+			} else {
+				bridge_bus = dev_tmp->bus->number;
+				bridge_devfn = dev_tmp->devfn;
			}
+			spin_lock_irqsave(&device_domain_lock, flags);
+			info = dmar_search_domain_by_dev_info(segment, bus, devfn);
+			if (info) {
+				iommu = info->iommu;
+				domain = info->domain;
+			}
+			spin_unlock_irqrestore(&device_domain_lock, flags);
+			/* pcie-pci bridge already has a domain, uses it */
+			if (info)
+				goto found_domain;
 		}
-		spin_unlock_irqrestore(&device_domain_lock, flags);
-		/* pcie-pci bridge already has a domain, uses it */
-		if (found) {
-			domain = found;
-			goto found_domain;
-		}
 	}
 
-	domain = alloc_domain();
-	if (!domain)
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
 		goto error;
 
-	/* Allocate new domain for the device */
-	drhd = dmar_find_matched_drhd_unit(pdev);
-	if (!drhd) {
-		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
-			pci_name(pdev));
-		free_domain_mem(domain);
-		return NULL;
-	}
-	iommu = drhd->iommu;
-
-	ret = iommu_attach_domain(domain, iommu);
-	if (ret) {
+	/* Allocate and initialize new domain for the device */
+	domain = alloc_domain(false);
+	if (!domain)
+		goto error;
+	if (iommu_attach_domain(domain, iommu)) {
 		free_domain_mem(domain);
+		domain = NULL;
 		goto error;
 	}
-
-	if (domain_init(domain, gaw)) {
-		domain_exit(domain);
+	free = domain;
+	if (domain_init(domain, gaw))
 		goto error;
-	}
 
 	/* register pcie-to-pci device */
 	if (dev_tmp) {
-		info = alloc_devinfo_mem();
-		if (!info) {
-			domain_exit(domain);
-			goto error;
-		}
-		info->segment = segment;
-		info->bus = bus;
-		info->devfn = devfn;
-		info->dev = NULL;
-		info->domain = domain;
-		/* This domain is shared by devices under p2p bridge */
-		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
-
-		/* pcie-to-pci bridge already has a domain, uses it */
-		found = NULL;
-		spin_lock_irqsave(&device_domain_lock, flags);
-		list_for_each_entry(tmp, &device_domain_list, global) {
-			if (tmp->segment == segment &&
-			    tmp->bus == bus && tmp->devfn == devfn) {
-				found = tmp->domain;
-				break;
-			}
-		}
-		if (found) {
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			free_devinfo_mem(info);
-			domain_exit(domain);
-			domain = found;
-		} else {
-			list_add(&info->link, &domain->devices);
-			list_add(&info->global, &device_domain_list);
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-		}
+		domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
+					      NULL, domain);
+		if (!domain)
+			goto error;
 	}
 
 found_domain:
-	info = alloc_devinfo_mem();
-	if (!info)
-		goto error;
-	info->segment = segment;
-	info->bus = pdev->bus->number;
-	info->devfn = pdev->devfn;
-	info->dev = pdev;
-	info->domain = domain;
-	spin_lock_irqsave(&device_domain_lock, flags);
-	/* somebody is fast */
-	found = find_domain(pdev);
-	if (found != NULL) {
-		spin_unlock_irqrestore(&device_domain_lock, flags);
-		if (found != domain) {
-			domain_exit(domain);
-			domain = found;
-		}
-		free_devinfo_mem(info);
-		return domain;
-	}
-	list_add(&info->link, &domain->devices);
-	list_add(&info->global, &device_domain_list);
-	pdev->dev.archdata.iommu = info;
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-	return domain;
+	domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
 error:
-	/* recheck it here, maybe others set it */
-	return find_domain(pdev);
+	if (free != domain)
+		domain_exit(free);
+
+	return domain;
 }
 
 static int iommu_identity_mapping;
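
dmar_insert_dev_info() above centralizes a classic lockless-allocation pattern that the old get_domain_for_dev() open-coded twice: allocate the info structure outside the spinlock, recheck under the lock whether a concurrent caller already registered the device, and if so free the fresh allocation and return the winner's domain. A generic sketch of the pattern in standalone C, with a pthread mutex standing in for the spinlock and illustrative names throughout:

	#include <pthread.h>
	#include <stdlib.h>

	struct obj { int key; struct obj *next; };

	static struct obj *table;
	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct obj *lookup(int key)	/* caller holds table_lock */
	{
		struct obj *o;

		for (o = table; o; o = o->next)
			if (o->key == key)
				return o;
		return NULL;
	}

	static struct obj *insert(int key)
	{
		struct obj *fresh = malloc(sizeof(*fresh));	/* no lock held */
		struct obj *found;

		if (!fresh)
			return NULL;
		fresh->key = key;

		pthread_mutex_lock(&table_lock);
		found = lookup(key);
		if (found) {			/* lost the race: discard ours */
			pthread_mutex_unlock(&table_lock);
			free(fresh);
			return found;
		}
		fresh->next = table;		/* won the race: publish */
		table = fresh;
		pthread_mutex_unlock(&table_lock);
		return fresh;
	}

This keeps the allocation (which may sleep or be slow) out of the locked region, at the cost of occasionally allocating and immediately freeing on a lost race; the "Caller must free the original domain" comment in the patch is the same idea one level up.
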
@@ -2109,14 +2312,14 @@ static int iommu_domain_identity_map(struct dmar_domain *domain, | |||
2109 | DMA_PTE_READ|DMA_PTE_WRITE); | 2312 | DMA_PTE_READ|DMA_PTE_WRITE); |
2110 | } | 2313 | } |
2111 | 2314 | ||
2112 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 2315 | static int iommu_prepare_identity_map(struct device *dev, |
2113 | unsigned long long start, | 2316 | unsigned long long start, |
2114 | unsigned long long end) | 2317 | unsigned long long end) |
2115 | { | 2318 | { |
2116 | struct dmar_domain *domain; | 2319 | struct dmar_domain *domain; |
2117 | int ret; | 2320 | int ret; |
2118 | 2321 | ||
2119 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 2322 | domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
2120 | if (!domain) | 2323 | if (!domain) |
2121 | return -ENOMEM; | 2324 | return -ENOMEM; |
2122 | 2325 | ||
@@ -2126,13 +2329,13 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
2126 | up to start with in si_domain */ | 2329 | up to start with in si_domain */ |
2127 | if (domain == si_domain && hw_pass_through) { | 2330 | if (domain == si_domain && hw_pass_through) { |
2128 | printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", | 2331 | printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", |
2129 | pci_name(pdev), start, end); | 2332 | dev_name(dev), start, end); |
2130 | return 0; | 2333 | return 0; |
2131 | } | 2334 | } |
2132 | 2335 | ||
2133 | printk(KERN_INFO | 2336 | printk(KERN_INFO |
2134 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 2337 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
2135 | pci_name(pdev), start, end); | 2338 | dev_name(dev), start, end); |
2136 | 2339 | ||
2137 | if (end < start) { | 2340 | if (end < start) { |
2138 | WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" | 2341 | WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" |
@@ -2160,7 +2363,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
2160 | goto error; | 2363 | goto error; |
2161 | 2364 | ||
2162 | /* context entry init */ | 2365 | /* context entry init */ |
2163 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 2366 | ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); |
2164 | if (ret) | 2367 | if (ret) |
2165 | goto error; | 2368 | goto error; |
2166 | 2369 | ||
@@ -2172,12 +2375,12 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
2172 | } | 2375 | } |
2173 | 2376 | ||
2174 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, | 2377 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, |
2175 | struct pci_dev *pdev) | 2378 | struct device *dev) |
2176 | { | 2379 | { |
2177 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2380 | if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
2178 | return 0; | 2381 | return 0; |
2179 | return iommu_prepare_identity_map(pdev, rmrr->base_address, | 2382 | return iommu_prepare_identity_map(dev, rmrr->base_address, |
2180 | rmrr->end_address); | 2383 | rmrr->end_address); |
2181 | } | 2384 | } |
2182 | 2385 | ||
2183 | #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA | 2386 | #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA |
@@ -2191,7 +2394,7 @@ static inline void iommu_prepare_isa(void) | |||
2191 | return; | 2394 | return; |
2192 | 2395 | ||
2193 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); | 2396 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); |
2194 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1); | 2397 | ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1); |
2195 | 2398 | ||
2196 | if (ret) | 2399 | if (ret) |
2197 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " | 2400 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " |
@@ -2213,10 +2416,12 @@ static int __init si_domain_init(int hw) | |||
2213 | struct intel_iommu *iommu; | 2416 | struct intel_iommu *iommu; |
2214 | int nid, ret = 0; | 2417 | int nid, ret = 0; |
2215 | 2418 | ||
2216 | si_domain = alloc_domain(); | 2419 | si_domain = alloc_domain(false); |
2217 | if (!si_domain) | 2420 | if (!si_domain) |
2218 | return -EFAULT; | 2421 | return -EFAULT; |
2219 | 2422 | ||
2423 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
2424 | |||
2220 | for_each_active_iommu(iommu, drhd) { | 2425 | for_each_active_iommu(iommu, drhd) { |
2221 | ret = iommu_attach_domain(si_domain, iommu); | 2426 | ret = iommu_attach_domain(si_domain, iommu); |
2222 | if (ret) { | 2427 | if (ret) { |
@@ -2230,7 +2435,6 @@ static int __init si_domain_init(int hw) | |||
2230 | return -EFAULT; | 2435 | return -EFAULT; |
2231 | } | 2436 | } |
2232 | 2437 | ||
2233 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
2234 | pr_debug("IOMMU: identity mapping domain is domain %d\n", | 2438 | pr_debug("IOMMU: identity mapping domain is domain %d\n", |
2235 | si_domain->id); | 2439 | si_domain->id); |
2236 | 2440 | ||
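The hunks above also reorder si_domain_init(): DOMAIN_FLAG_STATIC_IDENTITY is now assigned before the domain is attached to any IOMMU instead of after the attach loop, so anything that inspects the flags while the attach is in progress sees a fully initialized domain. A minimal userspace sketch of that initialize-before-publish ordering, with every name invented for illustration:

#include <stdio.h>

#define FLAG_STATIC_IDENTITY 0x1

struct domain {
	unsigned int flags;   /* must be final before the domain is visible */
	int id;
};

/* Stands in for iommu_attach_domain(); it may already look at flags. */
static void attach(const struct domain *dom)
{
	printf("attached domain %d, flags=%#x\n", dom->id, dom->flags);
}

int main(void)
{
	struct domain si = { 0 };

	si.id = 1;
	si.flags = FLAG_STATIC_IDENTITY;   /* set before attach, as in the hunk */
	attach(&si);
	return 0;
}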
@@ -2252,16 +2456,14 @@ static int __init si_domain_init(int hw) | |||
2252 | return 0; | 2456 | return 0; |
2253 | } | 2457 | } |
2254 | 2458 | ||
2255 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | 2459 | static int identity_mapping(struct device *dev) |
2256 | struct pci_dev *pdev); | ||
2257 | static int identity_mapping(struct pci_dev *pdev) | ||
2258 | { | 2460 | { |
2259 | struct device_domain_info *info; | 2461 | struct device_domain_info *info; |
2260 | 2462 | ||
2261 | if (likely(!iommu_identity_mapping)) | 2463 | if (likely(!iommu_identity_mapping)) |
2262 | return 0; | 2464 | return 0; |
2263 | 2465 | ||
2264 | info = pdev->dev.archdata.iommu; | 2466 | info = dev->archdata.iommu; |
2265 | if (info && info != DUMMY_DEVICE_DOMAIN_INFO) | 2467 | if (info && info != DUMMY_DEVICE_DOMAIN_INFO) |
2266 | return (info->domain == si_domain); | 2468 | return (info->domain == si_domain); |
2267 | 2469 | ||
@@ -2269,111 +2471,112 @@ static int identity_mapping(struct pci_dev *pdev) | |||
2269 | } | 2471 | } |
2270 | 2472 | ||
2271 | static int domain_add_dev_info(struct dmar_domain *domain, | 2473 | static int domain_add_dev_info(struct dmar_domain *domain, |
2272 | struct pci_dev *pdev, | 2474 | struct device *dev, int translation) |
2273 | int translation) | ||
2274 | { | 2475 | { |
2275 | struct device_domain_info *info; | 2476 | struct dmar_domain *ndomain; |
2276 | unsigned long flags; | 2477 | struct intel_iommu *iommu; |
2478 | u8 bus, devfn; | ||
2277 | int ret; | 2479 | int ret; |
2278 | 2480 | ||
2279 | info = alloc_devinfo_mem(); | 2481 | iommu = device_to_iommu(dev, &bus, &devfn); |
2280 | if (!info) | 2482 | if (!iommu) |
2281 | return -ENOMEM; | 2483 | return -ENODEV; |
2282 | |||
2283 | info->segment = pci_domain_nr(pdev->bus); | ||
2284 | info->bus = pdev->bus->number; | ||
2285 | info->devfn = pdev->devfn; | ||
2286 | info->dev = pdev; | ||
2287 | info->domain = domain; | ||
2288 | 2484 | ||
2289 | spin_lock_irqsave(&device_domain_lock, flags); | 2485 | ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); |
2290 | list_add(&info->link, &domain->devices); | 2486 | if (ndomain != domain) |
2291 | list_add(&info->global, &device_domain_list); | 2487 | return -EBUSY; |
2292 | pdev->dev.archdata.iommu = info; | ||
2293 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2294 | 2488 | ||
2295 | ret = domain_context_mapping(domain, pdev, translation); | 2489 | ret = domain_context_mapping(domain, dev, translation); |
2296 | if (ret) { | 2490 | if (ret) { |
2297 | spin_lock_irqsave(&device_domain_lock, flags); | 2491 | domain_remove_one_dev_info(domain, dev); |
2298 | unlink_domain_info(info); | ||
2299 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2300 | free_devinfo_mem(info); | ||
2301 | return ret; | 2492 | return ret; |
2302 | } | 2493 | } |
2303 | 2494 | ||
2304 | return 0; | 2495 | return 0; |
2305 | } | 2496 | } |
2306 | 2497 | ||
2307 | static bool device_has_rmrr(struct pci_dev *dev) | 2498 | static bool device_has_rmrr(struct device *dev) |
2308 | { | 2499 | { |
2309 | struct dmar_rmrr_unit *rmrr; | 2500 | struct dmar_rmrr_unit *rmrr; |
2501 | struct device *tmp; | ||
2310 | int i; | 2502 | int i; |
2311 | 2503 | ||
2504 | rcu_read_lock(); | ||
2312 | for_each_rmrr_units(rmrr) { | 2505 | for_each_rmrr_units(rmrr) { |
2313 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2506 | /* |
2314 | /* | 2507 | * Return TRUE if this RMRR contains the device that |
2315 | * Return TRUE if this RMRR contains the device that | 2508 | * is passed in. |
2316 | * is passed in. | 2509 | */ |
2317 | */ | 2510 | for_each_active_dev_scope(rmrr->devices, |
2318 | if (rmrr->devices[i] == dev) | 2511 | rmrr->devices_cnt, i, tmp) |
2512 | if (tmp == dev) { | ||
2513 | rcu_read_unlock(); | ||
2319 | return true; | 2514 | return true; |
2320 | } | 2515 | } |
2321 | } | 2516 | } |
2517 | rcu_read_unlock(); | ||
2322 | return false; | 2518 | return false; |
2323 | } | 2519 | } |
2324 | 2520 | ||
2325 | static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | 2521 | static int iommu_should_identity_map(struct device *dev, int startup) |
2326 | { | 2522 | { |
2327 | 2523 | ||
2328 | /* | 2524 | if (dev_is_pci(dev)) { |
2329 | * We want to prevent any device associated with an RMRR from | 2525 | struct pci_dev *pdev = to_pci_dev(dev); |
2330 | * getting placed into the SI Domain. This is done because | ||
2331 | * problems exist when devices are moved in and out of domains | ||
2332 | * and their respective RMRR info is lost. We exempt USB devices | ||
2333 | * from this process due to their usage of RMRRs that are known | ||
2334 | * to not be needed after BIOS hand-off to OS. | ||
2335 | */ | ||
2336 | if (device_has_rmrr(pdev) && | ||
2337 | (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) | ||
2338 | return 0; | ||
2339 | 2526 | ||
2340 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) | 2527 | /* |
2341 | return 1; | 2528 | * We want to prevent any device associated with an RMRR from |
2529 | * getting placed into the SI Domain. This is done because | ||
2530 | * problems exist when devices are moved in and out of domains | ||
2531 | * and their respective RMRR info is lost. We exempt USB devices | ||
2532 | * from this process due to their usage of RMRRs that are known | ||
2533 | * to not be needed after BIOS hand-off to OS. | ||
2534 | */ | ||
2535 | if (device_has_rmrr(dev) && | ||
2536 | (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) | ||
2537 | return 0; | ||
2342 | 2538 | ||
2343 | if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) | 2539 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) |
2344 | return 1; | 2540 | return 1; |
2345 | 2541 | ||
2346 | if (!(iommu_identity_mapping & IDENTMAP_ALL)) | 2542 | if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) |
2347 | return 0; | 2543 | return 1; |
2348 | 2544 | ||
2349 | /* | 2545 | if (!(iommu_identity_mapping & IDENTMAP_ALL)) |
2350 | * We want to start off with all devices in the 1:1 domain, and | ||
2351 | * take them out later if we find they can't access all of memory. | ||
2352 | * | ||
2353 | * However, we can't do this for PCI devices behind bridges, | ||
2354 | * because all PCI devices behind the same bridge will end up | ||
2355 | * with the same source-id on their transactions. | ||
2356 | * | ||
2357 | * Practically speaking, we can't change things around for these | ||
2358 | * devices at run-time, because we can't be sure there'll be no | ||
2359 | * DMA transactions in flight for any of their siblings. | ||
2360 | * | ||
2361 | * So PCI devices (unless they're on the root bus) as well as | ||
2362 | * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of | ||
2363 | * the 1:1 domain, just in _case_ one of their siblings turns out | ||
2364 | * not to be able to map all of memory. | ||
2365 | */ | ||
2366 | if (!pci_is_pcie(pdev)) { | ||
2367 | if (!pci_is_root_bus(pdev->bus)) | ||
2368 | return 0; | 2546 | return 0; |
2369 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | 2547 | |
2548 | /* | ||
2549 | * We want to start off with all devices in the 1:1 domain, and | ||
2550 | * take them out later if we find they can't access all of memory. | ||
2551 | * | ||
2552 | * However, we can't do this for PCI devices behind bridges, | ||
2553 | * because all PCI devices behind the same bridge will end up | ||
2554 | * with the same source-id on their transactions. | ||
2555 | * | ||
2556 | * Practically speaking, we can't change things around for these | ||
2557 | * devices at run-time, because we can't be sure there'll be no | ||
2558 | * DMA transactions in flight for any of their siblings. | ||
2559 | * | ||
2560 | * So PCI devices (unless they're on the root bus) as well as | ||
2561 | * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of | ||
2562 | * the 1:1 domain, just in _case_ one of their siblings turns out | ||
2563 | * not to be able to map all of memory. | ||
2564 | */ | ||
2565 | if (!pci_is_pcie(pdev)) { | ||
2566 | if (!pci_is_root_bus(pdev->bus)) | ||
2567 | return 0; | ||
2568 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | ||
2569 | return 0; | ||
2570 | } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) | ||
2370 | return 0; | 2571 | return 0; |
2371 | } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) | 2572 | } else { |
2372 | return 0; | 2573 | if (device_has_rmrr(dev)) |
2574 | return 0; | ||
2575 | } | ||
2373 | 2576 | ||
2374 | /* | 2577 | /* |
2375 | * At boot time, we don't yet know if devices will be 64-bit capable. | 2578 | * At boot time, we don't yet know if devices will be 64-bit capable. |
2376 | * Assume that they will -- if they turn out not to be, then we can | 2579 | * Assume that they will -- if they turn out not to be, then we can |
2377 | * take them out of the 1:1 domain later. | 2580 | * take them out of the 1:1 domain later. |
2378 | */ | 2581 | */ |
2379 | if (!startup) { | 2582 | if (!startup) { |
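The rewritten device_has_rmrr() above walks the RMRR device scopes under rcu_read_lock(), and the easy mistake in that shape is forgetting to drop the lock on the early "found" return. A small sketch of the same discipline, using a pthread rwlock as a stand-in for RCU and an invented scope table:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t scope_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *scope[] = { "0000:00:1f.0", "0000:00:02.0" };

static bool scope_has(const char *name)
{
	pthread_rwlock_rdlock(&scope_lock);
	for (size_t i = 0; i < sizeof(scope) / sizeof(scope[0]); i++) {
		if (!strcmp(scope[i], name)) {
			/* unlock on the early exit too, mirroring the
			 * rcu_read_unlock() before "return true" above */
			pthread_rwlock_unlock(&scope_lock);
			return true;
		}
	}
	pthread_rwlock_unlock(&scope_lock);
	return false;
}

int main(void)
{
	printf("%d\n", scope_has("0000:00:02.0"));
	return 0;
}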
@@ -2381,42 +2584,77 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | |||
2381 | * If the device's dma_mask is less than the system's memory | 2584 | * If the device's dma_mask is less than the system's memory |
2382 | * size then this is not a candidate for identity mapping. | 2585 | * size then this is not a candidate for identity mapping. |
2383 | */ | 2586 | */ |
2384 | u64 dma_mask = pdev->dma_mask; | 2587 | u64 dma_mask = *dev->dma_mask; |
2385 | 2588 | ||
2386 | if (pdev->dev.coherent_dma_mask && | 2589 | if (dev->coherent_dma_mask && |
2387 | pdev->dev.coherent_dma_mask < dma_mask) | 2590 | dev->coherent_dma_mask < dma_mask) |
2388 | dma_mask = pdev->dev.coherent_dma_mask; | 2591 | dma_mask = dev->coherent_dma_mask; |
2389 | 2592 | ||
2390 | return dma_mask >= dma_get_required_mask(&pdev->dev); | 2593 | return dma_mask >= dma_get_required_mask(dev); |
2391 | } | 2594 | } |
2392 | 2595 | ||
2393 | return 1; | 2596 | return 1; |
2394 | } | 2597 | } |
2395 | 2598 | ||
2599 | static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) | ||
2600 | { | ||
2601 | int ret; | ||
2602 | |||
2603 | if (!iommu_should_identity_map(dev, 1)) | ||
2604 | return 0; | ||
2605 | |||
2606 | ret = domain_add_dev_info(si_domain, dev, | ||
2607 | hw ? CONTEXT_TT_PASS_THROUGH : | ||
2608 | CONTEXT_TT_MULTI_LEVEL); | ||
2609 | if (!ret) | ||
2610 | pr_info("IOMMU: %s identity mapping for device %s\n", | ||
2611 | hw ? "hardware" : "software", dev_name(dev)); | ||
2612 | else if (ret == -ENODEV) | ||
2613 | /* device not associated with an iommu */ | ||
2614 | ret = 0; | ||
2615 | |||
2616 | return ret; | ||
2617 | } | ||
2618 | |||
2619 | |||
2396 | static int __init iommu_prepare_static_identity_mapping(int hw) | 2620 | static int __init iommu_prepare_static_identity_mapping(int hw) |
2397 | { | 2621 | { |
2398 | struct pci_dev *pdev = NULL; | 2622 | struct pci_dev *pdev = NULL; |
2399 | int ret; | 2623 | struct dmar_drhd_unit *drhd; |
2624 | struct intel_iommu *iommu; | ||
2625 | struct device *dev; | ||
2626 | int i; | ||
2627 | int ret = 0; | ||
2400 | 2628 | ||
2401 | ret = si_domain_init(hw); | 2629 | ret = si_domain_init(hw); |
2402 | if (ret) | 2630 | if (ret) |
2403 | return -EFAULT; | 2631 | return -EFAULT; |
2404 | 2632 | ||
2405 | for_each_pci_dev(pdev) { | 2633 | for_each_pci_dev(pdev) { |
2406 | if (iommu_should_identity_map(pdev, 1)) { | 2634 | ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); |
2407 | ret = domain_add_dev_info(si_domain, pdev, | 2635 | if (ret) |
2408 | hw ? CONTEXT_TT_PASS_THROUGH : | 2636 | return ret; |
2409 | CONTEXT_TT_MULTI_LEVEL); | 2637 | } |
2410 | if (ret) { | 2638 | |
2411 | /* device not associated with an iommu */ | 2639 | for_each_active_iommu(iommu, drhd) |
2412 | if (ret == -ENODEV) | 2640 | for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { |
2413 | continue; | 2641 | struct acpi_device_physical_node *pn; |
2414 | return ret; | 2642 | struct acpi_device *adev; |
2643 | |||
2644 | if (dev->bus != &acpi_bus_type) | ||
2645 | continue; | ||
2646 | |||
2647 | adev = to_acpi_device(dev); | ||
2648 | mutex_lock(&adev->physical_node_lock); | ||
2649 | list_for_each_entry(pn, &adev->physical_node_list, node) { | ||
2650 | ret = dev_prepare_static_identity_mapping(pn->dev, hw); | ||
2651 | if (ret) | ||
2652 | break; | ||
2415 | } | 2653 | } |
2416 | pr_info("IOMMU: %s identity mapping for device %s\n", | 2654 | mutex_unlock(&adev->physical_node_lock); |
2417 | hw ? "hardware" : "software", pci_name(pdev)); | 2655 | if (ret) |
2656 | return ret; | ||
2418 | } | 2657 | } |
2419 | } | ||
2420 | 2658 | ||
2421 | return 0; | 2659 | return 0; |
2422 | } | 2660 | } |
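dev_prepare_static_identity_mapping() factors the per-device decision out of the old loop body, and at run time the deciding input is the narrower of the device's streaming and coherent DMA masks measured against the mask the platform actually requires. A standalone sketch of that comparison (the mask values are made up):

#include <stdint.h>
#include <stdio.h>

/* Narrowest mask the device can use for any kind of DMA. */
static uint64_t effective_mask(uint64_t dma_mask, uint64_t coherent_mask)
{
	if (coherent_mask && coherent_mask < dma_mask)
		return coherent_mask;
	return dma_mask;
}

int main(void)
{
	uint64_t required = (1ULL << 36) - 1;  /* as if 64GiB of RAM */
	uint64_t dev_mask = effective_mask((1ULL << 32) - 1, 0);

	/* A 32-bit-only device cannot keep an identity map of all memory. */
	printf("identity ok: %d\n", dev_mask >= required);
	return 0;
}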
@@ -2425,7 +2663,7 @@ static int __init init_dmars(void) | |||
2425 | { | 2663 | { |
2426 | struct dmar_drhd_unit *drhd; | 2664 | struct dmar_drhd_unit *drhd; |
2427 | struct dmar_rmrr_unit *rmrr; | 2665 | struct dmar_rmrr_unit *rmrr; |
2428 | struct pci_dev *pdev; | 2666 | struct device *dev; |
2429 | struct intel_iommu *iommu; | 2667 | struct intel_iommu *iommu; |
2430 | int i, ret; | 2668 | int i, ret; |
2431 | 2669 | ||
@@ -2461,7 +2699,7 @@ static int __init init_dmars(void) | |||
2461 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 2699 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
2462 | if (!deferred_flush) { | 2700 | if (!deferred_flush) { |
2463 | ret = -ENOMEM; | 2701 | ret = -ENOMEM; |
2464 | goto error; | 2702 | goto free_g_iommus; |
2465 | } | 2703 | } |
2466 | 2704 | ||
2467 | for_each_active_iommu(iommu, drhd) { | 2705 | for_each_active_iommu(iommu, drhd) { |
@@ -2469,7 +2707,7 @@ static int __init init_dmars(void) | |||
2469 | 2707 | ||
2470 | ret = iommu_init_domains(iommu); | 2708 | ret = iommu_init_domains(iommu); |
2471 | if (ret) | 2709 | if (ret) |
2472 | goto error; | 2710 | goto free_iommu; |
2473 | 2711 | ||
2474 | /* | 2712 | /* |
2475 | * TBD: | 2713 | * TBD: |
@@ -2479,7 +2717,7 @@ static int __init init_dmars(void) | |||
2479 | ret = iommu_alloc_root_entry(iommu); | 2717 | ret = iommu_alloc_root_entry(iommu); |
2480 | if (ret) { | 2718 | if (ret) { |
2481 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); | 2719 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); |
2482 | goto error; | 2720 | goto free_iommu; |
2483 | } | 2721 | } |
2484 | if (!ecap_pass_through(iommu->ecap)) | 2722 | if (!ecap_pass_through(iommu->ecap)) |
2485 | hw_pass_through = 0; | 2723 | hw_pass_through = 0; |
@@ -2548,7 +2786,7 @@ static int __init init_dmars(void) | |||
2548 | ret = iommu_prepare_static_identity_mapping(hw_pass_through); | 2786 | ret = iommu_prepare_static_identity_mapping(hw_pass_through); |
2549 | if (ret) { | 2787 | if (ret) { |
2550 | printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); | 2788 | printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); |
2551 | goto error; | 2789 | goto free_iommu; |
2552 | } | 2790 | } |
2553 | } | 2791 | } |
2554 | /* | 2792 | /* |
@@ -2567,15 +2805,10 @@ static int __init init_dmars(void) | |||
2567 | */ | 2805 | */ |
2568 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); | 2806 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); |
2569 | for_each_rmrr_units(rmrr) { | 2807 | for_each_rmrr_units(rmrr) { |
2570 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2808 | /* some BIOSes list non-existent devices in the DMAR table. */ |
2571 | pdev = rmrr->devices[i]; | 2809 | for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, |
2572 | /* | 2810 | i, dev) { |
2573 | * some BIOS lists non-exist devices in DMAR | 2811 | ret = iommu_prepare_rmrr_dev(rmrr, dev); |
2574 | * table. | ||
2575 | */ | ||
2576 | if (!pdev) | ||
2577 | continue; | ||
2578 | ret = iommu_prepare_rmrr_dev(rmrr, pdev); | ||
2579 | if (ret) | 2812 | if (ret) |
2580 | printk(KERN_ERR | 2813 | printk(KERN_ERR |
2581 | "IOMMU: mapping reserved region failed\n"); | 2814 | "IOMMU: mapping reserved region failed\n"); |
@@ -2606,7 +2839,7 @@ static int __init init_dmars(void) | |||
2606 | 2839 | ||
2607 | ret = dmar_set_interrupt(iommu); | 2840 | ret = dmar_set_interrupt(iommu); |
2608 | if (ret) | 2841 | if (ret) |
2609 | goto error; | 2842 | goto free_iommu; |
2610 | 2843 | ||
2611 | iommu_set_root_entry(iommu); | 2844 | iommu_set_root_entry(iommu); |
2612 | 2845 | ||
@@ -2615,17 +2848,20 @@ static int __init init_dmars(void) | |||
2615 | 2848 | ||
2616 | ret = iommu_enable_translation(iommu); | 2849 | ret = iommu_enable_translation(iommu); |
2617 | if (ret) | 2850 | if (ret) |
2618 | goto error; | 2851 | goto free_iommu; |
2619 | 2852 | ||
2620 | iommu_disable_protect_mem_regions(iommu); | 2853 | iommu_disable_protect_mem_regions(iommu); |
2621 | } | 2854 | } |
2622 | 2855 | ||
2623 | return 0; | 2856 | return 0; |
2624 | error: | 2857 | |
2858 | free_iommu: | ||
2625 | for_each_active_iommu(iommu, drhd) | 2859 | for_each_active_iommu(iommu, drhd) |
2626 | free_dmar_iommu(iommu); | 2860 | free_dmar_iommu(iommu); |
2627 | kfree(deferred_flush); | 2861 | kfree(deferred_flush); |
2862 | free_g_iommus: | ||
2628 | kfree(g_iommus); | 2863 | kfree(g_iommus); |
2864 | error: | ||
2629 | return ret; | 2865 | return ret; |
2630 | } | 2866 | } |
2631 | 2867 | ||
@@ -2634,7 +2870,6 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
2634 | struct dmar_domain *domain, | 2870 | struct dmar_domain *domain, |
2635 | unsigned long nrpages, uint64_t dma_mask) | 2871 | unsigned long nrpages, uint64_t dma_mask) |
2636 | { | 2872 | { |
2637 | struct pci_dev *pdev = to_pci_dev(dev); | ||
2638 | struct iova *iova = NULL; | 2873 | struct iova *iova = NULL; |
2639 | 2874 | ||
2640 | /* Restrict dma_mask to the width that the iommu can handle */ | 2875 | /* Restrict dma_mask to the width that the iommu can handle */ |
@@ -2654,34 +2889,31 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
2654 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); | 2889 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); |
2655 | if (unlikely(!iova)) { | 2890 | if (unlikely(!iova)) { |
2656 | printk(KERN_ERR "Allocating %ld-page iova for %s failed", | 2891 | printk(KERN_ERR "Allocating %ld-page iova for %s failed", |
2657 | nrpages, pci_name(pdev)); | 2892 | nrpages, dev_name(dev)); |
2658 | return NULL; | 2893 | return NULL; |
2659 | } | 2894 | } |
2660 | 2895 | ||
2661 | return iova; | 2896 | return iova; |
2662 | } | 2897 | } |
2663 | 2898 | ||
2664 | static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) | 2899 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) |
2665 | { | 2900 | { |
2666 | struct dmar_domain *domain; | 2901 | struct dmar_domain *domain; |
2667 | int ret; | 2902 | int ret; |
2668 | 2903 | ||
2669 | domain = get_domain_for_dev(pdev, | 2904 | domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
2670 | DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
2671 | if (!domain) { | 2905 | if (!domain) { |
2672 | printk(KERN_ERR | 2906 | printk(KERN_ERR "Allocating domain for %s failed", |
2673 | "Allocating domain for %s failed", pci_name(pdev)); | 2907 | dev_name(dev)); |
2674 | return NULL; | 2908 | return NULL; |
2675 | } | 2909 | } |
2676 | 2910 | ||
2677 | /* make sure context mapping is ok */ | 2911 | /* make sure context mapping is ok */ |
2678 | if (unlikely(!domain_context_mapped(pdev))) { | 2912 | if (unlikely(!domain_context_mapped(dev))) { |
2679 | ret = domain_context_mapping(domain, pdev, | 2913 | ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); |
2680 | CONTEXT_TT_MULTI_LEVEL); | ||
2681 | if (ret) { | 2914 | if (ret) { |
2682 | printk(KERN_ERR | 2915 | printk(KERN_ERR "Domain context map for %s failed", |
2683 | "Domain context map for %s failed", | 2916 | dev_name(dev)); |
2684 | pci_name(pdev)); | ||
2685 | return NULL; | 2917 | return NULL; |
2686 | } | 2918 | } |
2687 | } | 2919 | } |
@@ -2689,51 +2921,46 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) | |||
2689 | return domain; | 2921 | return domain; |
2690 | } | 2922 | } |
2691 | 2923 | ||
2692 | static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) | 2924 | static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) |
2693 | { | 2925 | { |
2694 | struct device_domain_info *info; | 2926 | struct device_domain_info *info; |
2695 | 2927 | ||
2696 | /* No lock here, assumes no domain exit in normal case */ | 2928 | /* No lock here, assumes no domain exit in normal case */ |
2697 | info = dev->dev.archdata.iommu; | 2929 | info = dev->archdata.iommu; |
2698 | if (likely(info)) | 2930 | if (likely(info)) |
2699 | return info->domain; | 2931 | return info->domain; |
2700 | 2932 | ||
2701 | return __get_valid_domain_for_dev(dev); | 2933 | return __get_valid_domain_for_dev(dev); |
2702 | } | 2934 | } |
2703 | 2935 | ||
2704 | static int iommu_dummy(struct pci_dev *pdev) | 2936 | static int iommu_dummy(struct device *dev) |
2705 | { | 2937 | { |
2706 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; | 2938 | return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; |
2707 | } | 2939 | } |
2708 | 2940 | ||
2709 | /* Check if the pdev needs to go through non-identity map and unmap process.*/ | 2941 | /* Check if the dev needs to go through the non-identity map/unmap process. */ |
2710 | static int iommu_no_mapping(struct device *dev) | 2942 | static int iommu_no_mapping(struct device *dev) |
2711 | { | 2943 | { |
2712 | struct pci_dev *pdev; | ||
2713 | int found; | 2944 | int found; |
2714 | 2945 | ||
2715 | if (unlikely(!dev_is_pci(dev))) | 2946 | if (iommu_dummy(dev)) |
2716 | return 1; | ||
2717 | |||
2718 | pdev = to_pci_dev(dev); | ||
2719 | if (iommu_dummy(pdev)) | ||
2720 | return 1; | 2947 | return 1; |
2721 | 2948 | ||
2722 | if (!iommu_identity_mapping) | 2949 | if (!iommu_identity_mapping) |
2723 | return 0; | 2950 | return 0; |
2724 | 2951 | ||
2725 | found = identity_mapping(pdev); | 2952 | found = identity_mapping(dev); |
2726 | if (found) { | 2953 | if (found) { |
2727 | if (iommu_should_identity_map(pdev, 0)) | 2954 | if (iommu_should_identity_map(dev, 0)) |
2728 | return 1; | 2955 | return 1; |
2729 | else { | 2956 | else { |
2730 | /* | 2957 | /* |
2731 | * 32 bit DMA is removed from si_domain and falls back | 2958 | * 32 bit DMA is removed from si_domain and falls back |
2732 | * to non-identity mapping. | 2959 | * to non-identity mapping. |
2733 | */ | 2960 | */ |
2734 | domain_remove_one_dev_info(si_domain, pdev); | 2961 | domain_remove_one_dev_info(si_domain, dev); |
2735 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", | 2962 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", |
2736 | pci_name(pdev)); | 2963 | dev_name(dev)); |
2737 | return 0; | 2964 | return 0; |
2738 | } | 2965 | } |
2739 | } else { | 2966 | } else { |
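get_valid_domain_for_dev() above is a fast-path/slow-path split: the per-device archdata pointer acts as a cache, checked without locking, and only a miss takes the expensive allocation path in __get_valid_domain_for_dev(). A generic sketch of the idiom with invented types:

#include <stdio.h>
#include <stdlib.h>

struct domain { int id; };

struct dev {
	struct domain *cached;   /* plays the role of dev->archdata.iommu */
};

static struct domain *get_domain_slow(struct dev *d)
{
	struct domain *dom = malloc(sizeof(*dom));

	if (!dom)
		return NULL;
	dom->id = 42;
	d->cached = dom;         /* populate the cache for next time */
	return dom;
}

static struct domain *get_domain(struct dev *d)
{
	if (d->cached)           /* the likely() fast path, no lock taken */
		return d->cached;
	return get_domain_slow(d);
}

int main(void)
{
	struct dev d = { NULL };
	struct domain *dom = get_domain(&d);

	if (!dom)
		return 1;
	printf("%d %d\n", dom->id, get_domain(&d)->id);  /* second hit is cached */
	free(d.cached);
	return 0;
}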
@@ -2741,15 +2968,15 @@ static int iommu_no_mapping(struct device *dev) | |||
2741 | * In case of a detached 64 bit DMA device from vm, the device | 2968 | * In case of a detached 64 bit DMA device from vm, the device |
2742 | * is put into si_domain for identity mapping. | 2969 | * is put into si_domain for identity mapping. |
2743 | */ | 2970 | */ |
2744 | if (iommu_should_identity_map(pdev, 0)) { | 2971 | if (iommu_should_identity_map(dev, 0)) { |
2745 | int ret; | 2972 | int ret; |
2746 | ret = domain_add_dev_info(si_domain, pdev, | 2973 | ret = domain_add_dev_info(si_domain, dev, |
2747 | hw_pass_through ? | 2974 | hw_pass_through ? |
2748 | CONTEXT_TT_PASS_THROUGH : | 2975 | CONTEXT_TT_PASS_THROUGH : |
2749 | CONTEXT_TT_MULTI_LEVEL); | 2976 | CONTEXT_TT_MULTI_LEVEL); |
2750 | if (!ret) { | 2977 | if (!ret) { |
2751 | printk(KERN_INFO "64bit %s uses identity mapping\n", | 2978 | printk(KERN_INFO "64bit %s uses identity mapping\n", |
2752 | pci_name(pdev)); | 2979 | dev_name(dev)); |
2753 | return 1; | 2980 | return 1; |
2754 | } | 2981 | } |
2755 | } | 2982 | } |
@@ -2758,10 +2985,9 @@ static int iommu_no_mapping(struct device *dev) | |||
2758 | return 0; | 2985 | return 0; |
2759 | } | 2986 | } |
2760 | 2987 | ||
2761 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2988 | static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, |
2762 | size_t size, int dir, u64 dma_mask) | 2989 | size_t size, int dir, u64 dma_mask) |
2763 | { | 2990 | { |
2764 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
2765 | struct dmar_domain *domain; | 2991 | struct dmar_domain *domain; |
2766 | phys_addr_t start_paddr; | 2992 | phys_addr_t start_paddr; |
2767 | struct iova *iova; | 2993 | struct iova *iova; |
@@ -2772,17 +2998,17 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2772 | 2998 | ||
2773 | BUG_ON(dir == DMA_NONE); | 2999 | BUG_ON(dir == DMA_NONE); |
2774 | 3000 | ||
2775 | if (iommu_no_mapping(hwdev)) | 3001 | if (iommu_no_mapping(dev)) |
2776 | return paddr; | 3002 | return paddr; |
2777 | 3003 | ||
2778 | domain = get_valid_domain_for_dev(pdev); | 3004 | domain = get_valid_domain_for_dev(dev); |
2779 | if (!domain) | 3005 | if (!domain) |
2780 | return 0; | 3006 | return 0; |
2781 | 3007 | ||
2782 | iommu = domain_get_iommu(domain); | 3008 | iommu = domain_get_iommu(domain); |
2783 | size = aligned_nrpages(paddr, size); | 3009 | size = aligned_nrpages(paddr, size); |
2784 | 3010 | ||
2785 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); | 3011 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); |
2786 | if (!iova) | 3012 | if (!iova) |
2787 | goto error; | 3013 | goto error; |
2788 | 3014 | ||
@@ -2808,7 +3034,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2808 | 3034 | ||
2809 | /* it's a non-present to present mapping. Only flush if caching mode */ | 3035 | /* it's a non-present to present mapping. Only flush if caching mode */ |
2810 | if (cap_caching_mode(iommu->cap)) | 3036 | if (cap_caching_mode(iommu->cap)) |
2811 | iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); | 3037 | iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); |
2812 | else | 3038 | else |
2813 | iommu_flush_write_buffer(iommu); | 3039 | iommu_flush_write_buffer(iommu); |
2814 | 3040 | ||
@@ -2820,7 +3046,7 @@ error: | |||
2820 | if (iova) | 3046 | if (iova) |
2821 | __free_iova(&domain->iovad, iova); | 3047 | __free_iova(&domain->iovad, iova); |
2822 | printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", | 3048 | printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", |
2823 | pci_name(pdev), size, (unsigned long long)paddr, dir); | 3049 | dev_name(dev), size, (unsigned long long)paddr, dir); |
2824 | return 0; | 3050 | return 0; |
2825 | } | 3051 | } |
2826 | 3052 | ||
@@ -2830,7 +3056,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page, | |||
2830 | struct dma_attrs *attrs) | 3056 | struct dma_attrs *attrs) |
2831 | { | 3057 | { |
2832 | return __intel_map_single(dev, page_to_phys(page) + offset, size, | 3058 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
2833 | dir, to_pci_dev(dev)->dma_mask); | 3059 | dir, *dev->dma_mask); |
2834 | } | 3060 | } |
2835 | 3061 | ||
2836 | static void flush_unmaps(void) | 3062 | static void flush_unmaps(void) |
@@ -2860,13 +3086,16 @@ static void flush_unmaps(void) | |||
2860 | /* On real hardware multiple invalidations are expensive */ | 3086 | /* On real hardware multiple invalidations are expensive */ |
2861 | if (cap_caching_mode(iommu->cap)) | 3087 | if (cap_caching_mode(iommu->cap)) |
2862 | iommu_flush_iotlb_psi(iommu, domain->id, | 3088 | iommu_flush_iotlb_psi(iommu, domain->id, |
2863 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); | 3089 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, |
3090 | !deferred_flush[i].freelist[j], 0); | ||
2864 | else { | 3091 | else { |
2865 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); | 3092 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); |
2866 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | 3093 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], |
2867 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | 3094 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); |
2868 | } | 3095 | } |
2869 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); | 3096 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); |
3097 | if (deferred_flush[i].freelist[j]) | ||
3098 | dma_free_pagelist(deferred_flush[i].freelist[j]); | ||
2870 | } | 3099 | } |
2871 | deferred_flush[i].next = 0; | 3100 | deferred_flush[i].next = 0; |
2872 | } | 3101 | } |
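flush_unmaps() now carries a freelist in each deferred entry and hands those pages back only after the IOTLB invalidation for that batch, trading one flush for many unmaps. A compact sketch of batch-then-flush with deferred frees; the structures are invented and the invalidation is simulated by a printf:

#include <stdio.h>
#include <stdlib.h>

#define HIGH_WATER_MARK 4

struct entry {
	long iova;
	void *freelist;   /* memory that must outlive the flush */
};

static struct entry pending[HIGH_WATER_MARK];
static int next;

static void flush_all(void)
{
	printf("one TLB invalidation covers %d unmaps\n", next);
	for (int i = 0; i < next; i++)
		free(pending[i].freelist);   /* safe only after the flush */
	next = 0;
}

static void add_unmap(long iova, void *freelist)
{
	if (next == HIGH_WATER_MARK)
		flush_all();
	pending[next].iova = iova;
	pending[next].freelist = freelist;
	next++;
}

int main(void)
{
	for (long i = 0; i < 10; i++)
		add_unmap(i, malloc(8));
	flush_all();                /* the timer path in the driver */
	return 0;
}

In the driver the drain is triggered either by the table filling up or by the flush timer; the sketch folds both into explicit flush_all() calls.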
@@ -2883,7 +3112,7 @@ static void flush_unmaps_timeout(unsigned long data) | |||
2883 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 3112 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
2884 | } | 3113 | } |
2885 | 3114 | ||
2886 | static void add_unmap(struct dmar_domain *dom, struct iova *iova) | 3115 | static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) |
2887 | { | 3116 | { |
2888 | unsigned long flags; | 3117 | unsigned long flags; |
2889 | int next, iommu_id; | 3118 | int next, iommu_id; |
@@ -2899,6 +3128,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2899 | next = deferred_flush[iommu_id].next; | 3128 | next = deferred_flush[iommu_id].next; |
2900 | deferred_flush[iommu_id].domain[next] = dom; | 3129 | deferred_flush[iommu_id].domain[next] = dom; |
2901 | deferred_flush[iommu_id].iova[next] = iova; | 3130 | deferred_flush[iommu_id].iova[next] = iova; |
3131 | deferred_flush[iommu_id].freelist[next] = freelist; | ||
2902 | deferred_flush[iommu_id].next++; | 3132 | deferred_flush[iommu_id].next++; |
2903 | 3133 | ||
2904 | if (!timer_on) { | 3134 | if (!timer_on) { |
@@ -2913,16 +3143,16 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2913 | size_t size, enum dma_data_direction dir, | 3143 | size_t size, enum dma_data_direction dir, |
2914 | struct dma_attrs *attrs) | 3144 | struct dma_attrs *attrs) |
2915 | { | 3145 | { |
2916 | struct pci_dev *pdev = to_pci_dev(dev); | ||
2917 | struct dmar_domain *domain; | 3146 | struct dmar_domain *domain; |
2918 | unsigned long start_pfn, last_pfn; | 3147 | unsigned long start_pfn, last_pfn; |
2919 | struct iova *iova; | 3148 | struct iova *iova; |
2920 | struct intel_iommu *iommu; | 3149 | struct intel_iommu *iommu; |
3150 | struct page *freelist; | ||
2921 | 3151 | ||
2922 | if (iommu_no_mapping(dev)) | 3152 | if (iommu_no_mapping(dev)) |
2923 | return; | 3153 | return; |
2924 | 3154 | ||
2925 | domain = find_domain(pdev); | 3155 | domain = find_domain(dev); |
2926 | BUG_ON(!domain); | 3156 | BUG_ON(!domain); |
2927 | 3157 | ||
2928 | iommu = domain_get_iommu(domain); | 3158 | iommu = domain_get_iommu(domain); |
@@ -2936,21 +3166,18 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2936 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | 3166 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
2937 | 3167 | ||
2938 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", | 3168 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", |
2939 | pci_name(pdev), start_pfn, last_pfn); | 3169 | dev_name(dev), start_pfn, last_pfn); |
2940 | 3170 | ||
2941 | /* clear the whole page */ | 3171 | freelist = domain_unmap(domain, start_pfn, last_pfn); |
2942 | dma_pte_clear_range(domain, start_pfn, last_pfn); | ||
2943 | |||
2944 | /* free page tables */ | ||
2945 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); | ||
2946 | 3172 | ||
2947 | if (intel_iommu_strict) { | 3173 | if (intel_iommu_strict) { |
2948 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 3174 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
2949 | last_pfn - start_pfn + 1, 0); | 3175 | last_pfn - start_pfn + 1, !freelist, 0); |
2950 | /* free iova */ | 3176 | /* free iova */ |
2951 | __free_iova(&domain->iovad, iova); | 3177 | __free_iova(&domain->iovad, iova); |
3178 | dma_free_pagelist(freelist); | ||
2952 | } else { | 3179 | } else { |
2953 | add_unmap(domain, iova); | 3180 | add_unmap(domain, iova, freelist); |
2954 | /* | 3181 | /* |
2955 | * queue up the release of the unmap to save the 1/6th of the | 3182 | * queue up the release of the unmap to save the 1/6th of the |
2956 | * cpu used up by the iotlb flush operation... | 3183 | * cpu used up by the iotlb flush operation... |
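The central change in intel_unmap_page() is that domain_unmap() now returns a freelist of the page-table pages it tore down instead of freeing them inline; in strict mode they are released right after the IOTLB flush, otherwise they travel with the deferred entry. A sketch of that lifetime rule, with the unmap and flush reduced to stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct page { struct page *next; };

/* Tear down translations and return the backing page-table pages,
 * rather than freeing memory the IOMMU may still be walking. */
static struct page *domain_unmap_range(int npages)
{
	struct page *head = NULL;

	for (int i = 0; i < npages; i++) {
		struct page *p = malloc(sizeof(*p));

		if (!p)
			break;
		p->next = head;
		head = p;
	}
	return head;
}

static void free_pagelist(struct page *head)
{
	while (head) {
		struct page *next = head->next;

		free(head);
		head = next;
	}
}

int main(void)
{
	struct page *freelist = domain_unmap_range(3);

	printf("iotlb flushed\n");   /* invalidation must precede the frees */
	free_pagelist(freelist);
	return 0;
}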
@@ -2958,7 +3185,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2958 | } | 3185 | } |
2959 | } | 3186 | } |
2960 | 3187 | ||
2961 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, | 3188 | static void *intel_alloc_coherent(struct device *dev, size_t size, |
2962 | dma_addr_t *dma_handle, gfp_t flags, | 3189 | dma_addr_t *dma_handle, gfp_t flags, |
2963 | struct dma_attrs *attrs) | 3190 | struct dma_attrs *attrs) |
2964 | { | 3191 | { |
@@ -2968,10 +3195,10 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
2968 | size = PAGE_ALIGN(size); | 3195 | size = PAGE_ALIGN(size); |
2969 | order = get_order(size); | 3196 | order = get_order(size); |
2970 | 3197 | ||
2971 | if (!iommu_no_mapping(hwdev)) | 3198 | if (!iommu_no_mapping(dev)) |
2972 | flags &= ~(GFP_DMA | GFP_DMA32); | 3199 | flags &= ~(GFP_DMA | GFP_DMA32); |
2973 | else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { | 3200 | else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { |
2974 | if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) | 3201 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
2975 | flags |= GFP_DMA; | 3202 | flags |= GFP_DMA; |
2976 | else | 3203 | else |
2977 | flags |= GFP_DMA32; | 3204 | flags |= GFP_DMA32; |
@@ -2982,16 +3209,16 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
2982 | return NULL; | 3209 | return NULL; |
2983 | memset(vaddr, 0, size); | 3210 | memset(vaddr, 0, size); |
2984 | 3211 | ||
2985 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, | 3212 | *dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size, |
2986 | DMA_BIDIRECTIONAL, | 3213 | DMA_BIDIRECTIONAL, |
2987 | hwdev->coherent_dma_mask); | 3214 | dev->coherent_dma_mask); |
2988 | if (*dma_handle) | 3215 | if (*dma_handle) |
2989 | return vaddr; | 3216 | return vaddr; |
2990 | free_pages((unsigned long)vaddr, order); | 3217 | free_pages((unsigned long)vaddr, order); |
2991 | return NULL; | 3218 | return NULL; |
2992 | } | 3219 | } |
2993 | 3220 | ||
2994 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | 3221 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, |
2995 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 3222 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
2996 | { | 3223 | { |
2997 | int order; | 3224 | int order; |
@@ -2999,24 +3226,24 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
2999 | size = PAGE_ALIGN(size); | 3226 | size = PAGE_ALIGN(size); |
3000 | order = get_order(size); | 3227 | order = get_order(size); |
3001 | 3228 | ||
3002 | intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | 3229 | intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); |
3003 | free_pages((unsigned long)vaddr, order); | 3230 | free_pages((unsigned long)vaddr, order); |
3004 | } | 3231 | } |
3005 | 3232 | ||
3006 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 3233 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, |
3007 | int nelems, enum dma_data_direction dir, | 3234 | int nelems, enum dma_data_direction dir, |
3008 | struct dma_attrs *attrs) | 3235 | struct dma_attrs *attrs) |
3009 | { | 3236 | { |
3010 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
3011 | struct dmar_domain *domain; | 3237 | struct dmar_domain *domain; |
3012 | unsigned long start_pfn, last_pfn; | 3238 | unsigned long start_pfn, last_pfn; |
3013 | struct iova *iova; | 3239 | struct iova *iova; |
3014 | struct intel_iommu *iommu; | 3240 | struct intel_iommu *iommu; |
3241 | struct page *freelist; | ||
3015 | 3242 | ||
3016 | if (iommu_no_mapping(hwdev)) | 3243 | if (iommu_no_mapping(dev)) |
3017 | return; | 3244 | return; |
3018 | 3245 | ||
3019 | domain = find_domain(pdev); | 3246 | domain = find_domain(dev); |
3020 | BUG_ON(!domain); | 3247 | BUG_ON(!domain); |
3021 | 3248 | ||
3022 | iommu = domain_get_iommu(domain); | 3249 | iommu = domain_get_iommu(domain); |
@@ -3029,19 +3256,16 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
3029 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); | 3256 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
3030 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | 3257 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
3031 | 3258 | ||
3032 | /* clear the whole page */ | 3259 | freelist = domain_unmap(domain, start_pfn, last_pfn); |
3033 | dma_pte_clear_range(domain, start_pfn, last_pfn); | ||
3034 | |||
3035 | /* free page tables */ | ||
3036 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); | ||
3037 | 3260 | ||
3038 | if (intel_iommu_strict) { | 3261 | if (intel_iommu_strict) { |
3039 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 3262 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
3040 | last_pfn - start_pfn + 1, 0); | 3263 | last_pfn - start_pfn + 1, !freelist, 0); |
3041 | /* free iova */ | 3264 | /* free iova */ |
3042 | __free_iova(&domain->iovad, iova); | 3265 | __free_iova(&domain->iovad, iova); |
3266 | dma_free_pagelist(freelist); | ||
3043 | } else { | 3267 | } else { |
3044 | add_unmap(domain, iova); | 3268 | add_unmap(domain, iova, freelist); |
3045 | /* | 3269 | /* |
3046 | * queue up the release of the unmap to save the 1/6th of the | 3270 | * queue up the release of the unmap to save the 1/6th of the |
3047 | * cpu used up by the iotlb flush operation... | 3271 | * cpu used up by the iotlb flush operation... |
@@ -3063,11 +3287,10 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
3063 | return nelems; | 3287 | return nelems; |
3064 | } | 3288 | } |
3065 | 3289 | ||
3066 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | 3290 | static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, |
3067 | enum dma_data_direction dir, struct dma_attrs *attrs) | 3291 | enum dma_data_direction dir, struct dma_attrs *attrs) |
3068 | { | 3292 | { |
3069 | int i; | 3293 | int i; |
3070 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
3071 | struct dmar_domain *domain; | 3294 | struct dmar_domain *domain; |
3072 | size_t size = 0; | 3295 | size_t size = 0; |
3073 | int prot = 0; | 3296 | int prot = 0; |
@@ -3078,10 +3301,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
3078 | struct intel_iommu *iommu; | 3301 | struct intel_iommu *iommu; |
3079 | 3302 | ||
3080 | BUG_ON(dir == DMA_NONE); | 3303 | BUG_ON(dir == DMA_NONE); |
3081 | if (iommu_no_mapping(hwdev)) | 3304 | if (iommu_no_mapping(dev)) |
3082 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); | 3305 | return intel_nontranslate_map_sg(dev, sglist, nelems, dir); |
3083 | 3306 | ||
3084 | domain = get_valid_domain_for_dev(pdev); | 3307 | domain = get_valid_domain_for_dev(dev); |
3085 | if (!domain) | 3308 | if (!domain) |
3086 | return 0; | 3309 | return 0; |
3087 | 3310 | ||
@@ -3090,8 +3313,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
3090 | for_each_sg(sglist, sg, nelems, i) | 3313 | for_each_sg(sglist, sg, nelems, i) |
3091 | size += aligned_nrpages(sg->offset, sg->length); | 3314 | size += aligned_nrpages(sg->offset, sg->length); |
3092 | 3315 | ||
3093 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), | 3316 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), |
3094 | pdev->dma_mask); | 3317 | *dev->dma_mask); |
3095 | if (!iova) { | 3318 | if (!iova) { |
3096 | sglist->dma_length = 0; | 3319 | sglist->dma_length = 0; |
3097 | return 0; | 3320 | return 0; |
@@ -3124,7 +3347,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
3124 | 3347 | ||
3125 | /* it's a non-present to present mapping. Only flush if caching mode */ | 3348 | /* it's a non-present to present mapping. Only flush if caching mode */ |
3126 | if (cap_caching_mode(iommu->cap)) | 3349 | if (cap_caching_mode(iommu->cap)) |
3127 | iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); | 3350 | iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); |
3128 | else | 3351 | else |
3129 | iommu_flush_write_buffer(iommu); | 3352 | iommu_flush_write_buffer(iommu); |
3130 | 3353 | ||
@@ -3259,29 +3482,28 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quir | |||
3259 | static void __init init_no_remapping_devices(void) | 3482 | static void __init init_no_remapping_devices(void) |
3260 | { | 3483 | { |
3261 | struct dmar_drhd_unit *drhd; | 3484 | struct dmar_drhd_unit *drhd; |
3485 | struct device *dev; | ||
3486 | int i; | ||
3262 | 3487 | ||
3263 | for_each_drhd_unit(drhd) { | 3488 | for_each_drhd_unit(drhd) { |
3264 | if (!drhd->include_all) { | 3489 | if (!drhd->include_all) { |
3265 | int i; | 3490 | for_each_active_dev_scope(drhd->devices, |
3266 | for (i = 0; i < drhd->devices_cnt; i++) | 3491 | drhd->devices_cnt, i, dev) |
3267 | if (drhd->devices[i] != NULL) | 3492 | break; |
3268 | break; | 3493 | /* ignore DMAR unit if no devices exist */ |
3269 | /* ignore DMAR unit if no pci devices exist */ | ||
3270 | if (i == drhd->devices_cnt) | 3494 | if (i == drhd->devices_cnt) |
3271 | drhd->ignored = 1; | 3495 | drhd->ignored = 1; |
3272 | } | 3496 | } |
3273 | } | 3497 | } |
3274 | 3498 | ||
3275 | for_each_active_drhd_unit(drhd) { | 3499 | for_each_active_drhd_unit(drhd) { |
3276 | int i; | ||
3277 | if (drhd->include_all) | 3500 | if (drhd->include_all) |
3278 | continue; | 3501 | continue; |
3279 | 3502 | ||
3280 | for (i = 0; i < drhd->devices_cnt; i++) | 3503 | for_each_active_dev_scope(drhd->devices, |
3281 | if (drhd->devices[i] && | 3504 | drhd->devices_cnt, i, dev) |
3282 | !IS_GFX_DEVICE(drhd->devices[i])) | 3505 | if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev))) |
3283 | break; | 3506 | break; |
3284 | |||
3285 | if (i < drhd->devices_cnt) | 3507 | if (i < drhd->devices_cnt) |
3286 | continue; | 3508 | continue; |
3287 | 3509 | ||
@@ -3291,11 +3513,9 @@ static void __init init_no_remapping_devices(void) | |||
3291 | intel_iommu_gfx_mapped = 1; | 3513 | intel_iommu_gfx_mapped = 1; |
3292 | } else { | 3514 | } else { |
3293 | drhd->ignored = 1; | 3515 | drhd->ignored = 1; |
3294 | for (i = 0; i < drhd->devices_cnt; i++) { | 3516 | for_each_active_dev_scope(drhd->devices, |
3295 | if (!drhd->devices[i]) | 3517 | drhd->devices_cnt, i, dev) |
3296 | continue; | 3518 | dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; |
3297 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
3298 | } | ||
3299 | } | 3519 | } |
3300 | } | 3520 | } |
3301 | } | 3521 | } |
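init_no_remapping_devices() leans on the for_each_active_dev_scope()/break idiom twice: break on the first counter-example, then compare the loop index with the count afterwards to learn whether the whole scope passed. The same idiom in plain C, with a fabricated device list:

#include <stdbool.h>
#include <stdio.h>

static bool all_gfx(const bool *is_gfx, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (!is_gfx[i])
			break;   /* first non-GFX device ends the scan */

	return i == cnt;         /* no break means every device was GFX */
}

int main(void)
{
	bool devs[] = { true, true, false };

	printf("gfx-only unit: %d\n", all_gfx(devs, 3));
	return 0;
}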
@@ -3438,13 +3658,6 @@ static void __init init_iommu_pm_ops(void) | |||
3438 | static inline void init_iommu_pm_ops(void) {} | 3658 | static inline void init_iommu_pm_ops(void) {} |
3439 | #endif /* CONFIG_PM */ | 3659 | #endif /* CONFIG_PM */ |
3440 | 3660 | ||
3441 | LIST_HEAD(dmar_rmrr_units); | ||
3442 | |||
3443 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
3444 | { | ||
3445 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
3446 | } | ||
3447 | |||
3448 | 3661 | ||
3449 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 3662 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) |
3450 | { | 3663 | { |
@@ -3459,25 +3672,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | |||
3459 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 3672 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
3460 | rmrru->base_address = rmrr->base_address; | 3673 | rmrru->base_address = rmrr->base_address; |
3461 | rmrru->end_address = rmrr->end_address; | 3674 | rmrru->end_address = rmrr->end_address; |
3675 | rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), | ||
3676 | ((void *)rmrr) + rmrr->header.length, | ||
3677 | &rmrru->devices_cnt); | ||
3678 | if (rmrru->devices_cnt && rmrru->devices == NULL) { | ||
3679 | kfree(rmrru); | ||
3680 | return -ENOMEM; | ||
3681 | } | ||
3462 | 3682 | ||
3463 | dmar_register_rmrr_unit(rmrru); | 3683 | list_add(&rmrru->list, &dmar_rmrr_units); |
3464 | return 0; | ||
3465 | } | ||
3466 | |||
3467 | static int __init | ||
3468 | rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) | ||
3469 | { | ||
3470 | struct acpi_dmar_reserved_memory *rmrr; | ||
3471 | 3684 | ||
3472 | rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; | 3685 | return 0; |
3473 | return dmar_parse_dev_scope((void *)(rmrr + 1), | ||
3474 | ((void *)rmrr) + rmrr->header.length, | ||
3475 | &rmrru->devices_cnt, &rmrru->devices, | ||
3476 | rmrr->segment); | ||
3477 | } | 3686 | } |
3478 | 3687 | ||
3479 | static LIST_HEAD(dmar_atsr_units); | ||
3480 | |||
3481 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | 3688 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) |
3482 | { | 3689 | { |
3483 | struct acpi_dmar_atsr *atsr; | 3690 | struct acpi_dmar_atsr *atsr; |
@@ -3490,26 +3697,21 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | |||
3490 | 3697 | ||
3491 | atsru->hdr = hdr; | 3698 | atsru->hdr = hdr; |
3492 | atsru->include_all = atsr->flags & 0x1; | 3699 | atsru->include_all = atsr->flags & 0x1; |
3700 | if (!atsru->include_all) { | ||
3701 | atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), | ||
3702 | (void *)atsr + atsr->header.length, | ||
3703 | &atsru->devices_cnt); | ||
3704 | if (atsru->devices_cnt && atsru->devices == NULL) { | ||
3705 | kfree(atsru); | ||
3706 | return -ENOMEM; | ||
3707 | } | ||
3708 | } | ||
3493 | 3709 | ||
3494 | list_add(&atsru->list, &dmar_atsr_units); | 3710 | list_add_rcu(&atsru->list, &dmar_atsr_units); |
3495 | 3711 | ||
3496 | return 0; | 3712 | return 0; |
3497 | } | 3713 | } |
3498 | 3714 | ||
3499 | static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) | ||
3500 | { | ||
3501 | struct acpi_dmar_atsr *atsr; | ||
3502 | |||
3503 | if (atsru->include_all) | ||
3504 | return 0; | ||
3505 | |||
3506 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3507 | return dmar_parse_dev_scope((void *)(atsr + 1), | ||
3508 | (void *)atsr + atsr->header.length, | ||
3509 | &atsru->devices_cnt, &atsru->devices, | ||
3510 | atsr->segment); | ||
3511 | } | ||
3512 | |||
3513 | static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) | 3715 | static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) |
3514 | { | 3716 | { |
3515 | dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); | 3717 | dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); |
@@ -3535,62 +3737,97 @@ static void intel_iommu_free_dmars(void) | |||
3535 | 3737 | ||
3536 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) | 3738 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) |
3537 | { | 3739 | { |
3538 | int i; | 3740 | int i, ret = 1; |
3539 | struct pci_bus *bus; | 3741 | struct pci_bus *bus; |
3742 | struct pci_dev *bridge = NULL; | ||
3743 | struct device *tmp; | ||
3540 | struct acpi_dmar_atsr *atsr; | 3744 | struct acpi_dmar_atsr *atsr; |
3541 | struct dmar_atsr_unit *atsru; | 3745 | struct dmar_atsr_unit *atsru; |
3542 | 3746 | ||
3543 | dev = pci_physfn(dev); | 3747 | dev = pci_physfn(dev); |
3544 | |||
3545 | list_for_each_entry(atsru, &dmar_atsr_units, list) { | ||
3546 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3547 | if (atsr->segment == pci_domain_nr(dev->bus)) | ||
3548 | goto found; | ||
3549 | } | ||
3550 | |||
3551 | return 0; | ||
3552 | |||
3553 | found: | ||
3554 | for (bus = dev->bus; bus; bus = bus->parent) { | 3748 | for (bus = dev->bus; bus; bus = bus->parent) { |
3555 | struct pci_dev *bridge = bus->self; | 3749 | bridge = bus->self; |
3556 | |||
3557 | if (!bridge || !pci_is_pcie(bridge) || | 3750 | if (!bridge || !pci_is_pcie(bridge) || |
3558 | pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) | 3751 | pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) |
3559 | return 0; | 3752 | return 0; |
3560 | 3753 | if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) | |
3561 | if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { | ||
3562 | for (i = 0; i < atsru->devices_cnt; i++) | ||
3563 | if (atsru->devices[i] == bridge) | ||
3564 | return 1; | ||
3565 | break; | 3754 | break; |
3566 | } | ||
3567 | } | 3755 | } |
3756 | if (!bridge) | ||
3757 | return 0; | ||
3568 | 3758 | ||
3569 | if (atsru->include_all) | 3759 | rcu_read_lock(); |
3570 | return 1; | 3760 | list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { |
3761 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3762 | if (atsr->segment != pci_domain_nr(dev->bus)) | ||
3763 | continue; | ||
3571 | 3764 | ||
3572 | return 0; | 3765 | for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) |
3766 | if (tmp == &bridge->dev) | ||
3767 | goto out; | ||
3768 | |||
3769 | if (atsru->include_all) | ||
3770 | goto out; | ||
3771 | } | ||
3772 | ret = 0; | ||
3773 | out: | ||
3774 | rcu_read_unlock(); | ||
3775 | |||
3776 | return ret; | ||
3573 | } | 3777 | } |
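dmar_find_matched_atsr_unit() now climbs the bus hierarchy to the upstream root port first and only then matches that one bridge against the ATSR scopes under RCU, instead of picking an ATSR unit before walking the bus. A sketch of the parent-chain walk on a fabricated topology:

#include <stdio.h>

struct bridge {
	const char *name;
	int is_root_port;
};

struct bus {
	struct bus *parent;
	struct bridge *self;   /* bridge leading to this bus, NULL at the top */
};

static struct bridge *find_root_port(struct bus *bus)
{
	struct bridge *bridge = NULL;

	for (; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge)
			return NULL;          /* reached the host bridge */
		if (bridge->is_root_port)
			break;                /* what the ATSR scopes list */
	}
	return bridge;
}

int main(void)
{
	struct bridge rp = { "root-port", 1 }, sw = { "switch", 0 };
	struct bus root = { NULL, NULL };
	struct bus b1 = { &root, &rp };
	struct bus b2 = { &b1, &sw };   /* device bus hangs off the switch */

	struct bridge *b = find_root_port(&b2);

	printf("%s\n", b ? b->name : "none");
	return 0;
}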
3574 | 3778 | ||
3575 | int __init dmar_parse_rmrr_atsr_dev(void) | 3779 | int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) |
3576 | { | 3780 | { |
3577 | struct dmar_rmrr_unit *rmrr; | ||
3578 | struct dmar_atsr_unit *atsr; | ||
3579 | int ret = 0; | 3781 | int ret = 0; |
3782 | struct dmar_rmrr_unit *rmrru; | ||
3783 | struct dmar_atsr_unit *atsru; | ||
3784 | struct acpi_dmar_atsr *atsr; | ||
3785 | struct acpi_dmar_reserved_memory *rmrr; | ||
3580 | 3786 | ||
3581 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) { | 3787 | if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING) |
3582 | ret = rmrr_parse_dev(rmrr); | 3788 | return 0; |
3583 | if (ret) | 3789 | |
3584 | return ret; | 3790 | list_for_each_entry(rmrru, &dmar_rmrr_units, list) { |
3791 | rmrr = container_of(rmrru->hdr, | ||
3792 | struct acpi_dmar_reserved_memory, header); | ||
3793 | if (info->event == BUS_NOTIFY_ADD_DEVICE) { | ||
3794 | ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), | ||
3795 | ((void *)rmrr) + rmrr->header.length, | ||
3796 | rmrr->segment, rmrru->devices, | ||
3797 | rmrru->devices_cnt); | ||
3798 | if (ret > 0) | ||
3799 | break; | ||
3800 | else if (ret < 0) | ||
3801 | return ret; | ||
3802 | } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { | ||
3803 | if (dmar_remove_dev_scope(info, rmrr->segment, | ||
3804 | rmrru->devices, rmrru->devices_cnt)) | ||
3805 | break; | ||
3806 | } | ||
3585 | } | 3807 | } |
3586 | 3808 | ||
3587 | list_for_each_entry(atsr, &dmar_atsr_units, list) { | 3809 | list_for_each_entry(atsru, &dmar_atsr_units, list) { |
3588 | ret = atsr_parse_dev(atsr); | 3810 | if (atsru->include_all) |
3589 | if (ret) | 3811 | continue; |
3590 | return ret; | 3812 | |
3813 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3814 | if (info->event == BUS_NOTIFY_ADD_DEVICE) { | ||
3815 | ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), | ||
3816 | (void *)atsr + atsr->header.length, | ||
3817 | atsr->segment, atsru->devices, | ||
3818 | atsru->devices_cnt); | ||
3819 | if (ret > 0) | ||
3820 | break; | ||
3821 | else if(ret < 0) | ||
3822 | else if (ret < 0) | ||
3823 | } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { | ||
3824 | if (dmar_remove_dev_scope(info, atsr->segment, | ||
3825 | atsru->devices, atsru->devices_cnt)) | ||
3826 | break; | ||
3827 | } | ||
3591 | } | 3828 | } |
3592 | 3829 | ||
3593 | return ret; | 3830 | return 0; |
3594 | } | 3831 | } |
3595 | 3832 | ||
3596 | /* | 3833 | /* |
@@ -3603,24 +3840,26 @@ static int device_notifier(struct notifier_block *nb, | |||
3603 | unsigned long action, void *data) | 3840 | unsigned long action, void *data) |
3604 | { | 3841 | { |
3605 | struct device *dev = data; | 3842 | struct device *dev = data; |
3606 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3607 | struct dmar_domain *domain; | 3843 | struct dmar_domain *domain; |
3608 | 3844 | ||
3609 | if (iommu_no_mapping(dev)) | 3845 | if (iommu_dummy(dev)) |
3610 | return 0; | 3846 | return 0; |
3611 | 3847 | ||
3612 | domain = find_domain(pdev); | 3848 | if (action != BUS_NOTIFY_UNBOUND_DRIVER && |
3613 | if (!domain) | 3849 | action != BUS_NOTIFY_DEL_DEVICE) |
3614 | return 0; | 3850 | return 0; |
3615 | 3851 | ||
3616 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { | 3852 | domain = find_domain(dev); |
3617 | domain_remove_one_dev_info(domain, pdev); | 3853 | if (!domain) |
3854 | return 0; | ||
3618 | 3855 | ||
3619 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | 3856 | down_read(&dmar_global_lock); |
3620 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && | 3857 | domain_remove_one_dev_info(domain, dev); |
3621 | list_empty(&domain->devices)) | 3858 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && |
3622 | domain_exit(domain); | 3859 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && |
3623 | } | 3860 | list_empty(&domain->devices)) |
3861 | domain_exit(domain); | ||
3862 | up_read(&dmar_global_lock); | ||
3624 | 3863 | ||
3625 | return 0; | 3864 | return 0; |
3626 | } | 3865 | } |
@@ -3629,6 +3868,75 @@ static struct notifier_block device_nb = { | |||
3629 | .notifier_call = device_notifier, | 3868 | .notifier_call = device_notifier, |
3630 | }; | 3869 | }; |
3631 | 3870 | ||
3871 | static int intel_iommu_memory_notifier(struct notifier_block *nb, | ||
3872 | unsigned long val, void *v) | ||
3873 | { | ||
3874 | struct memory_notify *mhp = v; | ||
3875 | unsigned long long start, end; | ||
3876 | unsigned long start_vpfn, last_vpfn; | ||
3877 | |||
3878 | switch (val) { | ||
3879 | case MEM_GOING_ONLINE: | ||
3880 | start = mhp->start_pfn << PAGE_SHIFT; | ||
3881 | end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1; | ||
3882 | if (iommu_domain_identity_map(si_domain, start, end)) { | ||
3883 | pr_warn("dmar: failed to build identity map for [%llx-%llx]\n", | ||
3884 | start, end); | ||
3885 | return NOTIFY_BAD; | ||
3886 | } | ||
3887 | break; | ||
3888 | |||
3889 | case MEM_OFFLINE: | ||
3890 | case MEM_CANCEL_ONLINE: | ||
3891 | start_vpfn = mm_to_dma_pfn(mhp->start_pfn); | ||
3892 | last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1); | ||
3893 | while (start_vpfn <= last_vpfn) { | ||
3894 | struct iova *iova; | ||
3895 | struct dmar_drhd_unit *drhd; | ||
3896 | struct intel_iommu *iommu; | ||
3897 | struct page *freelist; | ||
3898 | |||
3899 | iova = find_iova(&si_domain->iovad, start_vpfn); | ||
3900 | if (iova == NULL) { | ||
3901 | pr_debug("dmar: failed to get IOVA for PFN %lx\n", | ||
3902 | start_vpfn); | ||
3903 | break; | ||
3904 | } | ||
3905 | |||
3906 | iova = split_and_remove_iova(&si_domain->iovad, iova, | ||
3907 | start_vpfn, last_vpfn); | ||
3908 | if (iova == NULL) { | ||
3909 | pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n", | ||
3910 | start_vpfn, last_vpfn); | ||
3911 | return NOTIFY_BAD; | ||
3912 | } | ||
3913 | |||
3914 | freelist = domain_unmap(si_domain, iova->pfn_lo, | ||
3915 | iova->pfn_hi); | ||
3916 | |||
3917 | rcu_read_lock(); | ||
3918 | for_each_active_iommu(iommu, drhd) | ||
3919 | iommu_flush_iotlb_psi(iommu, si_domain->id, | ||
3920 | iova->pfn_lo, | ||
3921 | iova->pfn_hi - iova->pfn_lo + 1, | ||
3922 | !freelist, 0); | ||
3923 | rcu_read_unlock(); | ||
3924 | dma_free_pagelist(freelist); | ||
3925 | |||
3926 | start_vpfn = iova->pfn_hi + 1; | ||
3927 | free_iova_mem(iova); | ||
3928 | } | ||
3929 | break; | ||
3930 | } | ||
3931 | |||
3932 | return NOTIFY_OK; | ||
3933 | } | ||
3934 | |||
3935 | static struct notifier_block intel_iommu_memory_nb = { | ||
3936 | .notifier_call = intel_iommu_memory_notifier, | ||
3937 | .priority = 0 | ||
3938 | }; | ||
3939 | |||
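intel_iommu_memory_notifier() above is a memory-hotplug callback: on MEM_GOING_ONLINE it extends the static identity map over the incoming range (vetoing the hot-add with NOTIFY_BAD on failure), and on MEM_OFFLINE/MEM_CANCEL_ONLINE it unmaps the range again and flushes the IOTLB of every active IOMMU. The mm_to_dma_pfn() conversion makes the unit change explicit: on x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12, so it is effectively an identity shift from CPU page frames to VT-d page frames. The skeleton of such a notifier, with the map/unmap bodies elided (a sketch, not the driver's code):

    #include <linux/memory.h>
    #include <linux/notifier.h>

    static int my_memory_notifier(struct notifier_block *nb,
                                  unsigned long val, void *v)
    {
            struct memory_notify *mhp = v;

            switch (val) {
            case MEM_GOING_ONLINE:
                    /* Map the new range; returning NOTIFY_BAD here
                     * vetoes the hot-add. */
                    pr_debug("mapping pfns %lx..%lx\n", mhp->start_pfn,
                             mhp->start_pfn + mhp->nr_pages - 1);
                    break;
            case MEM_OFFLINE:
            case MEM_CANCEL_ONLINE:
                    /* Undo whatever MEM_GOING_ONLINE set up. */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_memory_nb = {
            .notifier_call = my_memory_notifier,
            .priority = 0,
    };

    /* register_memory_notifier(&my_memory_nb); */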
3632 | int __init intel_iommu_init(void) | 3940 | int __init intel_iommu_init(void) |
3633 | { | 3941 | { |
3634 | int ret = -ENODEV; | 3942 | int ret = -ENODEV; |
@@ -3638,6 +3946,13 @@ int __init intel_iommu_init(void) | |||
3638 | /* VT-d is required for a TXT/tboot launch, so enforce that */ | 3946 | /* VT-d is required for a TXT/tboot launch, so enforce that */ |
3639 | force_on = tboot_force_iommu(); | 3947 | force_on = tboot_force_iommu(); |
3640 | 3948 | ||
3949 | if (iommu_init_mempool()) { | ||
3950 | if (force_on) | ||
3951 | panic("tboot: Failed to initialize iommu memory\n"); | ||
3952 | return -ENOMEM; | ||
3953 | } | ||
3954 | |||
3955 | down_write(&dmar_global_lock); | ||
3641 | if (dmar_table_init()) { | 3956 | if (dmar_table_init()) { |
3642 | if (force_on) | 3957 | if (force_on) |
3643 | panic("tboot: Failed to initialize DMAR table\n"); | 3958 | panic("tboot: Failed to initialize DMAR table\n"); |
@@ -3660,12 +3975,6 @@ int __init intel_iommu_init(void) | |||
3660 | if (no_iommu || dmar_disabled) | 3975 | if (no_iommu || dmar_disabled) |
3661 | goto out_free_dmar; | 3976 | goto out_free_dmar; |
3662 | 3977 | ||
3663 | if (iommu_init_mempool()) { | ||
3664 | if (force_on) | ||
3665 | panic("tboot: Failed to initialize iommu memory\n"); | ||
3666 | goto out_free_dmar; | ||
3667 | } | ||
3668 | |||
3669 | if (list_empty(&dmar_rmrr_units)) | 3978 | if (list_empty(&dmar_rmrr_units)) |
3670 | printk(KERN_INFO "DMAR: No RMRR found\n"); | 3979 | printk(KERN_INFO "DMAR: No RMRR found\n"); |
3671 | 3980 | ||
@@ -3675,7 +3984,7 @@ int __init intel_iommu_init(void) | |||
3675 | if (dmar_init_reserved_ranges()) { | 3984 | if (dmar_init_reserved_ranges()) { |
3676 | if (force_on) | 3985 | if (force_on) |
3677 | panic("tboot: Failed to reserve iommu ranges\n"); | 3986 | panic("tboot: Failed to reserve iommu ranges\n"); |
3678 | goto out_free_mempool; | 3987 | goto out_free_reserved_range; |
3679 | } | 3988 | } |
3680 | 3989 | ||
3681 | init_no_remapping_devices(); | 3990 | init_no_remapping_devices(); |
@@ -3687,6 +3996,7 @@ int __init intel_iommu_init(void) | |||
3687 | printk(KERN_ERR "IOMMU: dmar init failed\n"); | 3996 | printk(KERN_ERR "IOMMU: dmar init failed\n"); |
3688 | goto out_free_reserved_range; | 3997 | goto out_free_reserved_range; |
3689 | } | 3998 | } |
3999 | up_write(&dmar_global_lock); | ||
3690 | printk(KERN_INFO | 4000 | printk(KERN_INFO |
3691 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); | 4001 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
3692 | 4002 | ||
@@ -3699,8 +4009,9 @@ int __init intel_iommu_init(void) | |||
3699 | init_iommu_pm_ops(); | 4009 | init_iommu_pm_ops(); |
3700 | 4010 | ||
3701 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); | 4011 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); |
3702 | |||
3703 | bus_register_notifier(&pci_bus_type, &device_nb); | 4012 | bus_register_notifier(&pci_bus_type, &device_nb); |
4013 | if (si_domain && !hw_pass_through) | ||
4014 | register_memory_notifier(&intel_iommu_memory_nb); | ||
3704 | 4015 | ||
3705 | intel_iommu_enabled = 1; | 4016 | intel_iommu_enabled = 1; |
3706 | 4017 | ||
@@ -3708,21 +4019,23 @@ int __init intel_iommu_init(void) | |||
3708 | 4019 | ||
3709 | out_free_reserved_range: | 4020 | out_free_reserved_range: |
3710 | put_iova_domain(&reserved_iova_list); | 4021 | put_iova_domain(&reserved_iova_list); |
3711 | out_free_mempool: | ||
3712 | iommu_exit_mempool(); | ||
3713 | out_free_dmar: | 4022 | out_free_dmar: |
3714 | intel_iommu_free_dmars(); | 4023 | intel_iommu_free_dmars(); |
4024 | up_write(&dmar_global_lock); | ||
4025 | iommu_exit_mempool(); | ||
3715 | return ret; | 4026 | return ret; |
3716 | } | 4027 | } |
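Two related changes to intel_iommu_init() are visible in the hunks above: iommu_init_mempool() moves ahead of the newly introduced dmar_global_lock write-side critical section, and the exit path is reordered so that teardown happens strictly in reverse order of setup (the out_free_mempool label disappears, and up_write() plus iommu_exit_mempool() now run unconditionally on the error path). The general shape of that goto-unwind pattern, with hypothetical helpers standing in for the driver's:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_lock);          /* plays the role of dmar_global_lock */

    static int setup_a(void) { return 0; }  /* e.g. iommu_init_mempool() */
    static int setup_b(void) { return 0; }  /* e.g. dmar_table_init() */
    static int setup_c(void) { return 0; }  /* e.g. init_dmars() */
    static void teardown_b(void) { }        /* e.g. intel_iommu_free_dmars() */
    static void teardown_a(void) { }        /* e.g. iommu_exit_mempool() */

    static int my_init(void)
    {
            int ret;

            ret = setup_a();                /* acquired before the lock ... */
            if (ret)
                    return ret;

            down_write(&my_lock);

            ret = setup_b();
            if (ret)
                    goto out_a;

            ret = setup_c();
            if (ret)
                    goto out_b;

            up_write(&my_lock);
            return 0;

    out_b:
            teardown_b();
    out_a:
            up_write(&my_lock);
            teardown_a();                   /* ... so released after the unlock */
            return ret;
    }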
3717 | 4028 | ||
3718 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 4029 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
3719 | struct pci_dev *pdev) | 4030 | struct device *dev) |
3720 | { | 4031 | { |
3721 | struct pci_dev *tmp, *parent; | 4032 | struct pci_dev *tmp, *parent, *pdev; |
3722 | 4033 | ||
3723 | if (!iommu || !pdev) | 4034 | if (!iommu || !dev || !dev_is_pci(dev)) |
3724 | return; | 4035 | return; |
3725 | 4036 | ||
4037 | pdev = to_pci_dev(dev); | ||
4038 | |||
3726 | /* dependent device detach */ | 4039 | /* dependent device detach */ |
3727 | tmp = pci_find_upstream_pcie_bridge(pdev); | 4040 | tmp = pci_find_upstream_pcie_bridge(pdev); |
3728 | /* Secondary interface's bus number and devfn 0 */ | 4041 | /* Secondary interface's bus number and devfn 0 */ |
@@ -3743,29 +4056,28 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
3743 | } | 4056 | } |
3744 | 4057 | ||
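A recurring theme of this patch is widening interfaces from struct pci_dev * to struct device *, presumably to prepare for non-PCI (e.g. ACPI-enumerated) devices. Where PCI-specific topology work is still required, as in iommu_detach_dependent_devices() above, the function now guards with dev_is_pci() and converts back. The pattern in isolation (a sketch):

    #include <linux/pci.h>

    static void my_handle_device(struct device *dev)
    {
            struct pci_dev *pdev;

            /* Non-PCI devices simply have no dependent PCI
             * devices or upstream bridges to walk. */
            if (!dev || !dev_is_pci(dev))
                    return;

            pdev = to_pci_dev(dev);
            /* ... PCI-only work, e.g. walking upstream bridges ... */
            dev_dbg(&pdev->dev, "handling PCI device %s\n", pci_name(pdev));
    }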
3745 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | 4058 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
3746 | struct pci_dev *pdev) | 4059 | struct device *dev) |
3747 | { | 4060 | { |
3748 | struct device_domain_info *info, *tmp; | 4061 | struct device_domain_info *info, *tmp; |
3749 | struct intel_iommu *iommu; | 4062 | struct intel_iommu *iommu; |
3750 | unsigned long flags; | 4063 | unsigned long flags; |
3751 | int found = 0; | 4064 | int found = 0; |
4065 | u8 bus, devfn; | ||
3752 | 4066 | ||
3753 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, | 4067 | iommu = device_to_iommu(dev, &bus, &devfn); |
3754 | pdev->devfn); | ||
3755 | if (!iommu) | 4068 | if (!iommu) |
3756 | return; | 4069 | return; |
3757 | 4070 | ||
3758 | spin_lock_irqsave(&device_domain_lock, flags); | 4071 | spin_lock_irqsave(&device_domain_lock, flags); |
3759 | list_for_each_entry_safe(info, tmp, &domain->devices, link) { | 4072 | list_for_each_entry_safe(info, tmp, &domain->devices, link) { |
3760 | if (info->segment == pci_domain_nr(pdev->bus) && | 4073 | if (info->iommu == iommu && info->bus == bus && |
3761 | info->bus == pdev->bus->number && | 4074 | info->devfn == devfn) { |
3762 | info->devfn == pdev->devfn) { | ||
3763 | unlink_domain_info(info); | 4075 | unlink_domain_info(info); |
3764 | spin_unlock_irqrestore(&device_domain_lock, flags); | 4076 | spin_unlock_irqrestore(&device_domain_lock, flags); |
3765 | 4077 | ||
3766 | iommu_disable_dev_iotlb(info); | 4078 | iommu_disable_dev_iotlb(info); |
3767 | iommu_detach_dev(iommu, info->bus, info->devfn); | 4079 | iommu_detach_dev(iommu, info->bus, info->devfn); |
3768 | iommu_detach_dependent_devices(iommu, pdev); | 4080 | iommu_detach_dependent_devices(iommu, dev); |
3769 | free_devinfo_mem(info); | 4081 | free_devinfo_mem(info); |
3770 | 4082 | ||
3771 | spin_lock_irqsave(&device_domain_lock, flags); | 4083 | spin_lock_irqsave(&device_domain_lock, flags); |
@@ -3780,8 +4092,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3780 | * owned by this domain, clear this iommu in iommu_bmp | 4092 | * owned by this domain, clear this iommu in iommu_bmp |
3781 | * update iommu count and coherency | 4093 | * update iommu count and coherency |
3782 | */ | 4094 | */ |
3783 | if (iommu == device_to_iommu(info->segment, info->bus, | 4095 | if (info->iommu == iommu) |
3784 | info->devfn)) | ||
3785 | found = 1; | 4096 | found = 1; |
3786 | } | 4097 | } |
3787 | 4098 | ||
@@ -3805,67 +4116,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3805 | } | 4116 | } |
3806 | } | 4117 | } |
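The other half of the same theme: device_to_iommu() now takes a struct device * and returns the bus/devfn pair through out-parameters (the signature is inferred from the call sites in this diff). Because the returned struct intel_iommu * already identifies the PCI segment, the match in the loop above shrinks from (segment, bus, devfn) to the (iommu, bus, devfn) triple. Call-site shape:

    static void my_lookup(struct device *dev)
    {
            struct intel_iommu *iommu;
            u8 bus, devfn;

            iommu = device_to_iommu(dev, &bus, &devfn);
            if (!iommu)
                    return;         /* not behind any DMAR unit */

            /* (iommu, bus, devfn) is now a unique key into the
             * device_domain_info lists. */
    }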
3807 | 4118 | ||
3808 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | ||
3809 | { | ||
3810 | struct device_domain_info *info; | ||
3811 | struct intel_iommu *iommu; | ||
3812 | unsigned long flags1, flags2; | ||
3813 | |||
3814 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
3815 | while (!list_empty(&domain->devices)) { | ||
3816 | info = list_entry(domain->devices.next, | ||
3817 | struct device_domain_info, link); | ||
3818 | unlink_domain_info(info); | ||
3819 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
3820 | |||
3821 | iommu_disable_dev_iotlb(info); | ||
3822 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); | ||
3823 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
3824 | iommu_detach_dependent_devices(iommu, info->dev); | ||
3825 | |||
3826 | /* clear this iommu in iommu_bmp, update iommu count | ||
3827 | * and capabilities | ||
3828 | */ | ||
3829 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
3830 | if (test_and_clear_bit(iommu->seq_id, | ||
3831 | domain->iommu_bmp)) { | ||
3832 | domain->iommu_count--; | ||
3833 | domain_update_iommu_cap(domain); | ||
3834 | } | ||
3835 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
3836 | |||
3837 | free_devinfo_mem(info); | ||
3838 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
3839 | } | ||
3840 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
3841 | } | ||
3842 | |||
3843 | /* domain id for virtual machine; it won't be set in context */ | ||
3844 | static atomic_t vm_domid = ATOMIC_INIT(0); | ||

3845 | |||
3846 | static struct dmar_domain *iommu_alloc_vm_domain(void) | ||
3847 | { | ||
3848 | struct dmar_domain *domain; | ||
3849 | |||
3850 | domain = alloc_domain_mem(); | ||
3851 | if (!domain) | ||
3852 | return NULL; | ||
3853 | |||
3854 | domain->id = atomic_inc_return(&vm_domid); | ||
3855 | domain->nid = -1; | ||
3856 | memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); | ||
3857 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
3858 | |||
3859 | return domain; | ||
3860 | } | ||
3861 | |||
3862 | static int md_domain_init(struct dmar_domain *domain, int guest_width) | 4119 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
3863 | { | 4120 | { |
3864 | int adjust_width; | 4121 | int adjust_width; |
3865 | 4122 | ||
3866 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 4123 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
3867 | spin_lock_init(&domain->iommu_lock); | ||
3868 | |||
3869 | domain_reserve_special_ranges(domain); | 4124 | domain_reserve_special_ranges(domain); |
3870 | 4125 | ||
3871 | /* calculate AGAW */ | 4126 | /* calculate AGAW */ |
@@ -3873,9 +4128,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
3873 | adjust_width = guestwidth_to_adjustwidth(guest_width); | 4128 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
3874 | domain->agaw = width_to_agaw(adjust_width); | 4129 | domain->agaw = width_to_agaw(adjust_width); |
3875 | 4130 | ||
3876 | INIT_LIST_HEAD(&domain->devices); | ||
3877 | |||
3878 | domain->iommu_count = 0; | ||
3879 | domain->iommu_coherency = 0; | 4131 | domain->iommu_coherency = 0; |
3880 | domain->iommu_snooping = 0; | 4132 | domain->iommu_snooping = 0; |
3881 | domain->iommu_superpage = 0; | 4133 | domain->iommu_superpage = 0; |
@@ -3890,53 +4142,11 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
3890 | return 0; | 4142 | return 0; |
3891 | } | 4143 | } |
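md_domain_init() (the hunk above) is now reduced to geometry setup: the list, lock, and count initialisation it used to duplicate has moved into the common allocator. For reference, the width arithmetic it performs: guestwidth_to_adjustwidth(), defined earlier in this file, rounds the requested guest address width up to a page-table level boundary of 12 offset bits plus a multiple of 9, capped at 64, and width_to_agaw() then converts the adjusted width into the number of 9-bit levels above the 30-bit baseline. A paraphrase of the rounding:

    /* Paraphrase of guestwidth_to_adjustwidth(): 12 page-offset bits
     * plus 9 bits per page-table level, capped at 64. */
    static int my_adjust_width(int gaw)
    {
            int r = (gaw - 12) % 9;
            int agaw = (r == 0) ? gaw : gaw + 9 - r;

            return (agaw > 64) ? 64 : agaw;
    }

    /* e.g. 48 is already a boundary (12 + 4*9) and stays 48,
     * while 50 rounds up to 57 (12 + 5*9). */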
3892 | 4144 | ||
3893 | static void iommu_free_vm_domain(struct dmar_domain *domain) | ||
3894 | { | ||
3895 | unsigned long flags; | ||
3896 | struct dmar_drhd_unit *drhd; | ||
3897 | struct intel_iommu *iommu; | ||
3898 | unsigned long i; | ||
3899 | unsigned long ndomains; | ||
3900 | |||
3901 | for_each_active_iommu(iommu, drhd) { | ||
3902 | ndomains = cap_ndoms(iommu->cap); | ||
3903 | for_each_set_bit(i, iommu->domain_ids, ndomains) { | ||
3904 | if (iommu->domains[i] == domain) { | ||
3905 | spin_lock_irqsave(&iommu->lock, flags); | ||
3906 | clear_bit(i, iommu->domain_ids); | ||
3907 | iommu->domains[i] = NULL; | ||
3908 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
3909 | break; | ||
3910 | } | ||
3911 | } | ||
3912 | } | ||
3913 | } | ||
3914 | |||
3915 | static void vm_domain_exit(struct dmar_domain *domain) | ||
3916 | { | ||
3917 | /* Domain 0 is reserved, so dont process it */ | ||
3918 | if (!domain) | ||
3919 | return; | ||
3920 | |||
3921 | vm_domain_remove_all_dev_info(domain); | ||
3922 | /* destroy iovas */ | ||
3923 | put_iova_domain(&domain->iovad); | ||
3924 | |||
3925 | /* clear ptes */ | ||
3926 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
3927 | |||
3928 | /* free page tables */ | ||
3929 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | ||
3930 | |||
3931 | iommu_free_vm_domain(domain); | ||
3932 | free_domain_mem(domain); | ||
3933 | } | ||
3934 | |||
3935 | static int intel_iommu_domain_init(struct iommu_domain *domain) | 4145 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
3936 | { | 4146 | { |
3937 | struct dmar_domain *dmar_domain; | 4147 | struct dmar_domain *dmar_domain; |
3938 | 4148 | ||
3939 | dmar_domain = iommu_alloc_vm_domain(); | 4149 | dmar_domain = alloc_domain(true); |
3940 | if (!dmar_domain) { | 4150 | if (!dmar_domain) { |
3941 | printk(KERN_ERR | 4151 | printk(KERN_ERR |
3942 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 4152 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
@@ -3945,7 +4155,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
3945 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 4155 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
3946 | printk(KERN_ERR | 4156 | printk(KERN_ERR |
3947 | "intel_iommu_domain_init() failed\n"); | 4157 | "intel_iommu_domain_init() failed\n"); |
3948 | vm_domain_exit(dmar_domain); | 4158 | domain_exit(dmar_domain); |
3949 | return -ENOMEM; | 4159 | return -ENOMEM; |
3950 | } | 4160 | } |
3951 | domain_update_iommu_cap(dmar_domain); | 4161 | domain_update_iommu_cap(dmar_domain); |
@@ -3963,33 +4173,32 @@ static void intel_iommu_domain_destroy(struct iommu_domain *domain) | |||
3963 | struct dmar_domain *dmar_domain = domain->priv; | 4173 | struct dmar_domain *dmar_domain = domain->priv; |
3964 | 4174 | ||
3965 | domain->priv = NULL; | 4175 | domain->priv = NULL; |
3966 | vm_domain_exit(dmar_domain); | 4176 | domain_exit(dmar_domain); |
3967 | } | 4177 | } |
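With the VM-specific helpers gone (vm_domain_remove_all_dev_info(), iommu_alloc_vm_domain(), iommu_free_vm_domain() and vm_domain_exit() are all removed above), domain allocation and teardown are unified: intel_iommu_domain_init() calls alloc_domain(true) and both the error and destroy paths use the common domain_exit(). Judging from the removed lines, the vm flavour of the unified allocator presumably looks roughly like this reconstruction (not a verbatim copy of the new alloc_domain()):

    static struct dmar_domain *my_alloc_domain(bool vm)
    {
            static atomic_t vm_domid = ATOMIC_INIT(0);
            struct dmar_domain *domain;

            domain = alloc_domain_mem();
            if (!domain)
                    return NULL;

            domain->nid = -1;
            memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
            spin_lock_init(&domain->iommu_lock);
            INIT_LIST_HEAD(&domain->devices);
            if (vm) {
                    /* VM domain ids live outside the hardware domain-id
                     * space and are never written to context entries. */
                    domain->id = atomic_inc_return(&vm_domid);
                    domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
            }
            return domain;
    }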
3968 | 4178 | ||
3969 | static int intel_iommu_attach_device(struct iommu_domain *domain, | 4179 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
3970 | struct device *dev) | 4180 | struct device *dev) |
3971 | { | 4181 | { |
3972 | struct dmar_domain *dmar_domain = domain->priv; | 4182 | struct dmar_domain *dmar_domain = domain->priv; |
3973 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3974 | struct intel_iommu *iommu; | 4183 | struct intel_iommu *iommu; |
3975 | int addr_width; | 4184 | int addr_width; |
4185 | u8 bus, devfn; | ||
3976 | 4186 | ||
3977 | /* normally pdev is not mapped */ | 4187 | /* normally dev is not mapped */ |
3978 | if (unlikely(domain_context_mapped(pdev))) { | 4188 | if (unlikely(domain_context_mapped(dev))) { |
3979 | struct dmar_domain *old_domain; | 4189 | struct dmar_domain *old_domain; |
3980 | 4190 | ||
3981 | old_domain = find_domain(pdev); | 4191 | old_domain = find_domain(dev); |
3982 | if (old_domain) { | 4192 | if (old_domain) { |
3983 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || | 4193 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
3984 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) | 4194 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
3985 | domain_remove_one_dev_info(old_domain, pdev); | 4195 | domain_remove_one_dev_info(old_domain, dev); |
3986 | else | 4196 | else |
3987 | domain_remove_dev_info(old_domain); | 4197 | domain_remove_dev_info(old_domain); |
3988 | } | 4198 | } |
3989 | } | 4199 | } |
3990 | 4200 | ||
3991 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, | 4201 | iommu = device_to_iommu(dev, &bus, &devfn); |
3992 | pdev->devfn); | ||
3993 | if (!iommu) | 4202 | if (!iommu) |
3994 | return -ENODEV; | 4203 | return -ENODEV; |
3995 | 4204 | ||
@@ -4021,16 +4230,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
4021 | dmar_domain->agaw--; | 4230 | dmar_domain->agaw--; |
4022 | } | 4231 | } |
4023 | 4232 | ||
4024 | return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 4233 | return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL); |
4025 | } | 4234 | } |
4026 | 4235 | ||
4027 | static void intel_iommu_detach_device(struct iommu_domain *domain, | 4236 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
4028 | struct device *dev) | 4237 | struct device *dev) |
4029 | { | 4238 | { |
4030 | struct dmar_domain *dmar_domain = domain->priv; | 4239 | struct dmar_domain *dmar_domain = domain->priv; |
4031 | struct pci_dev *pdev = to_pci_dev(dev); | ||
4032 | 4240 | ||
4033 | domain_remove_one_dev_info(dmar_domain, pdev); | 4241 | domain_remove_one_dev_info(dmar_domain, dev); |
4034 | } | 4242 | } |
4035 | 4243 | ||
4036 | static int intel_iommu_map(struct iommu_domain *domain, | 4244 | static int intel_iommu_map(struct iommu_domain *domain, |
@@ -4072,18 +4280,51 @@ static int intel_iommu_map(struct iommu_domain *domain, | |||
4072 | } | 4280 | } |
4073 | 4281 | ||
4074 | static size_t intel_iommu_unmap(struct iommu_domain *domain, | 4282 | static size_t intel_iommu_unmap(struct iommu_domain *domain, |
4075 | unsigned long iova, size_t size) | 4283 | unsigned long iova, size_t size) |
4076 | { | 4284 | { |
4077 | struct dmar_domain *dmar_domain = domain->priv; | 4285 | struct dmar_domain *dmar_domain = domain->priv; |
4078 | int order; | 4286 | struct page *freelist = NULL; |
4287 | struct intel_iommu *iommu; | ||
4288 | unsigned long start_pfn, last_pfn; | ||
4289 | unsigned int npages; | ||
4290 | int iommu_id, num, ndomains, level = 0; | ||
4291 | |||
4292 | /* Cope with horrid API which requires us to unmap more than the | ||
4293 | size argument if it happens to be a large-page mapping. */ | ||
4294 | if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)) | ||
4295 | BUG(); | ||
4296 | |||
4297 | if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) | ||
4298 | size = VTD_PAGE_SIZE << level_to_offset_bits(level); | ||
4299 | |||
4300 | start_pfn = iova >> VTD_PAGE_SHIFT; | ||
4301 | last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; | ||
4302 | |||
4303 | freelist = domain_unmap(dmar_domain, start_pfn, last_pfn); | ||
4304 | |||
4305 | npages = last_pfn - start_pfn + 1; | ||
4306 | |||
4307 | for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) { | ||
4308 | iommu = g_iommus[iommu_id]; | ||
4309 | |||
4310 | /* | ||
4311 | * find bit position of dmar_domain | ||
4312 | */ | ||
4313 | ndomains = cap_ndoms(iommu->cap); | ||
4314 | for_each_set_bit(num, iommu->domain_ids, ndomains) { | ||
4315 | if (iommu->domains[num] == dmar_domain) | ||
4316 | iommu_flush_iotlb_psi(iommu, num, start_pfn, | ||
4317 | npages, !freelist, 0); | ||
4318 | } | ||
4319 | |||
4320 | } | ||
4079 | 4321 | ||
4080 | order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 4322 | dma_free_pagelist(freelist); |
4081 | (iova + size - 1) >> VTD_PAGE_SHIFT); | ||
4082 | 4323 | ||
4083 | if (dmar_domain->max_addr == iova + size) | 4324 | if (dmar_domain->max_addr == iova + size) |
4084 | dmar_domain->max_addr = iova; | 4325 | dmar_domain->max_addr = iova; |
4085 | 4326 | ||
4086 | return PAGE_SIZE << order; | 4327 | return size; |
4087 | } | 4328 | } |
4088 | 4329 | ||
4089 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 4330 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -4091,9 +4332,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | |||
4091 | { | 4332 | { |
4092 | struct dmar_domain *dmar_domain = domain->priv; | 4333 | struct dmar_domain *dmar_domain = domain->priv; |
4093 | struct dma_pte *pte; | 4334 | struct dma_pte *pte; |
4335 | int level = 0; | ||
4094 | u64 phys = 0; | 4336 | u64 phys = 0; |
4095 | 4337 | ||
4096 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); | 4338 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); |
4097 | if (pte) | 4339 | if (pte) |
4098 | phys = dma_pte_addr(pte); | 4340 | phys = dma_pte_addr(pte); |
4099 | 4341 | ||
@@ -4121,9 +4363,9 @@ static int intel_iommu_add_device(struct device *dev) | |||
4121 | struct pci_dev *bridge, *dma_pdev = NULL; | 4363 | struct pci_dev *bridge, *dma_pdev = NULL; |
4122 | struct iommu_group *group; | 4364 | struct iommu_group *group; |
4123 | int ret; | 4365 | int ret; |
4366 | u8 bus, devfn; | ||
4124 | 4367 | ||
4125 | if (!device_to_iommu(pci_domain_nr(pdev->bus), | 4368 | if (!device_to_iommu(dev, &bus, &devfn)) |
4126 | pdev->bus->number, pdev->devfn)) | ||
4127 | return -ENODEV; | 4369 | return -ENODEV; |
4128 | 4370 | ||
4129 | bridge = pci_find_upstream_pcie_bridge(pdev); | 4371 | bridge = pci_find_upstream_pcie_bridge(pdev); |