| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-23 22:49:24 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-23 22:49:24 -0400 |
| commit | be98eb2c2fc88d9d61cfeab5c11ab1118ca0bba9 (patch) | |
| tree | 2fda177b596534b9ec69d98f6843d24d7e234538 | |
| parent | cf5434e894a17bb8385997adc6d56642055a85d6 (diff) | |
| parent | f007e99c8e2e322b8331aba72414715119a2920d (diff) | |
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
Intel-IOMMU, intr-remap: source-id checking
Intel-IOMMU, intr-remap: set the whole 128bits of irte when modify/free it
IOMMU Identity Mapping Support (drivers/pci/intel_iommu.c)
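The second item above is easy to miss in the diff: an interrupt remap table entry (IRTE) is 128 bits wide, and the old modify/free paths rewrote only the low 64 bits, leaving the high half (where the new source-id fields live) stale. A minimal stand-alone sketch of the fixed behaviour, using an illustrative layout rather than the kernel's struct irte:

```c
/* Illustrative 128-bit remap entry; NOT the kernel's struct irte layout. */
#include <stdint.h>

struct remap_entry {
        uint64_t low;   /* present bit, vector, destination, ...   */
        uint64_t high;  /* source-id (SID), SQ and SVT fields, ... */
};

static void remap_entry_update(volatile struct remap_entry *dst,
                               const struct remap_entry *src)
{
        dst->low  = src->low;
        dst->high = src->high;  /* the old code left this half untouched */
}

static void remap_entry_free(volatile struct remap_entry *dst)
{
        dst->low  = 0;
        dst->high = 0;          /* clear the whole 128 bits */
}
```

In the patch itself this is done with two set_64bit() calls on &irte->low and &irte->high, followed by __iommu_flush_cache() and a qi_flush_iec() to invalidate the IOMMU's cached copy of the entry.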
| -rw-r--r-- | arch/x86/kernel/apic/io_apic.c | 6 | ||||
| -rw-r--r-- | drivers/pci/intel-iommu.c | 314 | ||||
| -rw-r--r-- | drivers/pci/intr_remapping.c | 160 | ||||
| -rw-r--r-- | drivers/pci/intr_remapping.h | 2 | ||||
| -rw-r--r-- | include/linux/dmar.h | 11 |
5 files changed, 416 insertions, 77 deletions
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index b7a79207295e..4d0216fcb36c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -1414,6 +1414,9 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
| 1414 | irte.vector = vector; | 1414 | irte.vector = vector; |
| 1415 | irte.dest_id = IRTE_DEST(destination); | 1415 | irte.dest_id = IRTE_DEST(destination); |
| 1416 | 1416 | ||
| 1417 | /* Set source-id of interrupt request */ | ||
| 1418 | set_ioapic_sid(&irte, apic_id); | ||
| 1419 | |||
| 1417 | modify_irte(irq, &irte); | 1420 | modify_irte(irq, &irte); |
| 1418 | 1421 | ||
| 1419 | ir_entry->index2 = (index >> 15) & 0x1; | 1422 | ir_entry->index2 = (index >> 15) & 0x1; |
| @@ -3290,6 +3293,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
| 3290 | irte.vector = cfg->vector; | 3293 | irte.vector = cfg->vector; |
| 3291 | irte.dest_id = IRTE_DEST(dest); | 3294 | irte.dest_id = IRTE_DEST(dest); |
| 3292 | 3295 | ||
| 3296 | /* Set source-id of interrupt request */ | ||
| 3297 | set_msi_sid(&irte, pdev); | ||
| 3298 | |||
| 3293 | modify_irte(irq, &irte); | 3299 | modify_irte(irq, &irte); |
| 3294 | 3300 | ||
| 3295 | msg->address_hi = MSI_ADDR_BASE_HI; | 3301 | msg->address_hi = MSI_ADDR_BASE_HI; |
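Both hunks follow the same pattern: fill in the routing fields (vector, destination), stamp the source-id via the new set_ioapic_sid()/set_msi_sid() helpers, and only then hand the entry to modify_irte(). A hedged sketch of that ordering with stand-in types and a fake table (nothing below is the kernel's actual API):

```c
/* Stand-ins for struct irte, the remap table and modify_irte(). */
struct stub_irte {
        unsigned int vector, dest_id;
        unsigned int sid, sq, svt;              /* source-id checking fields */
};

static struct stub_irte stub_table[64];         /* pretend hardware table */

static void stub_modify_irte(int index, const struct stub_irte *irte)
{
        stub_table[index] = *irte;              /* kernel: modify_irte(irq, &irte) */
}

static void stub_compose_entry(int index, unsigned int vector,
                               unsigned int dest, unsigned int bus,
                               unsigned int devfn)
{
        struct stub_irte irte = { .vector = vector, .dest_id = dest };

        /* Set the source-id of the interrupt request before the entry is
         * written, mirroring the set_*_sid() calls added above. */
        irte.svt = 1;                           /* verify SID + SQ         */
        irte.sq  = 0;                           /* compare all 16 SID bits */
        irte.sid = (bus << 8) | devfn;          /* PCI requester ID        */

        stub_modify_irte(index, &irte);
}
```

When interrupt remapping is compiled out, the set_ioapic_sid()/set_msi_sid() stubs added to include/linux/dmar.h in the last hunk of this merge simply return 0.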
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 178853a07440..e53eacd75c8d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/sysdev.h> | 39 | #include <linux/sysdev.h> |
| 40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
| 41 | #include <asm/iommu.h> | 41 | #include <asm/iommu.h> |
| 42 | #include <asm/e820.h> | ||
| 42 | #include "pci.h" | 43 | #include "pci.h" |
| 43 | 44 | ||
| 44 | #define ROOT_SIZE VTD_PAGE_SIZE | 45 | #define ROOT_SIZE VTD_PAGE_SIZE |
| @@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
| 217 | return (pte->val & 3) != 0; | 218 | return (pte->val & 3) != 0; |
| 218 | } | 219 | } |
| 219 | 220 | ||
| 221 | /* | ||
| 222 | * This domain is a statically identity mapping domain. | ||
| 223 | * 1. This domain creates a static 1:1 mapping to all usable memory. | ||
| 224 | * 2. It maps to each iommu if successful. | ||
| 225 | * 3. Each iommu maps to this domain if successful. | ||
| 226 | */ | ||
| 227 | struct dmar_domain *si_domain; | ||
| 228 | |||
| 220 | /* devices under the same p2p bridge are owned in one domain */ | 229 | /* devices under the same p2p bridge are owned in one domain */ |
| 221 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) | 230 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) |
| 222 | 231 | ||
| @@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
| 225 | */ | 234 | */ |
| 226 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) | 235 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) |
| 227 | 236 | ||
| 237 | /* si_domain contains multiple devices */ | ||
| 238 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) | ||
| 239 | |||
| 228 | struct dmar_domain { | 240 | struct dmar_domain { |
| 229 | int id; /* domain id */ | 241 | int id; /* domain id */ |
| 230 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ | 242 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ |
| @@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu) | |||
| 435 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 447 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 436 | } | 448 | } |
| 437 | 449 | ||
| 438 | /* in native case, each domain is related to only one iommu */ | 450 | /* This function only returns a single iommu in a domain */ |
| 439 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | 451 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) |
| 440 | { | 452 | { |
| 441 | int iommu_id; | 453 | int iommu_id; |
| 442 | 454 | ||
| 455 | /* si_domain and vm domain should not get here. */ | ||
| 443 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); | 456 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); |
| 457 | BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); | ||
| 444 | 458 | ||
| 445 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | 459 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); |
| 446 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) | 460 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) |
| @@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
| 1189 | free_context_table(iommu); | 1203 | free_context_table(iommu); |
| 1190 | } | 1204 | } |
| 1191 | 1205 | ||
| 1192 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 1206 | static struct dmar_domain *alloc_domain(void) |
| 1193 | { | 1207 | { |
| 1194 | unsigned long num; | ||
| 1195 | unsigned long ndomains; | ||
| 1196 | struct dmar_domain *domain; | 1208 | struct dmar_domain *domain; |
| 1197 | unsigned long flags; | ||
| 1198 | 1209 | ||
| 1199 | domain = alloc_domain_mem(); | 1210 | domain = alloc_domain_mem(); |
| 1200 | if (!domain) | 1211 | if (!domain) |
| 1201 | return NULL; | 1212 | return NULL; |
| 1202 | 1213 | ||
| 1214 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | ||
| 1215 | domain->flags = 0; | ||
| 1216 | |||
| 1217 | return domain; | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | static int iommu_attach_domain(struct dmar_domain *domain, | ||
| 1221 | struct intel_iommu *iommu) | ||
| 1222 | { | ||
| 1223 | int num; | ||
| 1224 | unsigned long ndomains; | ||
| 1225 | unsigned long flags; | ||
| 1226 | |||
| 1203 | ndomains = cap_ndoms(iommu->cap); | 1227 | ndomains = cap_ndoms(iommu->cap); |
| 1204 | 1228 | ||
| 1205 | spin_lock_irqsave(&iommu->lock, flags); | 1229 | spin_lock_irqsave(&iommu->lock, flags); |
| 1230 | |||
| 1206 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | 1231 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
| 1207 | if (num >= ndomains) { | 1232 | if (num >= ndomains) { |
| 1208 | spin_unlock_irqrestore(&iommu->lock, flags); | 1233 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1209 | free_domain_mem(domain); | ||
| 1210 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | 1234 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
| 1211 | return NULL; | 1235 | return -ENOMEM; |
| 1212 | } | 1236 | } |
| 1213 | 1237 | ||
| 1214 | set_bit(num, iommu->domain_ids); | ||
| 1215 | domain->id = num; | 1238 | domain->id = num; |
| 1216 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | 1239 | set_bit(num, iommu->domain_ids); |
| 1217 | set_bit(iommu->seq_id, &domain->iommu_bmp); | 1240 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
| 1218 | domain->flags = 0; | ||
| 1219 | iommu->domains[num] = domain; | 1241 | iommu->domains[num] = domain; |
| 1220 | spin_unlock_irqrestore(&iommu->lock, flags); | 1242 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1221 | 1243 | ||
| 1222 | return domain; | 1244 | return 0; |
| 1223 | } | 1245 | } |
| 1224 | 1246 | ||
| 1225 | static void iommu_free_domain(struct dmar_domain *domain) | 1247 | static void iommu_detach_domain(struct dmar_domain *domain, |
| 1248 | struct intel_iommu *iommu) | ||
| 1226 | { | 1249 | { |
| 1227 | unsigned long flags; | 1250 | unsigned long flags; |
| 1228 | struct intel_iommu *iommu; | 1251 | int num, ndomains; |
| 1229 | 1252 | int found = 0; | |
| 1230 | iommu = domain_get_iommu(domain); | ||
| 1231 | 1253 | ||
| 1232 | spin_lock_irqsave(&iommu->lock, flags); | 1254 | spin_lock_irqsave(&iommu->lock, flags); |
| 1233 | clear_bit(domain->id, iommu->domain_ids); | 1255 | ndomains = cap_ndoms(iommu->cap); |
| 1256 | num = find_first_bit(iommu->domain_ids, ndomains); | ||
| 1257 | for (; num < ndomains; ) { | ||
| 1258 | if (iommu->domains[num] == domain) { | ||
| 1259 | found = 1; | ||
| 1260 | break; | ||
| 1261 | } | ||
| 1262 | num = find_next_bit(iommu->domain_ids, | ||
| 1263 | cap_ndoms(iommu->cap), num+1); | ||
| 1264 | } | ||
| 1265 | |||
| 1266 | if (found) { | ||
| 1267 | clear_bit(num, iommu->domain_ids); | ||
| 1268 | clear_bit(iommu->seq_id, &domain->iommu_bmp); | ||
| 1269 | iommu->domains[num] = NULL; | ||
| 1270 | } | ||
| 1234 | spin_unlock_irqrestore(&iommu->lock, flags); | 1271 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1235 | } | 1272 | } |
| 1236 | 1273 | ||
| @@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1350 | 1387 | ||
| 1351 | static void domain_exit(struct dmar_domain *domain) | 1388 | static void domain_exit(struct dmar_domain *domain) |
| 1352 | { | 1389 | { |
| 1390 | struct dmar_drhd_unit *drhd; | ||
| 1391 | struct intel_iommu *iommu; | ||
| 1353 | u64 end; | 1392 | u64 end; |
| 1354 | 1393 | ||
| 1355 | /* Domain 0 is reserved, so dont process it */ | 1394 | /* Domain 0 is reserved, so dont process it */ |
| @@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1368 | /* free page tables */ | 1407 | /* free page tables */ |
| 1369 | dma_pte_free_pagetable(domain, 0, end); | 1408 | dma_pte_free_pagetable(domain, 0, end); |
| 1370 | 1409 | ||
| 1371 | iommu_free_domain(domain); | 1410 | for_each_active_iommu(iommu, drhd) |
| 1411 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) | ||
| 1412 | iommu_detach_domain(domain, iommu); | ||
| 1413 | |||
| 1372 | free_domain_mem(domain); | 1414 | free_domain_mem(domain); |
| 1373 | } | 1415 | } |
| 1374 | 1416 | ||
| @@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1408 | id = domain->id; | 1450 | id = domain->id; |
| 1409 | pgd = domain->pgd; | 1451 | pgd = domain->pgd; |
| 1410 | 1452 | ||
| 1411 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { | 1453 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 1454 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { | ||
| 1412 | int found = 0; | 1455 | int found = 0; |
| 1413 | 1456 | ||
| 1414 | /* find an available domain id for this device in iommu */ | 1457 | /* find an available domain id for this device in iommu */ |
| @@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1433 | } | 1476 | } |
| 1434 | 1477 | ||
| 1435 | set_bit(num, iommu->domain_ids); | 1478 | set_bit(num, iommu->domain_ids); |
| 1479 | set_bit(iommu->seq_id, &domain->iommu_bmp); | ||
| 1436 | iommu->domains[num] = domain; | 1480 | iommu->domains[num] = domain; |
| 1437 | id = num; | 1481 | id = num; |
| 1438 | } | 1482 | } |
| @@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1675 | unsigned long flags; | 1719 | unsigned long flags; |
| 1676 | int bus = 0, devfn = 0; | 1720 | int bus = 0, devfn = 0; |
| 1677 | int segment; | 1721 | int segment; |
| 1722 | int ret; | ||
| 1678 | 1723 | ||
| 1679 | domain = find_domain(pdev); | 1724 | domain = find_domain(pdev); |
| 1680 | if (domain) | 1725 | if (domain) |
| @@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1707 | } | 1752 | } |
| 1708 | } | 1753 | } |
| 1709 | 1754 | ||
| 1755 | domain = alloc_domain(); | ||
| 1756 | if (!domain) | ||
| 1757 | goto error; | ||
| 1758 | |||
| 1710 | /* Allocate new domain for the device */ | 1759 | /* Allocate new domain for the device */ |
| 1711 | drhd = dmar_find_matched_drhd_unit(pdev); | 1760 | drhd = dmar_find_matched_drhd_unit(pdev); |
| 1712 | if (!drhd) { | 1761 | if (!drhd) { |
| @@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1716 | } | 1765 | } |
| 1717 | iommu = drhd->iommu; | 1766 | iommu = drhd->iommu; |
| 1718 | 1767 | ||
| 1719 | domain = iommu_alloc_domain(iommu); | 1768 | ret = iommu_attach_domain(domain, iommu); |
| 1720 | if (!domain) | 1769 | if (ret) { |
| 1770 | domain_exit(domain); | ||
| 1721 | goto error; | 1771 | goto error; |
| 1772 | } | ||
| 1722 | 1773 | ||
| 1723 | if (domain_init(domain, gaw)) { | 1774 | if (domain_init(domain, gaw)) { |
| 1724 | domain_exit(domain); | 1775 | domain_exit(domain); |
| @@ -1792,6 +1843,8 @@ error: | |||
| 1792 | return find_domain(pdev); | 1843 | return find_domain(pdev); |
| 1793 | } | 1844 | } |
| 1794 | 1845 | ||
| 1846 | static int iommu_identity_mapping; | ||
| 1847 | |||
| 1795 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 1848 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
| 1796 | unsigned long long start, | 1849 | unsigned long long start, |
| 1797 | unsigned long long end) | 1850 | unsigned long long end) |
| @@ -1804,8 +1857,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
| 1804 | printk(KERN_INFO | 1857 | printk(KERN_INFO |
| 1805 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 1858 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
| 1806 | pci_name(pdev), start, end); | 1859 | pci_name(pdev), start, end); |
| 1807 | /* page table init */ | 1860 | if (iommu_identity_mapping) |
| 1808 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 1861 | domain = si_domain; |
| 1862 | else | ||
| 1863 | /* page table init */ | ||
| 1864 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
| 1809 | if (!domain) | 1865 | if (!domain) |
| 1810 | return -ENOMEM; | 1866 | return -ENOMEM; |
| 1811 | 1867 | ||
| @@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void) | |||
| 1952 | return 0; | 2008 | return 0; |
| 1953 | } | 2009 | } |
| 1954 | 2010 | ||
| 1955 | static int __init init_dmars(void) | 2011 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
| 2012 | static int si_domain_init(void) | ||
| 2013 | { | ||
| 2014 | struct dmar_drhd_unit *drhd; | ||
| 2015 | struct intel_iommu *iommu; | ||
| 2016 | int ret = 0; | ||
| 2017 | |||
| 2018 | si_domain = alloc_domain(); | ||
| 2019 | if (!si_domain) | ||
| 2020 | return -EFAULT; | ||
| 2021 | |||
| 2022 | |||
| 2023 | for_each_active_iommu(iommu, drhd) { | ||
| 2024 | ret = iommu_attach_domain(si_domain, iommu); | ||
| 2025 | if (ret) { | ||
| 2026 | domain_exit(si_domain); | ||
| 2027 | return -EFAULT; | ||
| 2028 | } | ||
| 2029 | } | ||
| 2030 | |||
| 2031 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | ||
| 2032 | domain_exit(si_domain); | ||
| 2033 | return -EFAULT; | ||
| 2034 | } | ||
| 2035 | |||
| 2036 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
| 2037 | |||
| 2038 | return 0; | ||
| 2039 | } | ||
| 2040 | |||
| 2041 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | ||
| 2042 | struct pci_dev *pdev); | ||
| 2043 | static int identity_mapping(struct pci_dev *pdev) | ||
| 2044 | { | ||
| 2045 | struct device_domain_info *info; | ||
| 2046 | |||
| 2047 | if (likely(!iommu_identity_mapping)) | ||
| 2048 | return 0; | ||
| 2049 | |||
| 2050 | |||
| 2051 | list_for_each_entry(info, &si_domain->devices, link) | ||
| 2052 | if (info->dev == pdev) | ||
| 2053 | return 1; | ||
| 2054 | return 0; | ||
| 2055 | } | ||
| 2056 | |||
| 2057 | static int domain_add_dev_info(struct dmar_domain *domain, | ||
| 2058 | struct pci_dev *pdev) | ||
| 2059 | { | ||
| 2060 | struct device_domain_info *info; | ||
| 2061 | unsigned long flags; | ||
| 2062 | |||
| 2063 | info = alloc_devinfo_mem(); | ||
| 2064 | if (!info) | ||
| 2065 | return -ENOMEM; | ||
| 2066 | |||
| 2067 | info->segment = pci_domain_nr(pdev->bus); | ||
| 2068 | info->bus = pdev->bus->number; | ||
| 2069 | info->devfn = pdev->devfn; | ||
| 2070 | info->dev = pdev; | ||
| 2071 | info->domain = domain; | ||
| 2072 | |||
| 2073 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 2074 | list_add(&info->link, &domain->devices); | ||
| 2075 | list_add(&info->global, &device_domain_list); | ||
| 2076 | pdev->dev.archdata.iommu = info; | ||
| 2077 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2078 | |||
| 2079 | return 0; | ||
| 2080 | } | ||
| 2081 | |||
| 2082 | static int iommu_prepare_static_identity_mapping(void) | ||
| 2083 | { | ||
| 2084 | int i; | ||
| 2085 | struct pci_dev *pdev = NULL; | ||
| 2086 | int ret; | ||
| 2087 | |||
| 2088 | ret = si_domain_init(); | ||
| 2089 | if (ret) | ||
| 2090 | return -EFAULT; | ||
| 2091 | |||
| 2092 | printk(KERN_INFO "IOMMU: Setting identity map:\n"); | ||
| 2093 | for_each_pci_dev(pdev) { | ||
| 2094 | for (i = 0; i < e820.nr_map; i++) { | ||
| 2095 | struct e820entry *ei = &e820.map[i]; | ||
| 2096 | |||
| 2097 | if (ei->type == E820_RAM) { | ||
| 2098 | ret = iommu_prepare_identity_map(pdev, | ||
| 2099 | ei->addr, ei->addr + ei->size); | ||
| 2100 | if (ret) { | ||
| 2101 | printk(KERN_INFO "1:1 mapping to one domain failed.\n"); | ||
| 2102 | return -EFAULT; | ||
| 2103 | } | ||
| 2104 | } | ||
| 2105 | } | ||
| 2106 | ret = domain_add_dev_info(si_domain, pdev); | ||
| 2107 | if (ret) | ||
| 2108 | return ret; | ||
| 2109 | } | ||
| 2110 | |||
| 2111 | return 0; | ||
| 2112 | } | ||
| 2113 | |||
| 2114 | int __init init_dmars(void) | ||
| 1956 | { | 2115 | { |
| 1957 | struct dmar_drhd_unit *drhd; | 2116 | struct dmar_drhd_unit *drhd; |
| 1958 | struct dmar_rmrr_unit *rmrr; | 2117 | struct dmar_rmrr_unit *rmrr; |
| @@ -1962,6 +2121,13 @@ static int __init init_dmars(void) | |||
| 1962 | int pass_through = 1; | 2121 | int pass_through = 1; |
| 1963 | 2122 | ||
| 1964 | /* | 2123 | /* |
| 2124 | * If pass-through cannot be enabled, the iommu falls back to identity | ||
| 2125 | * mapping. | ||
| 2126 | */ | ||
| 2127 | if (iommu_pass_through) | ||
| 2128 | iommu_identity_mapping = 1; | ||
| 2129 | |||
| 2130 | /* | ||
| 1965 | * for each drhd | 2131 | * for each drhd |
| 1966 | * allocate root | 2132 | * allocate root |
| 1967 | * initialize and program root entry to not present | 2133 | * initialize and program root entry to not present |
| @@ -2090,9 +2256,12 @@ static int __init init_dmars(void) | |||
| 2090 | 2256 | ||
| 2091 | /* | 2257 | /* |
| 2092 | * If pass through is not set or not enabled, setup context entries for | 2258 | * If pass through is not set or not enabled, setup context entries for |
| 2093 | * identity mappings for rmrr, gfx, and isa. | 2259 | * identity mappings for rmrr, gfx, and isa and may fall back to static |
| 2260 | * identity mapping if iommu_identity_mapping is set. | ||
| 2094 | */ | 2261 | */ |
| 2095 | if (!iommu_pass_through) { | 2262 | if (!iommu_pass_through) { |
| 2263 | if (iommu_identity_mapping) | ||
| 2264 | iommu_prepare_static_identity_mapping(); | ||
| 2096 | /* | 2265 | /* |
| 2097 | * For each rmrr | 2266 | * For each rmrr |
| 2098 | * for each dev attached to rmrr | 2267 | * for each dev attached to rmrr |
| @@ -2107,6 +2276,7 @@ static int __init init_dmars(void) | |||
| 2107 | * endfor | 2276 | * endfor |
| 2108 | * endfor | 2277 | * endfor |
| 2109 | */ | 2278 | */ |
| 2279 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); | ||
| 2110 | for_each_rmrr_units(rmrr) { | 2280 | for_each_rmrr_units(rmrr) { |
| 2111 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2281 | for (i = 0; i < rmrr->devices_cnt; i++) { |
| 2112 | pdev = rmrr->devices[i]; | 2282 | pdev = rmrr->devices[i]; |
| @@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
| 2248 | return domain; | 2418 | return domain; |
| 2249 | } | 2419 | } |
| 2250 | 2420 | ||
| 2421 | static int iommu_dummy(struct pci_dev *pdev) | ||
| 2422 | { | ||
| 2423 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; | ||
| 2424 | } | ||
| 2425 | |||
| 2426 | /* Check if the pdev needs to go through the non-identity map and unmap process. */ | ||
| 2427 | static int iommu_no_mapping(struct pci_dev *pdev) | ||
| 2428 | { | ||
| 2429 | int found; | ||
| 2430 | |||
| 2431 | if (!iommu_identity_mapping) | ||
| 2432 | return iommu_dummy(pdev); | ||
| 2433 | |||
| 2434 | found = identity_mapping(pdev); | ||
| 2435 | if (found) { | ||
| 2436 | if (pdev->dma_mask > DMA_BIT_MASK(32)) | ||
| 2437 | return 1; | ||
| 2438 | else { | ||
| 2439 | /* | ||
| 2440 | * A 32-bit-only DMA device is removed from si_domain and falls | ||
| 2441 | * back to non-identity mapping. | ||
| 2442 | */ | ||
| 2443 | domain_remove_one_dev_info(si_domain, pdev); | ||
| 2444 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", | ||
| 2445 | pci_name(pdev)); | ||
| 2446 | return 0; | ||
| 2447 | } | ||
| 2448 | } else { | ||
| 2449 | /* | ||
| 2450 | * A 64-bit DMA device detached from a VM is put back into | ||
| 2451 | * si_domain for identity mapping. | ||
| 2452 | */ | ||
| 2453 | if (pdev->dma_mask > DMA_BIT_MASK(32)) { | ||
| 2454 | int ret; | ||
| 2455 | ret = domain_add_dev_info(si_domain, pdev); | ||
| 2456 | if (!ret) { | ||
| 2457 | printk(KERN_INFO "64bit %s uses identity mapping\n", | ||
| 2458 | pci_name(pdev)); | ||
| 2459 | return 1; | ||
| 2460 | } | ||
| 2461 | } | ||
| 2462 | } | ||
| 2463 | |||
| 2464 | return iommu_dummy(pdev); | ||
| 2465 | } | ||
| 2466 | |||
| 2251 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2467 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
| 2252 | size_t size, int dir, u64 dma_mask) | 2468 | size_t size, int dir, u64 dma_mask) |
| 2253 | { | 2469 | { |
| @@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
| 2260 | struct intel_iommu *iommu; | 2476 | struct intel_iommu *iommu; |
| 2261 | 2477 | ||
| 2262 | BUG_ON(dir == DMA_NONE); | 2478 | BUG_ON(dir == DMA_NONE); |
| 2263 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2479 | |
| 2480 | if (iommu_no_mapping(pdev)) | ||
| 2264 | return paddr; | 2481 | return paddr; |
| 2265 | 2482 | ||
| 2266 | domain = get_valid_domain_for_dev(pdev); | 2483 | domain = get_valid_domain_for_dev(pdev); |
| @@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
| 2401 | struct iova *iova; | 2618 | struct iova *iova; |
| 2402 | struct intel_iommu *iommu; | 2619 | struct intel_iommu *iommu; |
| 2403 | 2620 | ||
| 2404 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2621 | if (iommu_no_mapping(pdev)) |
| 2405 | return; | 2622 | return; |
| 2623 | |||
| 2406 | domain = find_domain(pdev); | 2624 | domain = find_domain(pdev); |
| 2407 | BUG_ON(!domain); | 2625 | BUG_ON(!domain); |
| 2408 | 2626 | ||
| @@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2492 | struct scatterlist *sg; | 2710 | struct scatterlist *sg; |
| 2493 | struct intel_iommu *iommu; | 2711 | struct intel_iommu *iommu; |
| 2494 | 2712 | ||
| 2495 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2713 | if (iommu_no_mapping(pdev)) |
| 2496 | return; | 2714 | return; |
| 2497 | 2715 | ||
| 2498 | domain = find_domain(pdev); | 2716 | domain = find_domain(pdev); |
| @@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
| 2553 | struct intel_iommu *iommu; | 2771 | struct intel_iommu *iommu; |
| 2554 | 2772 | ||
| 2555 | BUG_ON(dir == DMA_NONE); | 2773 | BUG_ON(dir == DMA_NONE); |
| 2556 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2774 | if (iommu_no_mapping(pdev)) |
| 2557 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); | 2775 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); |
| 2558 | 2776 | ||
| 2559 | domain = get_valid_domain_for_dev(pdev); | 2777 | domain = get_valid_domain_for_dev(pdev); |
| @@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void) | |||
| 2951 | return 0; | 3169 | return 0; |
| 2952 | } | 3170 | } |
| 2953 | 3171 | ||
| 2954 | static int vm_domain_add_dev_info(struct dmar_domain *domain, | ||
| 2955 | struct pci_dev *pdev) | ||
| 2956 | { | ||
| 2957 | struct device_domain_info *info; | ||
| 2958 | unsigned long flags; | ||
| 2959 | |||
| 2960 | info = alloc_devinfo_mem(); | ||
| 2961 | if (!info) | ||
| 2962 | return -ENOMEM; | ||
| 2963 | |||
| 2964 | info->segment = pci_domain_nr(pdev->bus); | ||
| 2965 | info->bus = pdev->bus->number; | ||
| 2966 | info->devfn = pdev->devfn; | ||
| 2967 | info->dev = pdev; | ||
| 2968 | info->domain = domain; | ||
| 2969 | |||
| 2970 | spin_lock_irqsave(&device_domain_lock, flags); | ||
| 2971 | list_add(&info->link, &domain->devices); | ||
| 2972 | list_add(&info->global, &device_domain_list); | ||
| 2973 | pdev->dev.archdata.iommu = info; | ||
| 2974 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
| 2975 | |||
| 2976 | return 0; | ||
| 2977 | } | ||
| 2978 | |||
| 2979 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 3172 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
| 2980 | struct pci_dev *pdev) | 3173 | struct pci_dev *pdev) |
| 2981 | { | 3174 | { |
| @@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
| 3003 | } | 3196 | } |
| 3004 | } | 3197 | } |
| 3005 | 3198 | ||
| 3006 | static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, | 3199 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
| 3007 | struct pci_dev *pdev) | 3200 | struct pci_dev *pdev) |
| 3008 | { | 3201 | { |
| 3009 | struct device_domain_info *info; | 3202 | struct device_domain_info *info; |
| @@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) | |||
| 3136 | return domain; | 3329 | return domain; |
| 3137 | } | 3330 | } |
| 3138 | 3331 | ||
| 3139 | static int vm_domain_init(struct dmar_domain *domain, int guest_width) | 3332 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
| 3140 | { | 3333 | { |
| 3141 | int adjust_width; | 3334 | int adjust_width; |
| 3142 | 3335 | ||
| @@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
| 3227 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 3420 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
| 3228 | return -ENOMEM; | 3421 | return -ENOMEM; |
| 3229 | } | 3422 | } |
| 3230 | if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 3423 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
| 3231 | printk(KERN_ERR | 3424 | printk(KERN_ERR |
| 3232 | "intel_iommu_domain_init() failed\n"); | 3425 | "intel_iommu_domain_init() failed\n"); |
| 3233 | vm_domain_exit(dmar_domain); | 3426 | vm_domain_exit(dmar_domain); |
| @@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 3262 | 3455 | ||
| 3263 | old_domain = find_domain(pdev); | 3456 | old_domain = find_domain(pdev); |
| 3264 | if (old_domain) { | 3457 | if (old_domain) { |
| 3265 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | 3458 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 3266 | vm_domain_remove_one_dev_info(old_domain, pdev); | 3459 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
| 3460 | domain_remove_one_dev_info(old_domain, pdev); | ||
| 3267 | else | 3461 | else |
| 3268 | domain_remove_dev_info(old_domain); | 3462 | domain_remove_dev_info(old_domain); |
| 3269 | } | 3463 | } |
| @@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 3285 | return -EFAULT; | 3479 | return -EFAULT; |
| 3286 | } | 3480 | } |
| 3287 | 3481 | ||
| 3288 | ret = vm_domain_add_dev_info(dmar_domain, pdev); | 3482 | ret = domain_add_dev_info(dmar_domain, pdev); |
| 3289 | if (ret) | 3483 | if (ret) |
| 3290 | return ret; | 3484 | return ret; |
| 3291 | 3485 | ||
| @@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, | |||
| 3299 | struct dmar_domain *dmar_domain = domain->priv; | 3493 | struct dmar_domain *dmar_domain = domain->priv; |
| 3300 | struct pci_dev *pdev = to_pci_dev(dev); | 3494 | struct pci_dev *pdev = to_pci_dev(dev); |
| 3301 | 3495 | ||
| 3302 | vm_domain_remove_one_dev_info(dmar_domain, pdev); | 3496 | domain_remove_one_dev_info(dmar_domain, pdev); |
| 3303 | } | 3497 | } |
| 3304 | 3498 | ||
| 3305 | static int intel_iommu_map_range(struct iommu_domain *domain, | 3499 | static int intel_iommu_map_range(struct iommu_domain *domain, |
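The bulk of the intel-iommu.c changes build the static identity domain (si_domain) plumbing; the run-time policy sits in iommu_no_mapping(): a device that can address 64-bit DMA keeps (or regains, e.g. after being detached from a VM) its 1:1 mapping, while a 32-bit-only device is dropped from si_domain and goes through normal translation. A simplified, stand-alone sketch of that decision, with invented names standing in for the kernel structures:

```c
#include <stdbool.h>
#include <stdint.h>

#define DMA_32BIT_MASK 0xffffffffULL

struct fake_dev {
        uint64_t dma_mask;      /* DMA addressing capability            */
        bool     in_si_domain;  /* member of the static identity domain */
};

/* Return true when DMA for this device may bypass address translation. */
static bool fake_no_mapping(struct fake_dev *dev)
{
        if (dev->in_si_domain) {
                if (dev->dma_mask > DMA_32BIT_MASK)
                        return true;            /* keep the 1:1 mapping   */
                dev->in_si_domain = false;      /* 32-bit only: translate */
                return false;
        }
        if (dev->dma_mask > DMA_32BIT_MASK) {
                dev->in_si_domain = true;       /* e.g. after a VM detach */
                return true;
        }
        return false;                           /* stay with translation  */
}
```

This is why __intel_map_single(), intel_unmap_page(), intel_map_sg() and intel_unmap_sg() now gate on iommu_no_mapping(pdev) rather than the bare DUMMY_DEVICE_DOMAIN_INFO comparison.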
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 1e83c8c5f985..4f5b8712931f 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | #include <linux/intel-iommu.h> | 10 | #include <linux/intel-iommu.h> |
| 11 | #include "intr_remapping.h" | 11 | #include "intr_remapping.h" |
| 12 | #include <acpi/acpi.h> | 12 | #include <acpi/acpi.h> |
| 13 | #include <asm/pci-direct.h> | ||
| 14 | #include "pci.h" | ||
| 13 | 15 | ||
| 14 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 16 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
| 15 | static int ir_ioapic_num; | 17 | static int ir_ioapic_num; |
| @@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
| 314 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 316 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 315 | irte = &iommu->ir_table->base[index]; | 317 | irte = &iommu->ir_table->base[index]; |
| 316 | 318 | ||
| 317 | set_64bit((unsigned long *)irte, irte_modified->low); | 319 | set_64bit((unsigned long *)&irte->low, irte_modified->low); |
| 320 | set_64bit((unsigned long *)&irte->high, irte_modified->high); | ||
| 318 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 321 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
| 319 | 322 | ||
| 320 | rc = qi_flush_iec(iommu, index, 0); | 323 | rc = qi_flush_iec(iommu, index, 0); |
| @@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) | |||
| 369 | return drhd->iommu; | 372 | return drhd->iommu; |
| 370 | } | 373 | } |
| 371 | 374 | ||
| 375 | static int clear_entries(struct irq_2_iommu *irq_iommu) | ||
| 376 | { | ||
| 377 | struct irte *start, *entry, *end; | ||
| 378 | struct intel_iommu *iommu; | ||
| 379 | int index; | ||
| 380 | |||
| 381 | if (irq_iommu->sub_handle) | ||
| 382 | return 0; | ||
| 383 | |||
| 384 | iommu = irq_iommu->iommu; | ||
| 385 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
| 386 | |||
| 387 | start = iommu->ir_table->base + index; | ||
| 388 | end = start + (1 << irq_iommu->irte_mask); | ||
| 389 | |||
| 390 | for (entry = start; entry < end; entry++) { | ||
| 391 | set_64bit((unsigned long *)&entry->low, 0); | ||
| 392 | set_64bit((unsigned long *)&entry->high, 0); | ||
| 393 | } | ||
| 394 | |||
| 395 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
| 396 | } | ||
| 397 | |||
| 372 | int free_irte(int irq) | 398 | int free_irte(int irq) |
| 373 | { | 399 | { |
| 374 | int rc = 0; | 400 | int rc = 0; |
| 375 | int index, i; | ||
| 376 | struct irte *irte; | ||
| 377 | struct intel_iommu *iommu; | ||
| 378 | struct irq_2_iommu *irq_iommu; | 401 | struct irq_2_iommu *irq_iommu; |
| 379 | unsigned long flags; | 402 | unsigned long flags; |
| 380 | 403 | ||
| @@ -385,16 +408,7 @@ int free_irte(int irq) | |||
| 385 | return -1; | 408 | return -1; |
| 386 | } | 409 | } |
| 387 | 410 | ||
| 388 | iommu = irq_iommu->iommu; | 411 | rc = clear_entries(irq_iommu); |
| 389 | |||
| 390 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
| 391 | irte = &iommu->ir_table->base[index]; | ||
| 392 | |||
| 393 | if (!irq_iommu->sub_handle) { | ||
| 394 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) | ||
| 395 | set_64bit((unsigned long *)(irte + i), 0); | ||
| 396 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
| 397 | } | ||
| 398 | 412 | ||
| 399 | irq_iommu->iommu = NULL; | 413 | irq_iommu->iommu = NULL; |
| 400 | irq_iommu->irte_index = 0; | 414 | irq_iommu->irte_index = 0; |
| @@ -406,6 +420,91 @@ int free_irte(int irq) | |||
| 406 | return rc; | 420 | return rc; |
| 407 | } | 421 | } |
| 408 | 422 | ||
| 423 | /* | ||
| 424 | * source validation type | ||
| 425 | */ | ||
| 426 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ | ||
| 427 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ | ||
| 428 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ | ||
| 429 | |||
| 430 | /* | ||
| 431 | * source-id qualifier | ||
| 432 | */ | ||
| 433 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ | ||
| 434 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore | ||
| 435 | * the third least significant bit | ||
| 436 | */ | ||
| 437 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore | ||
| 438 | * the second and third least significant bits | ||
| 439 | */ | ||
| 440 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore | ||
| 441 | * the three least significant bits | ||
| 442 | */ | ||
| 443 | |||
| 444 | /* | ||
| 445 | * set SVT, SQ and SID fields of irte to verify | ||
| 446 | * source ids of interrupt requests | ||
| 447 | */ | ||
| 448 | static void set_irte_sid(struct irte *irte, unsigned int svt, | ||
| 449 | unsigned int sq, unsigned int sid) | ||
| 450 | { | ||
| 451 | irte->svt = svt; | ||
| 452 | irte->sq = sq; | ||
| 453 | irte->sid = sid; | ||
| 454 | } | ||
| 455 | |||
| 456 | int set_ioapic_sid(struct irte *irte, int apic) | ||
| 457 | { | ||
| 458 | int i; | ||
| 459 | u16 sid = 0; | ||
| 460 | |||
| 461 | if (!irte) | ||
| 462 | return -1; | ||
| 463 | |||
| 464 | for (i = 0; i < MAX_IO_APICS; i++) { | ||
| 465 | if (ir_ioapic[i].id == apic) { | ||
| 466 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | ||
| 467 | break; | ||
| 468 | } | ||
| 469 | } | ||
| 470 | |||
| 471 | if (sid == 0) { | ||
| 472 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | ||
| 473 | return -1; | ||
| 474 | } | ||
| 475 | |||
| 476 | set_irte_sid(irte, 1, 0, sid); | ||
| 477 | |||
| 478 | return 0; | ||
| 479 | } | ||
| 480 | |||
| 481 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) | ||
| 482 | { | ||
| 483 | struct pci_dev *bridge; | ||
| 484 | |||
| 485 | if (!irte || !dev) | ||
| 486 | return -1; | ||
| 487 | |||
| 488 | /* PCIe device or Root Complex integrated PCI device */ | ||
| 489 | if (dev->is_pcie || !dev->bus->parent) { | ||
| 490 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
| 491 | (dev->bus->number << 8) | dev->devfn); | ||
| 492 | return 0; | ||
| 493 | } | ||
| 494 | |||
| 495 | bridge = pci_find_upstream_pcie_bridge(dev); | ||
| 496 | if (bridge) { | ||
| 497 | if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */ | ||
| 498 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | ||
| 499 | (bridge->bus->number << 8) | dev->bus->number); | ||
| 500 | else /* this is a legacy PCI bridge */ | ||
| 501 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
| 502 | (bridge->bus->number << 8) | bridge->devfn); | ||
| 503 | } | ||
| 504 | |||
| 505 | return 0; | ||
| 506 | } | ||
| 507 | |||
| 409 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | 508 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) |
| 410 | { | 509 | { |
| 411 | u64 addr; | 510 | u64 addr; |
| @@ -612,6 +711,35 @@ error: | |||
| 612 | return -1; | 711 | return -1; |
| 613 | } | 712 | } |
| 614 | 713 | ||
| 714 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | ||
| 715 | struct intel_iommu *iommu) | ||
| 716 | { | ||
| 717 | struct acpi_dmar_pci_path *path; | ||
| 718 | u8 bus; | ||
| 719 | int count; | ||
| 720 | |||
| 721 | bus = scope->bus; | ||
| 722 | path = (struct acpi_dmar_pci_path *)(scope + 1); | ||
| 723 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | ||
| 724 | / sizeof(struct acpi_dmar_pci_path); | ||
| 725 | |||
| 726 | while (--count > 0) { | ||
| 727 | /* | ||
| 728 | * Access PCI directly because the PCI | ||
| 729 | * subsystem isn't initialized yet. | ||
| 730 | */ | ||
| 731 | bus = read_pci_config_byte(bus, path->dev, path->fn, | ||
| 732 | PCI_SECONDARY_BUS); | ||
| 733 | path++; | ||
| 734 | } | ||
| 735 | |||
| 736 | ir_ioapic[ir_ioapic_num].bus = bus; | ||
| 737 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn); | ||
| 738 | ir_ioapic[ir_ioapic_num].iommu = iommu; | ||
| 739 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
| 740 | ir_ioapic_num++; | ||
| 741 | } | ||
| 742 | |||
| 615 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | 743 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, |
| 616 | struct intel_iommu *iommu) | 744 | struct intel_iommu *iommu) |
| 617 | { | 745 | { |
| @@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | |||
| 636 | " 0x%Lx\n", scope->enumeration_id, | 764 | " 0x%Lx\n", scope->enumeration_id, |
| 637 | drhd->address); | 765 | drhd->address); |
| 638 | 766 | ||
| 639 | ir_ioapic[ir_ioapic_num].iommu = iommu; | 767 | ir_parse_one_ioapic_scope(scope, iommu); |
| 640 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
| 641 | ir_ioapic_num++; | ||
| 642 | } | 768 | } |
| 643 | start += scope->length; | 769 | start += scope->length; |
| 644 | } | 770 | } |
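The subtle part of set_msi_sid() above is deciding which requester ID the IOMMU can actually trust: a native PCIe function (or a Root-Complex-integrated device) reports its own bus/devfn; behind a PCIe-to-PCI/PCI-X bridge only the bus number is reliable, so the entry verifies the bus; and behind a legacy PCI bridge the bridge's own ID appears upstream. A hedged sketch of that selection, with an invented device structure in place of struct pci_dev:

```c
#include <stdbool.h>
#include <stdint.h>

struct fake_pci_dev {
        uint8_t bus, devfn;
        bool    is_pcie;
        const struct fake_pci_dev *pcie_bridge; /* upstream PCIe bridge, or NULL */
};

/* Pick the source-id the IOMMU should verify for MSIs from @dev;
 * *bus_only tells the caller to use bus verification (SVT_VERIFY_BUS). */
static uint16_t fake_msi_sid(const struct fake_pci_dev *dev, bool *bus_only)
{
        const struct fake_pci_dev *br = dev->pcie_bridge;

        *bus_only = false;
        if (dev->is_pcie || !br)                /* native PCIe / root-bus device */
                return ((uint16_t)dev->bus << 8) | dev->devfn;

        if (br->is_pcie) {                      /* PCIe-to-PCI/PCI-X bridge      */
                *bus_only = true;               /* only the bus can be checked   */
                return ((uint16_t)br->bus << 8) | dev->bus;
        }
        /* Legacy PCI bridge: it owns the transaction on the upstream side. */
        return ((uint16_t)br->bus << 8) | br->devfn;
}
```

The IO-APIC side is handled analogously: ir_parse_one_ioapic_scope() walks the DMAR device-scope path with read_pci_config_byte() (the PCI subsystem is not up yet) to record the bus/devfn that set_ioapic_sid() later programs into the entry.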
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h index ca48f0df8ac9..63a263c18415 100644 --- a/drivers/pci/intr_remapping.h +++ b/drivers/pci/intr_remapping.h | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | struct ioapic_scope { | 3 | struct ioapic_scope { |
| 4 | struct intel_iommu *iommu; | 4 | struct intel_iommu *iommu; |
| 5 | unsigned int id; | 5 | unsigned int id; |
| 6 | unsigned int bus; /* PCI bus number */ | ||
| 7 | unsigned int devfn; /* PCI devfn number */ | ||
| 6 | }; | 8 | }; |
| 7 | 9 | ||
| 8 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | 10 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 1731fb5fd775..4a2b162c256a 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
| @@ -126,6 +126,8 @@ extern int free_irte(int irq); | |||
| 126 | extern int irq_remapped(int irq); | 126 | extern int irq_remapped(int irq); |
| 127 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | 127 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); |
| 128 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | 128 | extern struct intel_iommu *map_ioapic_to_ir(int apic); |
| 129 | extern int set_ioapic_sid(struct irte *irte, int apic); | ||
| 130 | extern int set_msi_sid(struct irte *irte, struct pci_dev *dev); | ||
| 129 | #else | 131 | #else |
| 130 | static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | 132 | static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
| 131 | { | 133 | { |
| @@ -156,6 +158,15 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic) | |||
| 156 | { | 158 | { |
| 157 | return NULL; | 159 | return NULL; |
| 158 | } | 160 | } |
| 161 | static inline int set_ioapic_sid(struct irte *irte, int apic) | ||
| 162 | { | ||
| 163 | return 0; | ||
| 164 | } | ||
| 165 | static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | ||
| 166 | { | ||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | |||
| 159 | #define irq_remapped(irq) (0) | 170 | #define irq_remapped(irq) (0) |
| 160 | #define enable_intr_remapping(mode) (-1) | 171 | #define enable_intr_remapping(mode) (-1) |
| 161 | #define disable_intr_remapping() (0) | 172 | #define disable_intr_remapping() (0) |
