author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750
tree		644b88f8a71896307d71438e9b3af49126ffb22b
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/pci/intel-iommu.c')

 drivers/pci/intel-iommu.c | 96 ++++++++++++++++++++++++++----------------------
 1 file changed, 53 insertions(+), 43 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1840a0578a42..417312528ddf 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
 
 struct dmar_domain {
 	int	id;			/* domain id */
+	int	nid;			/* node id */
 	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
 
 	struct list_head devices; 	/* all devices' list */
@@ -304,7 +305,7 @@ struct device_domain_info {
 	int segment;		/* PCI domain */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
-	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
 };
@@ -386,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static struct kmem_cache *iommu_iova_cache;
 
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+static inline void *alloc_pgtable_page(int node)
 {
-	unsigned int flags;
-	void *vaddr;
-
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
-	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
-	current->flags &= (~PF_MEMALLOC | flags);
-	return vaddr;
-}
-
-
-static inline void *alloc_pgtable_page(void)
-{
-	unsigned int flags;
-	void *vaddr;
+	struct page *page;
+	void *vaddr = NULL;
 
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
-	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
-	current->flags &= (~PF_MEMALLOC | flags);
+	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+	if (page)
+		vaddr = page_address(page);
 	return vaddr;
 }
 
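A minimal usage sketch of the reworked allocator (illustrative only, not
part of the patch): alloc_pages_node() treats a negative node as "no
preference" and falls back to the current CPU's node, which is why callers
may safely pass domain->nid while it is still -1.

	/* sketch: allocate and free one zeroed page-table page */
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *pgtable_alloc_sketch(int node)
	{
		struct page *page;

		/* node < 0 means "local node"; one zeroed order-0 page */
		page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
		return page ? page_address(page) : NULL;
	}

	static void pgtable_free_sketch(void *vaddr)
	{
		/* mirrors the existing free_pgtable_page() */
		free_page((unsigned long)vaddr);
	}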
@@ -420,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
 
 static inline void *alloc_domain_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_domain_cache);
+	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
 }
 
 static void free_domain_mem(void *vaddr)
@@ -430,7 +415,7 @@ static void free_domain_mem(void *vaddr)
 
 static inline void * alloc_devinfo_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
 }
 
 static inline void free_devinfo_mem(void *vaddr)
@@ -440,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
 
 struct iova *alloc_iova_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_iova_cache);
+	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
 }
 
 void free_iova_mem(struct iova *iova)
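For the record, what the removed wrapper was doing, annotated (a sketch of
the old code, kept here only to explain why it could go):

	/*
	 * flags = current->flags & PF_MEMALLOC;     save the PF_MEMALLOC bit
	 * current->flags |= PF_MEMALLOC;            allow emergency reserves
	 * vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	 * current->flags &= (~PF_MEMALLOC | flags); restore: clear the bit
	 *                                           only if it was clear
	 *
	 * GFP_ATOMIC already marks the allocation as atomic and allowed to
	 * dig into reserves, so the flag juggling added nothing; the three
	 * call sites above now ask the slab caches directly.
	 */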
@@ -589,7 +574,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 	root = &iommu->root_entry[bus];
 	context = get_context_addr_from_root(root);
 	if (!context) {
-		context = (struct context_entry *)alloc_pgtable_page();
+		context = (struct context_entry *)
+				alloc_pgtable_page(iommu->node);
 		if (!context) {
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
@@ -732,7 +718,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
 
-			tmp_page = alloc_pgtable_page();
+			tmp_page = alloc_pgtable_page(domain->nid);
 
 			if (!tmp_page)
 				return NULL;
@@ -868,7 +854,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	struct root_entry *root;
 	unsigned long flags;
 
-	root = (struct root_entry *)alloc_pgtable_page();
+	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
 	if (!root)
 		return -ENOMEM;
 
@@ -1263,6 +1249,7 @@ static struct dmar_domain *alloc_domain(void)
 	if (!domain)
 		return NULL;
 
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = 0;
 
@@ -1420,9 +1407,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 		domain->iommu_snooping = 0;
 
 	domain->iommu_count = 1;
+	domain->nid = iommu->node;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1523,12 +1511,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 
 		/* Skip top levels of page tables for
 		 * iommu which has less agaw than default.
+		 * Unnecessary for PT mode.
 		 */
-		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-			pgd = phys_to_virt(dma_pte_addr(pgd));
-			if (!dma_pte_present(pgd)) {
-				spin_unlock_irqrestore(&iommu->lock, flags);
-				return -ENOMEM;
+		if (translation != CONTEXT_TT_PASS_THROUGH) {
+			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+				pgd = phys_to_virt(dma_pte_addr(pgd));
+				if (!dma_pte_present(pgd)) {
+					spin_unlock_irqrestore(&iommu->lock, flags);
+					return -ENOMEM;
+				}
 			}
 		}
 	}
@@ -1577,6 +1568,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	spin_lock_irqsave(&domain->iommu_lock, flags);
 	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
 		domain->iommu_count++;
+		if (domain->iommu_count == 1)
+			domain->nid = iommu->node;
 		domain_update_iommu_cap(domain);
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
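Taken together with the hunks above, domain->nid now follows a simple
lifecycle; a hedged sketch (names as in the patch, flow condensed):

	/* sketch: the first IOMMU attached to a domain donates its node */
	static void nid_lifecycle_sketch(struct dmar_domain *domain,
					 struct intel_iommu *iommu)
	{
		/* at allocation time: domain->nid = -1 ("no node yet") */
		if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
			domain->iommu_count++;
			if (domain->iommu_count == 1)	/* first attach wins */
				domain->nid = iommu->node;
		}
		/* page tables then come from alloc_pgtable_page(domain->nid) */
	}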
@@ -1611,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
 			return ret;
 		parent = parent->bus->self;
 	}
-	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
 		return domain_context_mapping_one(domain,
 					pci_domain_nr(tmp->subordinate),
 					tmp->subordinate->number, 0,
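pci_is_pcie() replaces open-coded tests of the is_pcie bit here and in the
hunks below. In this kernel generation the helper boils down to checking
the cached PCI Express capability offset; roughly (see include/linux/pci.h
for the authoritative definition):

	static inline bool pci_is_pcie(struct pci_dev *dev)
	{
		/* non-zero iff a PCIe capability was found at probe time */
		return !!pci_pcie_cap(dev);
	}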
@@ -1651,7 +1644,7 @@ static int domain_context_mapped(struct pci_dev *pdev)
 			return ret;
 		parent = parent->bus->self;
 	}
-	if (tmp->is_pcie)
+	if (pci_is_pcie(tmp))
 		return device_context_mapped(iommu, tmp->subordinate->number,
 					     0);
 	else
@@ -1821,7 +1814,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 
 	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (dev_tmp) {
-		if (dev_tmp->is_pcie) {
+		if (pci_is_pcie(dev_tmp)) {
 			bus = dev_tmp->subordinate->number;
 			devfn = 0;
 		} else {
@@ -1991,6 +1984,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1991 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1984 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1992 pci_name(pdev), start, end); 1985 pci_name(pdev), start, end);
1993 1986
1987 if (end < start) {
1988 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
1989 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1990 dmi_get_system_info(DMI_BIOS_VENDOR),
1991 dmi_get_system_info(DMI_BIOS_VERSION),
1992 dmi_get_system_info(DMI_PRODUCT_VERSION));
1993 ret = -EIO;
1994 goto error;
1995 }
1996
1994 if (end >> agaw_to_width(domain->agaw)) { 1997 if (end >> agaw_to_width(domain->agaw)) {
1995 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" 1998 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1996 "BIOS vendor: %s; Ver: %s; Product Version: %s\n", 1999 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2182,7 +2185,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	 * the 1:1 domain, just in _case_ one of their siblings turns out
 	 * not to be able to map all of memory.
 	 */
-	if (!pdev->is_pcie) {
+	if (!pci_is_pcie(pdev)) {
 		if (!pci_is_root_bus(pdev->bus))
 			return 0;
 		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
@@ -3228,6 +3231,9 @@ static int device_notifier(struct notifier_block *nb,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
 
+	if (iommu_no_mapping(dev))
+		return 0;
+
 	domain = find_domain(pdev);
 	if (!domain)
 		return 0;
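The notifier previously tried to find and tear down a domain even for
devices that bypass DMA remapping. A condensed sketch of the new guard's
intent (iommu_no_mapping() is the same predicate the DMA-ops entry points
use for identity-mapped and passthrough devices):

	/* sketch: skip devices that have no private IOMMU domain */
	static int notifier_guard_sketch(struct device *dev)
	{
		if (iommu_no_mapping(dev))
			return 0;	/* identity/passthrough: nothing to undo */
		/* ... otherwise find_domain() and detach on unbind ... */
		return 0;
	}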
@@ -3266,7 +3272,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();
@@ -3287,7 +3293,9 @@ int __init intel_iommu_init(void)
3287 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); 3293 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3288 3294
3289 init_timer(&unmap_timer); 3295 init_timer(&unmap_timer);
3290 force_iommu = 1; 3296#ifdef CONFIG_SWIOTLB
3297 swiotlb = 0;
3298#endif
3291 dma_ops = &intel_dma_ops; 3299 dma_ops = &intel_dma_ops;
3292 3300
3293 init_iommu_sysfs(); 3301 init_iommu_sysfs();
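The swiotlb interaction also changes shape: instead of refusing to
initialize whenever swiotlb was active, intel_iommu_init() now switches
swiotlb off itself once hardware remapping is known to work, so the bounce
buffers remain available as a fallback if IOMMU setup fails earlier. A
hedged sketch of the ordering:

	/* sketch: who owns DMA mapping, in order of decision */
	if (no_iommu || dmar_disabled)
		return -ENODEV;		/* swiotlb (if built) stays in charge */

	/* ... init_dmars() etc. succeeded beyond this point ... */
	#ifdef CONFIG_SWIOTLB
	swiotlb = 0;			/* remapping works; drop bounce buffers */
	#endif
	dma_ops = &intel_dma_ops;	/* IOMMU-backed DMA ops take over */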
@@ -3317,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 					 parent->devfn);
 			parent = parent->bus->self;
 		}
-		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
 			iommu_detach_dev(iommu,
 				tmp->subordinate->number, 0);
 		else /* this is a legacy PCI bridge */
@@ -3453,6 +3461,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
 		return NULL;
 
 	domain->id = vm_domid++;
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
 
@@ -3479,9 +3488,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->iommu_coherency = 0;
 	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
+	domain->nid = -1;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);