author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 13:11:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 13:11:38 -0500
commit		a79960e576ebca9dbf24489b562689f2be7e9ff0
tree		b0748839230c2bba1d49ccdd732608d7d1f334cb
parent		661e338f728d101b4839b6b157d44cfcb80e3c5e
parent		cd7bcf32d42b15891620b3f1387a00178b54291a
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  implement early_io{re,un}map for ia64
  Revert "Intel IOMMU: Avoid memory allocation failures in dma map api calls"
  intel-iommu: ignore page table validation in pass through mode
  intel-iommu: Fix oops with intel_iommu=igfx_off
  intel-iommu: Check for an RMRR which ends before it starts.
  intel-iommu: Apply BIOS sanity checks for interrupt remapping too.
  intel-iommu: Detect DMAR in hyperspace at probe time.
  dmar: Fix build failure without NUMA, warn on bogus RHSA tables and don't abort
  iommu: Allocate dma-remapping structures using numa locality info
  intr_remap: Allocate intr-remapping table using numa locality info
  dmar: Allocate queued invalidation structure using numa locality info
  dmar: support for parsing Remapping Hardware Static Affinity structure
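The common thread of the NUMA-related commits above is replacing node-blind allocations (get_zeroed_page() and kmem_cache_alloc() behind a PF_MEMALLOC dance) with node-aware page allocation. As a minimal standalone sketch of that pattern (the helper name here is hypothetical; alloc_pages_node() is the real API, and it falls back to the current node when handed a negative node id such as the -1 these domains are initialized with):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only, not code from this merge: allocate one zeroed page
 * (order 0) on a specific NUMA node. GFP_ATOMIC because the callers
 * in intel-iommu.c hold spinlocks. A negative nid is accepted;
 * alloc_pages_node() substitutes the current node.
 */
static inline void *pgtable_page_on_node(int nid)
{
	struct page *page = alloc_pages_node(nid, GFP_ATOMIC | __GFP_ZERO, 0);

	return page ? page_address(page) : NULL;
}

In the diff below this pattern appears as alloc_pgtable_page(node), with the node taken from iommu->node or domain->nid depending on the call site.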
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 78
1 file changed, 43 insertions(+), 35 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8d6159426311..e56f9bed6f2b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
 
 struct dmar_domain {
 	int	id;			/* domain id */
+	int	nid;			/* node id */
 	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
 
 	struct list_head devices; 	/* all devices' list */
@@ -386,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static struct kmem_cache *iommu_iova_cache;
 
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+static inline void *alloc_pgtable_page(int node)
 {
-	unsigned int flags;
-	void *vaddr;
-
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
-	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
-	current->flags &= (~PF_MEMALLOC | flags);
-	return vaddr;
-}
-
+	struct page *page;
+	void *vaddr = NULL;
 
-static inline void *alloc_pgtable_page(void)
-{
-	unsigned int flags;
-	void *vaddr;
-
-	/* trying to avoid low memory issues */
-	flags = current->flags & PF_MEMALLOC;
-	current->flags |= PF_MEMALLOC;
-	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
-	current->flags &= (~PF_MEMALLOC | flags);
+	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+	if (page)
+		vaddr = page_address(page);
 	return vaddr;
 }
 
@@ -420,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
 
 static inline void *alloc_domain_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_domain_cache);
+	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
 }
 
 static void free_domain_mem(void *vaddr)
@@ -430,7 +415,7 @@ static void free_domain_mem(void *vaddr)
 
 static inline void * alloc_devinfo_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
 }
 
 static inline void free_devinfo_mem(void *vaddr)
@@ -440,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
 
 struct iova *alloc_iova_mem(void)
 {
-	return iommu_kmem_cache_alloc(iommu_iova_cache);
+	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
 }
 
 void free_iova_mem(struct iova *iova)
@@ -589,7 +574,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 	root = &iommu->root_entry[bus];
 	context = get_context_addr_from_root(root);
 	if (!context) {
-		context = (struct context_entry *)alloc_pgtable_page();
+		context = (struct context_entry *)
+				alloc_pgtable_page(iommu->node);
 		if (!context) {
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
@@ -732,7 +718,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
 
-			tmp_page = alloc_pgtable_page();
+			tmp_page = alloc_pgtable_page(domain->nid);
 
 			if (!tmp_page)
 				return NULL;
@@ -868,7 +854,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	struct root_entry *root;
 	unsigned long flags;
 
-	root = (struct root_entry *)alloc_pgtable_page();
+	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
 	if (!root)
 		return -ENOMEM;
 
@@ -1263,6 +1249,7 @@ static struct dmar_domain *alloc_domain(void)
 	if (!domain)
 		return NULL;
 
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = 0;
 
@@ -1420,9 +1407,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain->iommu_snooping = 0;
 
 	domain->iommu_count = 1;
+	domain->nid = iommu->node;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1523,12 +1511,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 
 		/* Skip top levels of page tables for
 		 * iommu which has less agaw than default.
+		 * Unnecessary for PT mode.
 		 */
-		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-			pgd = phys_to_virt(dma_pte_addr(pgd));
-			if (!dma_pte_present(pgd)) {
-				spin_unlock_irqrestore(&iommu->lock, flags);
-				return -ENOMEM;
+		if (translation != CONTEXT_TT_PASS_THROUGH) {
+			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+				pgd = phys_to_virt(dma_pte_addr(pgd));
+				if (!dma_pte_present(pgd)) {
+					spin_unlock_irqrestore(&iommu->lock, flags);
+					return -ENOMEM;
+				}
 			}
 		}
 	}
@@ -1577,6 +1568,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	spin_lock_irqsave(&domain->iommu_lock, flags);
 	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
 		domain->iommu_count++;
+		if (domain->iommu_count == 1)
+			domain->nid = iommu->node;
 		domain_update_iommu_cap(domain);
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -1991,6 +1984,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
 	       pci_name(pdev), start, end);
 
+	if (end < start) {
+		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+			dmi_get_system_info(DMI_BIOS_VENDOR),
+			dmi_get_system_info(DMI_BIOS_VERSION),
+			dmi_get_system_info(DMI_PRODUCT_VERSION));
+		ret = -EIO;
+		goto error;
+	}
+
 	if (end >> agaw_to_width(domain->agaw)) {
 		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
 		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -3228,6 +3231,9 @@ static int device_notifier(struct notifier_block *nb,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
 
+	if (iommu_no_mapping(dev))
+		return 0;
+
 	domain = find_domain(pdev);
 	if (!domain)
 		return 0;
@@ -3455,6 +3461,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
 		return NULL;
 
 	domain->id = vm_domid++;
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
 
@@ -3481,9 +3488,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->iommu_coherency = 0;
 	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
+	domain->nid = -1;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);