author    Joerg Roedel <joerg.roedel@amd.com>      2008-09-11 04:24:48 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-09-19 06:59:21 -0400
commit    bd60b735c658e6e8c656e89771d281bcfcf51279 (patch)
tree      3587d684e56ed7e057d30b022fa1985990a08939
parent    b39ba6ad004a31bf2a08ba2b08c1e0f9b3530bb7 (diff)
AMD IOMMU: don't assign preallocated protection domains to devices
In isolation mode the protection domains for the devices are preallocated and
preassigned. This is bad if a device should be passed to a virtualization guest
because the IOMMU code does not know if it is in use by a driver. This patch
changes the code to assign the device to the preallocated domain only if there
are dma mapping requests for it.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
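For orientation before reading the hunks, here is a minimal, hypothetical
userspace sketch of the idea behind the patch, not the kernel code itself: the
array-backed list, the in_use flag, and the example device IDs are
simplifications standing in for the spinlock-protected iommu_pd_list and
list_del() used below.

/*
 * Sketch: preallocated protection domains are tagged with the device they
 * were built for, and a device is only attached to "its" preallocated domain
 * the first time it actually issues a DMA mapping request; everything else
 * falls back to the default domain.
 */
#include <stdbool.h>
#include <stdio.h>

struct dma_ops_domain {
	unsigned short target_dev; /* device this domain was preallocated for */
	bool in_use;               /* stands in for list_del() in the kernel  */
};

/* stands in for iommu_pd_list (kernel: linked list under a spinlock) */
static struct dma_ops_domain pd_list[] = {
	{ .target_dev = 0x0010, .in_use = false },
	{ .target_dev = 0x0018, .in_use = false },
};

/* analogous to find_protection_domain(): take the matching entry off the list */
static struct dma_ops_domain *find_protection_domain(unsigned short devid)
{
	for (size_t i = 0; i < sizeof(pd_list) / sizeof(pd_list[0]); i++) {
		if (!pd_list[i].in_use && pd_list[i].target_dev == devid) {
			pd_list[i].in_use = true; /* kernel: list_del() */
			return &pd_list[i];
		}
	}
	return NULL; /* caller falls back to the default domain */
}

int main(void)
{
	/* first DMA request from device 0x0018: it gets its preallocated domain */
	struct dma_ops_domain *dom = find_protection_domain(0x0018);
	printf("devid 0x0018 -> %s domain\n", dom ? "preallocated" : "default");

	/* a device with no preallocated domain uses the default one */
	dom = find_protection_domain(0x0020);
	printf("devid 0x0020 -> %s domain\n", dom ? "preallocated" : "default");
	return 0;
}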
-rw-r--r--  arch/x86/kernel/amd_iommu.c        | 43
-rw-r--r--  include/asm-x86/amd_iommu_types.h  |  6
2 files changed, 44 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a6a6f8ed1cf5..7c1791447451 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -33,6 +33,10 @@
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
+/* A list of preallocated protection domains */
+static LIST_HEAD(iommu_pd_list);
+static DEFINE_SPINLOCK(iommu_pd_list_lock);
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -663,6 +667,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->next_bit = 0;
 
 	dma_dom->need_flush = false;
+	dma_dom->target_dev = 0xffff;
 
 	/* Intialize the exclusion range if necessary */
 	if (iommu->exclusion_start &&
@@ -769,6 +774,33 @@ static bool check_device(struct device *dev)
 }
 
 /*
+ * In this function the list of preallocated protection domains is traversed to
+ * find the domain for a specific device
+ */
+static struct dma_ops_domain *find_protection_domain(u16 devid)
+{
+	struct dma_ops_domain *entry, *ret = NULL;
+	unsigned long flags;
+
+	if (list_empty(&iommu_pd_list))
+		return NULL;
+
+	spin_lock_irqsave(&iommu_pd_list_lock, flags);
+
+	list_for_each_entry(entry, &iommu_pd_list, list) {
+		if (entry->target_dev == devid) {
+			ret = entry;
+			list_del(&ret->list);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+	return ret;
+}
+
+/*
  * In the dma_ops path we only have the struct device. This function
  * finds the corresponding IOMMU, the protection domain and the
  * requestor id for a given device.
@@ -803,9 +835,11 @@ static int get_device_resources(struct device *dev,
 	*iommu = amd_iommu_rlookup_table[*bdf];
 	if (*iommu == NULL)
 		return 0;
-	dma_dom = (*iommu)->default_dom;
 	*domain = domain_for_device(*bdf);
 	if (*domain == NULL) {
+		dma_dom = find_protection_domain(*bdf);
+		if (!dma_dom)
+			dma_dom = (*iommu)->default_dom;
 		*domain = &dma_dom->domain;
 		set_device_domain(*iommu, *domain, *bdf);
 		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
@@ -1257,10 +1291,9 @@ void prealloc_protection_domains(void)
 		if (!dma_dom)
 			continue;
 		init_unity_mappings_for_device(dma_dom, devid);
-		set_device_domain(iommu, &dma_dom->domain, devid);
-		printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
-		       dma_dom->domain.id);
-		print_devid(devid, 1);
+		dma_dom->target_dev = devid;
+
+		list_add_tail(&dma_dom->list, &iommu_pd_list);
 	}
 }
 
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index d8c5a6c69955..9aa22ead22f3 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -227,6 +227,12 @@ struct dma_ops_domain {
 
 	/* This will be set to true when TLB needs to be flushed */
 	bool need_flush;
+
+	/*
+	 * if this is a preallocated domain, keep the device for which it was
+	 * preallocated in this variable
+	 */
+	u16 target_dev;
 };
 
 /*