aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-05-19 06:16:29 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2009-05-28 12:16:49 -0400
commitd9cfed925448f097ec7faab80d903eb7e5f99712 (patch)
tree79a88e1763f05d128f592cd96e6ed25234a245bb /arch
parent11b83888ae729457b5cfb936dbd498481f6408df (diff)
amd-iommu: remove amd_iommu_size kernel parameter
This parameter is no longer necessary now that the aperture increases dynamically. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/amd_iommu.c18
-rw-r--r--arch/x86/kernel/amd_iommu_init.c15
2 files changed, 4 insertions, 29 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d129d8feba07..31d56c36010a 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -939,17 +939,10 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
939 * It also intializes the page table and the address allocator data 939 * It also intializes the page table and the address allocator data
940 * structures required for the dma_ops interface 940 * structures required for the dma_ops interface
941 */ 941 */
942static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, 942static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
943 unsigned order)
944{ 943{
945 struct dma_ops_domain *dma_dom; 944 struct dma_ops_domain *dma_dom;
946 945
947 /*
948 * Currently the DMA aperture must be between 32 MB and 1GB in size
949 */
950 if ((order < 25) || (order > 30))
951 return NULL;
952
953 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); 946 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
954 if (!dma_dom) 947 if (!dma_dom)
955 return NULL; 948 return NULL;
@@ -1087,7 +1080,6 @@ static int device_change_notifier(struct notifier_block *nb,
1087 struct protection_domain *domain; 1080 struct protection_domain *domain;
1088 struct dma_ops_domain *dma_domain; 1081 struct dma_ops_domain *dma_domain;
1089 struct amd_iommu *iommu; 1082 struct amd_iommu *iommu;
1090 int order = amd_iommu_aperture_order;
1091 unsigned long flags; 1083 unsigned long flags;
1092 1084
1093 if (devid > amd_iommu_last_bdf) 1085 if (devid > amd_iommu_last_bdf)
@@ -1126,7 +1118,7 @@ static int device_change_notifier(struct notifier_block *nb,
1126 dma_domain = find_protection_domain(devid); 1118 dma_domain = find_protection_domain(devid);
1127 if (dma_domain) 1119 if (dma_domain)
1128 goto out; 1120 goto out;
1129 dma_domain = dma_ops_domain_alloc(iommu, order); 1121 dma_domain = dma_ops_domain_alloc(iommu);
1130 if (!dma_domain) 1122 if (!dma_domain)
1131 goto out; 1123 goto out;
1132 dma_domain->target_dev = devid; 1124 dma_domain->target_dev = devid;
@@ -1826,7 +1818,6 @@ static void prealloc_protection_domains(void)
1826 struct pci_dev *dev = NULL; 1818 struct pci_dev *dev = NULL;
1827 struct dma_ops_domain *dma_dom; 1819 struct dma_ops_domain *dma_dom;
1828 struct amd_iommu *iommu; 1820 struct amd_iommu *iommu;
1829 int order = amd_iommu_aperture_order;
1830 u16 devid; 1821 u16 devid;
1831 1822
1832 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1823 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@@ -1839,7 +1830,7 @@ static void prealloc_protection_domains(void)
1839 iommu = amd_iommu_rlookup_table[devid]; 1830 iommu = amd_iommu_rlookup_table[devid];
1840 if (!iommu) 1831 if (!iommu)
1841 continue; 1832 continue;
1842 dma_dom = dma_ops_domain_alloc(iommu, order); 1833 dma_dom = dma_ops_domain_alloc(iommu);
1843 if (!dma_dom) 1834 if (!dma_dom)
1844 continue; 1835 continue;
1845 init_unity_mappings_for_device(dma_dom, devid); 1836 init_unity_mappings_for_device(dma_dom, devid);
@@ -1865,7 +1856,6 @@ static struct dma_map_ops amd_iommu_dma_ops = {
1865int __init amd_iommu_init_dma_ops(void) 1856int __init amd_iommu_init_dma_ops(void)
1866{ 1857{
1867 struct amd_iommu *iommu; 1858 struct amd_iommu *iommu;
1868 int order = amd_iommu_aperture_order;
1869 int ret; 1859 int ret;
1870 1860
1871 /* 1861 /*
@@ -1874,7 +1864,7 @@ int __init amd_iommu_init_dma_ops(void)
1874 * protection domain will be assigned to the default one. 1864 * protection domain will be assigned to the default one.
1875 */ 1865 */
1876 list_for_each_entry(iommu, &amd_iommu_list, list) { 1866 list_for_each_entry(iommu, &amd_iommu_list, list) {
1877 iommu->default_dom = dma_ops_domain_alloc(iommu, order); 1867 iommu->default_dom = dma_ops_domain_alloc(iommu);
1878 if (iommu->default_dom == NULL) 1868 if (iommu->default_dom == NULL)
1879 return -ENOMEM; 1869 return -ENOMEM;
1880 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; 1870 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 8c0be0902dac..762a4eefec93 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,6 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
121 to handle */ 121 to handle */
122LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 122LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
123 we find in ACPI */ 123 we find in ACPI */
124unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
125bool amd_iommu_isolate = true; /* if true, device isolation is 124bool amd_iommu_isolate = true; /* if true, device isolation is
126 enabled */ 125 enabled */
127bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 126bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
@@ -1137,9 +1136,6 @@ int __init amd_iommu_init(void)
1137 1136
1138 enable_iommus(); 1137 enable_iommus();
1139 1138
1140 printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
1141 (1 << (amd_iommu_aperture_order-20)));
1142
1143 printk(KERN_INFO "AMD IOMMU: device isolation "); 1139 printk(KERN_INFO "AMD IOMMU: device isolation ");
1144 if (amd_iommu_isolate) 1140 if (amd_iommu_isolate)
1145 printk("enabled\n"); 1141 printk("enabled\n");
@@ -1225,15 +1221,4 @@ static int __init parse_amd_iommu_options(char *str)
1225 return 1; 1221 return 1;
1226} 1222}
1227 1223
1228static int __init parse_amd_iommu_size_options(char *str)
1229{
1230 unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
1231
1232 if ((order > 24) && (order < 31))
1233 amd_iommu_aperture_order = order;
1234
1235 return 1;
1236}
1237
1238__setup("amd_iommu=", parse_amd_iommu_options); 1224__setup("amd_iommu=", parse_amd_iommu_options);
1239__setup("amd_iommu_size=", parse_amd_iommu_size_options);