author     Joerg Roedel <joerg.roedel@amd.com>    2009-05-19 06:16:29 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-05-28 12:16:49 -0400
commit     d9cfed925448f097ec7faab80d903eb7e5f99712
tree       79a88e1763f05d128f592cd96e6ed25234a245bb    /arch/x86/kernel/amd_iommu.c
parent     11b83888ae729457b5cfb936dbd498481f6408df
amd-iommu: remove amd_iommu_size kernel parameter
This parameter is no longer necessary now that the aperture grows dynamically.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
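For readers unfamiliar with the mechanism the message refers to, the sketch below is a hypothetical, simplified illustration of "aperture increases dynamically" (it is not the kernel's actual allocator): instead of fixing the aperture size at boot via amd_iommu_size, the allocator starts at a default order and enlarges the aperture on demand up to a hard cap. All names and constants in the sketch are assumptions for illustration only.

/*
 * Hypothetical sketch -- not the kernel implementation. Illustrates the
 * idea behind "aperture increases dynamically": start with a default
 * aperture and grow it when an allocation does not fit, instead of
 * fixing the size up front with a boot parameter.
 */
#include <stdbool.h>
#include <stddef.h>

#define APERTURE_DEFAULT_ORDER	25	/* assumed: start at 32 MB */
#define APERTURE_MAX_ORDER	30	/* assumed: grow up to 1 GB */

struct aperture {
	unsigned int order;	/* current size is 1 << order bytes */
	size_t next_free;	/* trivial bump-allocator cursor */
};

static bool aperture_grow(struct aperture *ap)
{
	if (ap->order >= APERTURE_MAX_ORDER)
		return false;	/* hard cap reached */
	ap->order++;		/* double the aperture */
	return true;
}

/* Returns an offset inside the aperture, or (size_t)-1 on failure. */
static size_t aperture_alloc(struct aperture *ap, size_t size)
{
	while (ap->next_free + size > ((size_t)1 << ap->order)) {
		if (!aperture_grow(ap))	/* enlarge on demand ... */
			return (size_t)-1;	/* ... until the cap is hit */
	}
	ap->next_free += size;
	return ap->next_free - size;
}

Under such a model the old check in dma_ops_domain_alloc() that rejected orders outside 25..30 is no longer needed, which is what the hunk at line 939 below removes.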
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d129d8feba07..31d56c36010a 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -939,17 +939,10 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
  * It also intializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
-static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
-						   unsigned order)
+static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 {
 	struct dma_ops_domain *dma_dom;
 
-	/*
-	 * Currently the DMA aperture must be between 32 MB and 1GB in size
-	 */
-	if ((order < 25) || (order > 30))
-		return NULL;
-
 	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
 	if (!dma_dom)
 		return NULL;
@@ -1087,7 +1080,6 @@ static int device_change_notifier(struct notifier_block *nb,
 	struct protection_domain *domain;
 	struct dma_ops_domain *dma_domain;
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	unsigned long flags;
 
 	if (devid > amd_iommu_last_bdf)
@@ -1126,7 +1118,7 @@ static int device_change_notifier(struct notifier_block *nb,
 		dma_domain = find_protection_domain(devid);
 		if (dma_domain)
 			goto out;
-		dma_domain = dma_ops_domain_alloc(iommu, order);
+		dma_domain = dma_ops_domain_alloc(iommu);
 		if (!dma_domain)
 			goto out;
 		dma_domain->target_dev = devid;
@@ -1826,7 +1818,6 @@ static void prealloc_protection_domains(void)
 	struct pci_dev *dev = NULL;
 	struct dma_ops_domain *dma_dom;
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	u16 devid;
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@@ -1839,7 +1830,7 @@ static void prealloc_protection_domains(void)
 		iommu = amd_iommu_rlookup_table[devid];
 		if (!iommu)
 			continue;
-		dma_dom = dma_ops_domain_alloc(iommu, order);
+		dma_dom = dma_ops_domain_alloc(iommu);
 		if (!dma_dom)
 			continue;
 		init_unity_mappings_for_device(dma_dom, devid);
@@ -1865,7 +1856,6 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 int __init amd_iommu_init_dma_ops(void)
 {
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	int ret;
 
 	/*
@@ -1874,7 +1864,7 @@ int __init amd_iommu_init_dma_ops(void)
 	 * protection domain will be assigned to the default one.
 	 */
 	list_for_each_entry(iommu, &amd_iommu_list, list) {
-		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
+		iommu->default_dom = dma_ops_domain_alloc(iommu);
 		if (iommu->default_dom == NULL)
 			return -ENOMEM;
 		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;