author	Joerg Roedel <joerg.roedel@amd.com>	2009-08-26 10:52:40 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-09-03 10:15:34 -0400
commit	2650815fb03fe2bf1e6701584087ba669dcf92cd (patch)
tree	7b54969835f05ed3939fa91185e506842a056333 /arch/x86/kernel/amd_iommu.c
parent	ac0101d396fee24994632f71b55b9f7f9ee16eff (diff)
x86/amd-iommu: Add core functions for pd allocation/freeing
This patch factors parts of the protection domain allocation code out into separate functions, so that the same logic can later be used to allocate the passthrough domain. As a side effect, this patch fixes an unlikely domain id leak.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
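The "unlikely domain id leak" refers to the old error path in amd_iommu_domain_init(), visible in the removed lines of the diff below: if an allocation step failed after domain_id_alloc() had already succeeded, the code only called kfree(domain) and never returned the id. The new protection_domain_free() releases the id as well. The following standalone userspace sketch (not kernel code; domain_id_alloc()/domain_id_free() are stubbed with a simple counter here, while the real helpers manage a global id bitmap) illustrates the difference:

/*
 * Userspace sketch of the domain id leak -- not kernel code.
 * domain_id_alloc()/domain_id_free() are stand-ins for the real
 * bitmap-based helpers in amd_iommu.c.
 */
#include <stdio.h>
#include <stdlib.h>

static int ids_in_use;

static unsigned int domain_id_alloc(void)   { return (unsigned int)++ids_in_use; }
static void domain_id_free(unsigned int id) { (void)id; --ids_in_use; }

struct protection_domain { unsigned int id; };

/* Pre-patch error path: only the structure is freed, the id leaks. */
static void old_error_path(void)
{
	struct protection_domain *domain = calloc(1, sizeof(*domain));

	if (!domain)
		return;
	domain->id = domain_id_alloc();
	/* ...pretend get_zeroed_page() failed here... */
	free(domain);			/* BUG: domain->id stays allocated */
}

/* Post-patch helper (mirrors the new protection_domain_free()). */
static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;
	if (domain->id)
		domain_id_free(domain->id);
	free(domain);
}

static void new_error_path(void)
{
	struct protection_domain *domain = calloc(1, sizeof(*domain));

	if (!domain)
		return;
	domain->id = domain_id_alloc();
	/* ...pretend get_zeroed_page() failed here... */
	protection_domain_free(domain);	/* id is released as well */
}

int main(void)
{
	old_error_path();
	printf("old error path: %d id(s) leaked\n", ids_in_use);	/* prints 1 */

	ids_in_use = 0;
	new_error_path();
	printf("new error path: %d id(s) leaked\n", ids_in_use);	/* prints 0 */
	return 0;
}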
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	36
1 file changed, 32 insertions, 4 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 6c99f5037801..0934348abfad 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1988,19 +1988,47 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+	if (!domain)
+		return;
+
+	if (domain->id)
+		domain_id_free(domain->id);
+
+	kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
 {
 	struct protection_domain *domain;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
-		return -ENOMEM;
+		return NULL;
 
 	spin_lock_init(&domain->lock);
-	domain->mode = PAGE_MODE_3_LEVEL;
 	domain->id = domain_id_alloc();
 	if (!domain->id)
+		goto out_err;
+
+	return domain;
+
+out_err:
+	kfree(domain);
+
+	return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+	struct protection_domain *domain;
+
+	domain = protection_domain_alloc();
+	if (!domain)
 		goto out_free;
+
+	domain->mode = PAGE_MODE_3_LEVEL;
 	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!domain->pt_root)
 		goto out_free;
@@ -2010,7 +2038,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
 	return 0;
 
 out_free:
-	kfree(domain);
+	protection_domain_free(domain);
 
 	return -ENOMEM;
 }
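The commit message's stated motivation is that the same allocation logic can later be used for the passthrough domain. A purely hypothetical caller (the pt_domain variable and the function name below are illustrative, not taken from an actual follow-up patch) would pair the two new helpers roughly like this:

/* Hypothetical follow-up user of the new helpers -- illustrative only. */
static struct protection_domain *pt_domain;

static int alloc_passthrough_domain(void)
{
	pt_domain = protection_domain_alloc();
	if (!pt_domain)
		return -ENOMEM;

	/*
	 * A passthrough domain performs no translation, so no page table
	 * (pt_root) is set up here; mode handling is left out of this sketch.
	 */
	return 0;
}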