author     Joerg Roedel <joerg.roedel@amd.com>   2010-02-08 08:44:49 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>   2010-03-01 08:16:22 -0500
commit     5d214fe6e808a8caa9cb6f610c0190d3f50ac570
tree       326b6d2ea83d9a5df8ea4eef91834dc3e0da2bac
parent     339d3261aa3eb0e12f68ef868e042c1ca03628f7
x86/amd-iommu: Protect IOMMU-API map/unmap path
This patch introduces a mutex to lock page table updates in the IOMMU-API path. We can't use the spin_lock here because this path might sleep.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
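The constraint behind the change: updates to the page table in the IOMMU-API path may need to allocate new page-table pages, and such allocations (typically GFP_KERNEL) can block, which is not permitted while holding a spinlock, whereas holding a mutex is fine. The following minimal sketch illustrates only that locking pattern; example_api_lock and example_map_pages are hypothetical names for illustration, not code from this driver:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical stand-in for domain->api_lock. */
static DEFINE_MUTEX(example_api_lock);

/* Sketch: the whole multi-page update runs under a mutex because the
 * allocations inside the loop may sleep. */
static int example_map_pages(unsigned long npages)
{
	unsigned long i;
	int ret = 0;

	mutex_lock(&example_api_lock);		/* sleeping is allowed here */

	for (i = 0; i < npages; ++i) {
		/* GFP_KERNEL allocations can sleep -- illegal under a
		 * spinlock, fine under a mutex. */
		void *pte_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pte_page) {
			ret = -ENOMEM;
			break;
		}
		kfree(pte_page);	/* illustration only; a real map path keeps the page */
	}

	mutex_unlock(&example_api_lock);

	return ret;
}

Taking the lock outside the loop, as the patch below does, also keeps each map or unmap request atomic with respect to other IOMMU-API callers on the same domain.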
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  2
-rw-r--r--  arch/x86/kernel/amd_iommu.c              9
2 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..5e46e78f3b1b 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
@@ -237,6 +238,7 @@ struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 2c4a5012038e..b97f2f1c449a 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2327,6 +2327,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
@@ -2456,6 +2457,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
@@ -2465,6 +2468,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}
 
+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }
 
@@ -2477,12 +2482,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
 	iova &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,