author     David Woodhouse <David.Woodhouse@intel.com>    2009-07-08 10:23:30 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>    2009-07-15 03:17:02 -0400
commit     3d39cecc4841e8d4c4abdb401d10180f5faaded0 (patch)
tree       8aa51bc0e644752384c2734ecc25ea851dc919f7
parent     147202aa772329a02c6e80bc2b7a6b8dd3deac0b (diff)
intel-iommu: Remove superfluous iova_alloc_lock from IOVA code
We only ever obtain this lock immediately before the iova_rbtree_lock,
and release it immediately after the iova_rbtree_lock. So ditch it and
just use iova_rbtree_lock.

[v2: Remove the lockdep bits this time too]

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
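For illustration, a minimal before/after sketch of the nesting pattern the
commit message describes (it mirrors the reserve_iova() change in the diff
below; iovad stands in for any struct iova_domain *). Because the alloc lock
was only ever taken strictly around the rbtree lock, dropping it leaves the
same mutual exclusion in place:

    /* Before: iova_alloc_lock always nests directly around iova_rbtree_lock. */
    spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
    spin_lock(&iovad->iova_rbtree_lock);
    /* ... walk or update iovad->rbroot ... */
    spin_unlock(&iovad->iova_rbtree_lock);
    spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

    /* After: the rbtree lock alone provides the same exclusion. */
    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
    /* ... walk or update iovad->rbroot ... */
    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);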
-rw-r--r--  drivers/pci/intel-iommu.c |  3
-rw-r--r--  drivers/pci/iova.c        | 16
-rw-r--r--  include/linux/iova.h      |  1
3 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c5f7c73cbb55..d6a857397ec3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1309,7 +1309,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1320,8 +1319,6 @@ static void dmar_init_reserved_ranges(void)
 
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-		&reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
 
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
 
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
 
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 228f6c94b69c..76a0759e88ec 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -28,7 +28,6 @@ struct iova {
 
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */