author		David Woodhouse <David.Woodhouse@intel.com>	2009-07-08 10:23:30 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-07-15 03:17:02 -0400
commit	3d39cecc4841e8d4c4abdb401d10180f5faaded0 (patch)
tree	8aa51bc0e644752384c2734ecc25ea851dc919f7 /drivers/pci/iova.c
parent	147202aa772329a02c6e80bc2b7a6b8dd3deac0b (diff)
intel-iommu: Remove superfluous iova_alloc_lock from IOVA code
We only ever obtain this lock immediately before the iova_rbtree_lock,
and release it immediately after the iova_rbtree_lock. So ditch it and
just use iova_rbtree_lock.

[v2: Remove the lockdep bits this time too]

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
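The rationale is easiest to see as a pattern. Because iova_alloc_lock is only
ever taken immediately before iova_rbtree_lock and released immediately after
it, the outer lock never excludes anything the inner lock does not already
exclude. A schematic before/after sketch of the transformation (simplified,
not the literal kernel code):

/* Before: iova_alloc_lock strictly nests around iova_rbtree_lock. */
spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
spin_lock(&iovad->iova_rbtree_lock);
/* ... walk or modify iovad->rbroot ... */
spin_unlock(&iovad->iova_rbtree_lock);
spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

/* After: one lock suffices. The irqsave/irqrestore moves onto the
 * rbtree lock, so the critical section keeps its IRQ protection. */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
/* ... walk or modify iovad->rbroot ... */
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);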
Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r--	drivers/pci/iova.c	16
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-        spin_lock_init(&iovad->iova_alloc_lock);
         spin_lock_init(&iovad->iova_rbtree_lock);
         iovad->rbroot = RB_ROOT;
         iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
         unsigned long limit_pfn,
         bool size_aligned)
 {
-        unsigned long flags;
         struct iova *new_iova;
         int ret;
 
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
         if (size_aligned)
                 size = __roundup_pow_of_two(size);
 
-        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
         ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                         new_iova, size_aligned);
 
-        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
         if (ret) {
                 free_iova_mem(new_iova);
                 return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
         struct iova *iova;
         unsigned int overlap = 0;
 
-        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-        spin_lock(&iovad->iova_rbtree_lock);
+        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
         for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                 if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                         iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
         iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-        spin_unlock(&iovad->iova_rbtree_lock);
-        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
         return iova;
 }
 
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
         unsigned long flags;
         struct rb_node *node;
 
-        spin_lock_irqsave(&from->iova_alloc_lock, flags);
-        spin_lock(&from->iova_rbtree_lock);
+        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
         for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                 struct iova *iova = container_of(node, struct iova, node);
                 struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
                 printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
                         iova->pfn_lo, iova->pfn_lo);
         }
-        spin_unlock(&from->iova_rbtree_lock);
-        spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
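Not visible in this diff (limited to drivers/pci/iova.c): with every user of
iova_alloc_lock gone, the field itself can be dropped from struct iova_domain,
presumably in include/linux/iova.h. A sketch of what that companion change
plausibly looks like; the surrounding fields and comments here are assumptions,
not the verbatim header:

/* include/linux/iova.h -- sketch, not verbatim */
struct iova_domain {
        /* spinlock_t iova_alloc_lock; -- removed as superfluous */
        spinlock_t      iova_rbtree_lock; /* protects updates of the rbtree */
        struct rb_root  rbroot;           /* iova domain rbtree root */
        struct rb_node  *cached32_node;   /* save last allocated node */
        unsigned long   dma_32bit_pfn;
};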