author		Geliang Tang <geliangtang@gmail.com>	2016-12-19 09:46:58 -0500
committer	Joerg Roedel <jroedel@suse.de>		2017-01-04 09:35:19 -0500
commit		eba484b51b8c1346759785d40fa4d6ec5590b705 (patch)
tree		e7ede74c662acd2a78f58f41517559d51ceef112
parent		0c744ea4f77d72b3dcebb7a8f2684633ec79be88 (diff)
iommu/iova: Use rb_entry()
To make the code clearer, use rb_entry() instead of container_of() to
deal with rbtree.

Signed-off-by: Geliang Tang <geliangtang@gmail.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/iova.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
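For reference, rb_entry() is defined in include/linux/rbtree.h as a one-line
wrapper around container_of(), so the patch is a pure rename with no
behavioral change; it simply names the operation after the data structure
being walked:

	#define rb_entry(ptr, type, member) container_of(ptr, type, member)

Each converted call below therefore still resolves a struct rb_node pointer
back to its enclosing struct iova, exactly as before.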
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 080beca0197d..b7268a14184f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 	else {
 		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
 		struct iova *curr_iova =
-			container_of(iovad->cached32_node, struct iova, node);
+			rb_entry(iovad->cached32_node, struct iova, node);
 		*limit_pfn = curr_iova->pfn_lo - 1;
 		return prev_node;
 	}
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	if (!iovad->cached32_node)
 		return;
 	curr = iovad->cached32_node;
-	cached_iova = container_of(curr, struct iova, node);
+	cached_iova = rb_entry(curr, struct iova, node);
 
 	if (free->pfn_lo >= cached_iova->pfn_lo) {
 		struct rb_node *node = rb_next(&free->node);
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		/* only cache if it's below 32bit pfn */
 		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
 	prev = curr;
 	while (curr) {
-		struct iova *curr_iova = container_of(curr, struct iova, node);
+		struct iova *curr_iova = rb_entry(curr, struct iova, node);
 
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
@@ -171,8 +171,7 @@ move_left:
 
 	/* Figure out where to put new node */
 	while (*entry) {
-		struct iova *this = container_of(*entry,
-					struct iova, node);
+		struct iova *this = rb_entry(*entry, struct iova, node);
 		parent = *entry;
 
 		if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 	/* Figure out where to put new node */
 	while (*new) {
-		struct iova *this = container_of(*new, struct iova, node);
+		struct iova *this = rb_entry(*new, struct iova, node);
 
 		parent = *new;
 
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 	assert_spin_locked(&iovad->iova_rbtree_lock);
 
 	while (node) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		/* If pfn falls within iova's range, return iova */
 		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	node = rb_first(&iovad->rbroot);
 	while (node) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		rb_erase(node, &iovad->rbroot);
 		free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
 __is_range_overlap(struct rb_node *node,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
-	struct iova *iova = container_of(node, struct iova, node);
+	struct iova *iova = rb_entry(node, struct iova, node);
 
 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
 		return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-			iova = container_of(node, struct iova, node);
+			iova = rb_entry(node, struct iova, node);
 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
 			if ((pfn_lo >= iova->pfn_lo) &&
 			    (pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 
 	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 		struct iova *new_iova;
 
 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
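After this patch every rbtree walk in iova.c reads the same way. A minimal
sketch of the resulting idiom, using only names that appear in the diff
above (not a new API):

	struct rb_node *node;

	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		/* iova->pfn_lo .. iova->pfn_hi describe this allocation */
	}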