diff options
author | Marek Szyprowski <m.szyprowski@samsung.com> | 2017-02-24 06:13:37 -0500 |
---|---|---|
committer | Joerg Roedel <jroedel@suse.de> | 2017-03-21 09:58:18 -0400 |
commit | d751751a9f7f2f8e406d5a09565d337f009835d6 (patch) | |
tree | b5ed3a1536a8e0418dd738a17ad356d0eba882e7 | |
parent | 97da3854c526d3a6ee05c849c96e48d21527606c (diff) |
iommu/iova: Consolidate code for adding new node to iovad domain rbtree
This patch consolidates almost the same code used in iova_insert_rbtree()
and __alloc_and_insert_iova_range() functions. While touching this code,
replace BUG() with WARN_ON(1) to avoid taking down the whole system in
case of corrupted iova tree or incorrect calls.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r-- | drivers/iommu/iova.c | 87 |
1 file changed, 33 insertions(+), 54 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7268a14184f..e80a4105ac2a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -100,6 +100,34 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | /* Insert the iova into domain rbtree by holding writer lock */ | ||
104 | static void | ||
105 | iova_insert_rbtree(struct rb_root *root, struct iova *iova, | ||
106 | struct rb_node *start) | ||
107 | { | ||
108 | struct rb_node **new, *parent = NULL; | ||
109 | |||
110 | new = (start) ? &start : &(root->rb_node); | ||
111 | /* Figure out where to put new node */ | ||
112 | while (*new) { | ||
113 | struct iova *this = rb_entry(*new, struct iova, node); | ||
114 | |||
115 | parent = *new; | ||
116 | |||
117 | if (iova->pfn_lo < this->pfn_lo) | ||
118 | new = &((*new)->rb_left); | ||
119 | else if (iova->pfn_lo > this->pfn_lo) | ||
120 | new = &((*new)->rb_right); | ||
121 | else { | ||
122 | WARN_ON(1); /* this should not happen */ | ||
123 | return; | ||
124 | } | ||
125 | } | ||
126 | /* Add new node and rebalance tree. */ | ||
127 | rb_link_node(&iova->node, parent, new); | ||
128 | rb_insert_color(&iova->node, root); | ||
129 | } | ||
130 | |||
103 | /* | 131 | /* |
104 | * Computes the padding size required, to make the start address | 132 | * Computes the padding size required, to make the start address |
105 | * naturally aligned on the power-of-two order of its size | 133 | * naturally aligned on the power-of-two order of its size |
@@ -157,35 +185,8 @@ move_left: | |||
157 | new->pfn_lo = limit_pfn - (size + pad_size) + 1; | 185 | new->pfn_lo = limit_pfn - (size + pad_size) + 1; |
158 | new->pfn_hi = new->pfn_lo + size - 1; | 186 | new->pfn_hi = new->pfn_lo + size - 1; |
159 | 187 | ||
160 | /* Insert the new_iova into domain rbtree by holding writer lock */ | 188 | /* If we have 'prev', it's a valid place to start the insertion. */ |
161 | /* Add new node and rebalance tree. */ | 189 | iova_insert_rbtree(&iovad->rbroot, new, prev); |
162 | { | ||
163 | struct rb_node **entry, *parent = NULL; | ||
164 | |||
165 | /* If we have 'prev', it's a valid place to start the | ||
166 | insertion. Otherwise, start from the root. */ | ||
167 | if (prev) | ||
168 | entry = &prev; | ||
169 | else | ||
170 | entry = &iovad->rbroot.rb_node; | ||
171 | |||
172 | /* Figure out where to put new node */ | ||
173 | while (*entry) { | ||
174 | struct iova *this = rb_entry(*entry, struct iova, node); | ||
175 | parent = *entry; | ||
176 | |||
177 | if (new->pfn_lo < this->pfn_lo) | ||
178 | entry = &((*entry)->rb_left); | ||
179 | else if (new->pfn_lo > this->pfn_lo) | ||
180 | entry = &((*entry)->rb_right); | ||
181 | else | ||
182 | BUG(); /* this should not happen */ | ||
183 | } | ||
184 | |||
185 | /* Add new node and rebalance tree. */ | ||
186 | rb_link_node(&new->node, parent, entry); | ||
187 | rb_insert_color(&new->node, &iovad->rbroot); | ||
188 | } | ||
189 | __cached_rbnode_insert_update(iovad, saved_pfn, new); | 190 | __cached_rbnode_insert_update(iovad, saved_pfn, new); |
190 | 191 | ||
191 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 192 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
@@ -194,28 +195,6 @@ move_left: | |||
194 | return 0; | 195 | return 0; |
195 | } | 196 | } |
196 | 197 | ||
197 | static void | ||
198 | iova_insert_rbtree(struct rb_root *root, struct iova *iova) | ||
199 | { | ||
200 | struct rb_node **new = &(root->rb_node), *parent = NULL; | ||
201 | /* Figure out where to put new node */ | ||
202 | while (*new) { | ||
203 | struct iova *this = rb_entry(*new, struct iova, node); | ||
204 | |||
205 | parent = *new; | ||
206 | |||
207 | if (iova->pfn_lo < this->pfn_lo) | ||
208 | new = &((*new)->rb_left); | ||
209 | else if (iova->pfn_lo > this->pfn_lo) | ||
210 | new = &((*new)->rb_right); | ||
211 | else | ||
212 | BUG(); /* this should not happen */ | ||
213 | } | ||
214 | /* Add new node and rebalance tree. */ | ||
215 | rb_link_node(&iova->node, parent, new); | ||
216 | rb_insert_color(&iova->node, root); | ||
217 | } | ||
218 | |||
219 | static struct kmem_cache *iova_cache; | 198 | static struct kmem_cache *iova_cache; |
220 | static unsigned int iova_cache_users; | 199 | static unsigned int iova_cache_users; |
221 | static DEFINE_MUTEX(iova_cache_mutex); | 200 | static DEFINE_MUTEX(iova_cache_mutex); |
@@ -505,7 +484,7 @@ __insert_new_range(struct iova_domain *iovad, | |||
505 | 484 | ||
506 | iova = alloc_and_init_iova(pfn_lo, pfn_hi); | 485 | iova = alloc_and_init_iova(pfn_lo, pfn_hi); |
507 | if (iova) | 486 | if (iova) |
508 | iova_insert_rbtree(&iovad->rbroot, iova); | 487 | iova_insert_rbtree(&iovad->rbroot, iova, NULL); |
509 | 488 | ||
510 | return iova; | 489 | return iova; |
511 | } | 490 | } |
@@ -612,11 +591,11 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | |||
612 | rb_erase(&iova->node, &iovad->rbroot); | 591 | rb_erase(&iova->node, &iovad->rbroot); |
613 | 592 | ||
614 | if (prev) { | 593 | if (prev) { |
615 | iova_insert_rbtree(&iovad->rbroot, prev); | 594 | iova_insert_rbtree(&iovad->rbroot, prev, NULL); |
616 | iova->pfn_lo = pfn_lo; | 595 | iova->pfn_lo = pfn_lo; |
617 | } | 596 | } |
618 | if (next) { | 597 | if (next) { |
619 | iova_insert_rbtree(&iovad->rbroot, next); | 598 | iova_insert_rbtree(&iovad->rbroot, next, NULL); |
620 | iova->pfn_hi = pfn_hi; | 599 | iova->pfn_hi = pfn_hi; |
621 | } | 600 | } |
622 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 601 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |