-rw-r--r--  drivers/iommu/iova.c  60
-rw-r--r--  include/linux/iova.h  3
2 files changed, 30 insertions, 33 deletions
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 20be9a8b3188..c6f5a22f8d20 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -48,6 +48,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
+	iovad->cached_node = NULL;
 	iovad->cached32_node = NULL;
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
@@ -110,48 +111,44 @@ EXPORT_SYMBOL_GPL(init_iova_flush_queue);
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 {
-	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
-		(iovad->cached32_node == NULL))
+	struct rb_node *cached_node = NULL;
+	struct iova *curr_iova;
+
+	if (*limit_pfn <= iovad->dma_32bit_pfn)
+		cached_node = iovad->cached32_node;
+	if (!cached_node)
+		cached_node = iovad->cached_node;
+	if (!cached_node)
 		return rb_last(&iovad->rbroot);
-	else {
-		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
-		struct iova *curr_iova =
-			rb_entry(iovad->cached32_node, struct iova, node);
-		*limit_pfn = curr_iova->pfn_lo;
-		return prev_node;
-	}
+
+	curr_iova = rb_entry(cached_node, struct iova, node);
+	*limit_pfn = min(*limit_pfn, curr_iova->pfn_lo);
+
+	return rb_prev(cached_node);
 }
 
 static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
-	unsigned long limit_pfn, struct iova *new)
+__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
 {
-	if (limit_pfn != iovad->dma_32bit_pfn)
-		return;
-	iovad->cached32_node = &new->node;
+	if (new->pfn_hi < iovad->dma_32bit_pfn)
+		iovad->cached32_node = &new->node;
+	else
+		iovad->cached_node = &new->node;
 }
 
 static void
 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 {
 	struct iova *cached_iova;
-	struct rb_node *curr;
 
-	if (!iovad->cached32_node)
-		return;
-	curr = iovad->cached32_node;
-	cached_iova = rb_entry(curr, struct iova, node);
+	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+	if (free->pfn_hi < iovad->dma_32bit_pfn &&
+	    iovad->cached32_node && free->pfn_lo >= cached_iova->pfn_lo)
+		iovad->cached32_node = rb_next(&free->node);
 
-	if (free->pfn_lo >= cached_iova->pfn_lo) {
-		struct rb_node *node = rb_next(&free->node);
-		struct iova *iova = rb_entry(node, struct iova, node);
-
-		/* only cache if it's below 32bit pfn */
-		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
-			iovad->cached32_node = node;
-		else
-			iovad->cached32_node = NULL;
-	}
+	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+	if (iovad->cached_node && free->pfn_lo >= cached_iova->pfn_lo)
+		iovad->cached_node = rb_next(&free->node);
 }
 
 /* Insert the iova into domain rbtree by holding writer lock */
@@ -188,7 +185,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 {
 	struct rb_node *prev, *curr = NULL;
 	unsigned long flags;
-	unsigned long saved_pfn, new_pfn;
+	unsigned long new_pfn;
 	unsigned long align_mask = ~0UL;
 
 	if (size_aligned)
@@ -196,7 +193,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	saved_pfn = limit_pfn;
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
 	prev = curr;
 	while (curr) {
@@ -226,7 +222,7 @@ move_left:
 
 	/* If we have 'prev', it's a valid place to start the insertion. */
 	iova_insert_rbtree(&iovad->rbroot, new, prev);
-	__cached_rbnode_insert_update(iovad, saved_pfn, new);
+	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 
diff --git a/include/linux/iova.h b/include/linux/iova.h
index d179b9bf7814..69ea3e258ff2 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -70,7 +70,8 @@ struct iova_fq {
 struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
-	struct rb_node	*cached32_node; /* Save last alloced node */
+	struct rb_node	*cached_node;	/* Save last alloced node */
+	struct rb_node	*cached32_node;	/* Save last 32-bit alloced node */
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
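
For illustration only, not part of the patch: a minimal user-space C sketch of the two-level cache the diff introduces. The toy_* names and the PFN constant are hypothetical, and the rbtree walk, locking, and the delete-path invalidation are deliberately elided.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical 32-bit boundary: the pfn of 4GiB with 4KiB pages. */
#define DMA_32BIT_PFN	(1UL << 20)

/* Toy stand-in for iova_domain: tracks only the two cache slots the
 * patch maintains, as valid+pfn pairs instead of rb_node pointers. */
struct toy_domain {
	bool have_cached, have_cached32;
	unsigned long cached_pfn, cached32_pfn;
};

/* Mirrors __cached_rbnode_insert_update(): an allocation wholly below
 * the 32-bit boundary refreshes the 32-bit cache; anything else
 * refreshes the general cache. */
static void toy_insert_update(struct toy_domain *d, unsigned long pfn_hi)
{
	if (pfn_hi < DMA_32BIT_PFN) {
		d->cached32_pfn = pfn_hi;
		d->have_cached32 = true;
	} else {
		d->cached_pfn = pfn_hi;
		d->have_cached = true;
	}
}

/* Mirrors the lookup order in __get_cached_rbnode(): prefer the 32-bit
 * cache when the request's limit permits, fall back to the general
 * cache, and only then start from the top of the tree (rb_last()). */
static const char *toy_pick_start(struct toy_domain *d, unsigned long limit_pfn)
{
	if (limit_pfn <= DMA_32BIT_PFN && d->have_cached32)
		return "cached32_node";
	if (d->have_cached)
		return "cached_node";
	return "rb_last(&iovad->rbroot)";
}

int main(void)
{
	struct toy_domain d = { 0 };

	toy_insert_update(&d, 0x00042UL);	/* a 32-bit allocation */
	toy_insert_update(&d, 0x200000UL);	/* a >32-bit allocation */

	printf("32-bit request starts at %s\n", toy_pick_start(&d, DMA_32BIT_PFN));
	printf("64-bit request starts at %s\n", toy_pick_start(&d, ~0UL));
	return 0;
}

The behavioural change is visible even in the toy: before the patch, only an allocation whose limit hit dma_32bit_pfn exactly updated the single cached32_node, and every other request restarted from rb_last(); with the second slot, requests above the 32-bit boundary get their own cached starting point without evicting the 32-bit one.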