Diffstat (limited to 'drivers/pci/iova.c')
 drivers/pci/iova.c | 357 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 357 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
new file mode 100644
index 00000000000..717fafaa7e0
--- /dev/null
+++ b/drivers/pci/iova.c
@@ -0,0 +1,357 @@
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

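/*
 * Simple allocator for IOVA (I/O virtual address) ranges, used by the
 * IOMMU code to hand out DMA address ranges tracked as page frame numbers.
 *
 * A minimal usage sketch (the domain, size and pfn values below are
 * illustrative only, not taken from any in-tree caller):
 *
 *	struct iova_domain domain;
 *	struct iova *iova;
 *
 *	init_iova_domain(&domain);
 *	iova = alloc_iova(&domain, nr_pages, DMA_32BIT_PFN);
 *	if (iova)
 *		free_iova(&domain, iova->pfn_lo);
 */
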
#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
}

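/*
 * The rbtree is ordered by pfn and is searched from limit_pfn downwards.
 * cached32_node remembers the node most recently allocated at the 32-bit
 * boundary (DMA_32BIT_PFN), so such allocations can resume the downward
 * walk from just below it instead of starting at the end of the tree.
 */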
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != DMA_32BIT_PFN) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != DMA_32BIT_PFN)
		return;
	iovad->cached32_node = &new->node;
}

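/*
 * When a range at or above the cached node is freed, the cache could be
 * left pointing at (or above) a node that is about to be erased; advance
 * it to the node following the one being freed.
 */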
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

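/*
 * Walk the tree from limit_pfn downwards looking for a gap of at least
 * 'size' page frames between existing ranges (or below the lowest range,
 * bounded by IOVA_START_PFN).  The new range is placed as high as possible
 * within that gap: [limit_pfn - size + 1, limit_pfn].
 */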
static int __alloc_iova_range(struct iova_domain *iovad,
	unsigned long size, unsigned long limit_pfn, struct iova *new)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);
		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		if ((curr_iova->pfn_hi + size) <= limit_pfn)
			break;	/* found a free slot */
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}
	new->pfn_hi = limit_pfn;
	new->pfn_lo = limit_pfn - size + 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

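/*
 * Insert an iova into the domain rbtree, keyed by pfn_lo.  Ranges in the
 * tree never overlap, so hitting an equal key indicates a bug.
 */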
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: number of page frames to allocate
 * @limit_pfn: highest pfn that may be allocated
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downward from limit_pfn rather than upward from IOVA_START_PFN.
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for the given pfn and then
 * frees it from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in the domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

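/* Returns 1 if [pfn_lo, pfn_hi] overlaps the range covered by this node. */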
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

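/*
 * Grow an existing, overlapping reservation downwards to *pfn_lo if needed.
 * If the request also extends above the existing node, advance *pfn_lo past
 * the node so the caller can go on reserving the remaining upper portion.
 */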
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that those addresses are not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved range
	 * or because we need to insert the remaining non-overlapping
	 * portion of the address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies the reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}