diff options
author | David Miller <davem@davemloft.net> | 2008-02-06 04:36:23 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-06 13:41:01 -0500 |
commit | f661197e0a95ec7305e1e790d95b72a74a1c4a0f (patch) | |
tree | a6916d877a3d9db9bc658758bd347d4f436f6d59 /drivers/pci/iova.c | |
parent | b1ed88b47f5e18c6efb8041275c16eeead5377df (diff) |
Genericizing iova.[ch]
I would like to potentially move the sparc64 IOMMU code over to using
the nice new drivers/pci/iova.[ch] code for free area management.
In order to do that we have to detach the IOMMU page size assumptions
which only really need to exist in the intel-iommu.[ch] code.
This patch attempts to implement that.
[akpm@linux-foundation.org: build fix]
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r-- | drivers/pci/iova.c | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index a84571c29360..8de7ab6c6d0c 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c | |||
@@ -9,19 +9,19 @@ | |||
9 | #include "iova.h" | 9 | #include "iova.h" |
10 | 10 | ||
11 | void | 11 | void |
12 | init_iova_domain(struct iova_domain *iovad) | 12 | init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) |
13 | { | 13 | { |
14 | spin_lock_init(&iovad->iova_alloc_lock); | 14 | spin_lock_init(&iovad->iova_alloc_lock); |
15 | spin_lock_init(&iovad->iova_rbtree_lock); | 15 | spin_lock_init(&iovad->iova_rbtree_lock); |
16 | iovad->rbroot = RB_ROOT; | 16 | iovad->rbroot = RB_ROOT; |
17 | iovad->cached32_node = NULL; | 17 | iovad->cached32_node = NULL; |
18 | 18 | iovad->dma_32bit_pfn = pfn_32bit; | |
19 | } | 19 | } |
20 | 20 | ||
21 | static struct rb_node * | 21 | static struct rb_node * |
22 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | 22 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) |
23 | { | 23 | { |
24 | if ((*limit_pfn != DMA_32BIT_PFN) || | 24 | if ((*limit_pfn != iovad->dma_32bit_pfn) || |
25 | (iovad->cached32_node == NULL)) | 25 | (iovad->cached32_node == NULL)) |
26 | return rb_last(&iovad->rbroot); | 26 | return rb_last(&iovad->rbroot); |
27 | else { | 27 | else { |
@@ -37,7 +37,7 @@ static void | |||
37 | __cached_rbnode_insert_update(struct iova_domain *iovad, | 37 | __cached_rbnode_insert_update(struct iova_domain *iovad, |
38 | unsigned long limit_pfn, struct iova *new) | 38 | unsigned long limit_pfn, struct iova *new) |
39 | { | 39 | { |
40 | if (limit_pfn != DMA_32BIT_PFN) | 40 | if (limit_pfn != iovad->dma_32bit_pfn) |
41 | return; | 41 | return; |
42 | iovad->cached32_node = &new->node; | 42 | iovad->cached32_node = &new->node; |
43 | } | 43 | } |