author     David Miller <davem@davemloft.net>                    2008-02-06 04:36:23 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 13:41:01 -0500
commit     f661197e0a95ec7305e1e790d95b72a74a1c4a0f
tree       a6916d877a3d9db9bc658758bd347d4f436f6d59 /drivers/pci/iova.h
parent     b1ed88b47f5e18c6efb8041275c16eeead5377df
Genericizing iova.[ch]
I would like to potentially move the sparc64 IOMMU code over to using
the nice new drivers/pci/iova.[ch] code for free area management.
In order to do that, we have to detach the IOMMU page-size assumptions,
which only really need to exist in the intel-iommu.[ch] code.
This patch attempts to implement that.
[akpm@linux-foundation.org: build fix]
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
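The diffstat below is limited to iova.h, so the receiving side of the move is not visible here. As a rough sketch, assuming the removed definitions keep their names and simply migrate into the Intel-specific IOMMU code (per the commit message, that is where the 4K assumption really belongs), the detached page-size macros would look something like this:

/* Sketch only: the 4K page-size assumptions removed from iova.h below,
 * relocated into the Intel IOMMU code.  Names are taken from the removed
 * hunk; the actual destination file is not shown in this diff. */
#define PAGE_SHIFT_4K		(12)
#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)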
Diffstat (limited to 'drivers/pci/iova.h')
-rw-r--r--  drivers/pci/iova.h | 16 ++--------------
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index ae3028d5a941..d521b5b7319c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -15,22 +15,9 @@
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h>
 
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
-
 /* iova structure */
 struct iova {
 	struct rb_node	node;
@@ -44,6 +31,7 @@ struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
+	unsigned long	dma_32bit_pfn;
 };
 
 struct iova *alloc_iova_mem(void);
@@ -56,7 +44,7 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad);
+void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 
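With the 4K assumption gone from iova.h, the 32-bit PFN boundary becomes something each caller supplies when it sets up its domain, via the new dma_32bit_pfn field and the extra init_iova_domain() argument. A minimal caller-side sketch of the new interface follows; the function name, variable names, and iommu_page_shift parameter are hypothetical illustrations, not taken from this patch:

#include <linux/dma-mapping.h>
#include "iova.h"

/* Illustrative only: an IOMMU driver now derives the 32-bit PFN limit
 * from its own IOMMU page shift and hands it to the generic allocator,
 * instead of iova.h hard-coding a 4K page size. */
static struct iova_domain example_iovad;

static void example_init(unsigned int iommu_page_shift)
{
	unsigned long pfn_32bit = DMA_32BIT_MASK >> iommu_page_shift;

	init_iova_domain(&example_iovad, pfn_32bit);
}

This is what lets an IOMMU with a page size other than 4K (such as the sparc64 IOMMU mentioned above) reuse the same free-area management code.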