author    David Miller <davem@davemloft.net>  2008-02-06 04:36:23 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 13:41:01 -0500
commit    f661197e0a95ec7305e1e790d95b72a74a1c4a0f (patch)
tree      a6916d877a3d9db9bc658758bd347d4f436f6d59 /drivers/pci
parent    b1ed88b47f5e18c6efb8041275c16eeead5377df (diff)
Genericizing iova.[ch]
I would like to potentially move the sparc64 IOMMU code over to using the
nice new drivers/pci/iova.[ch] code for free area management.

In order to do that we have to detach the IOMMU page size assumptions,
which only really need to exist in the intel-iommu.[ch] code.

This patch attempts to implement that.

[akpm@linux-foundation.org: build fix]
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
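To make the interface change concrete, here is a minimal sketch of what a caller of the genericized allocator could now look like. It is not part of the patch: MY_IOMMU_PAGE_SHIFT, MY_IOVA_PFN and my_iommu_setup are hypothetical names, standing in for a non-Intel IOMMU driver that uses its own page size and passes its own 32-bit PFN ceiling to init_iova_domain() instead of relying on the 4K constants this patch moves into intel-iommu.h:

	/* Hypothetical caller of the genericized iova allocator. */
	#include <linux/dma-mapping.h>
	#include "iova.h"

	#define MY_IOMMU_PAGE_SHIFT	13	/* e.g. an 8K IOMMU page size */
	#define MY_IOVA_PFN(addr)	((addr) >> MY_IOMMU_PAGE_SHIFT)

	static struct iova_domain my_iovad;

	static void my_iommu_setup(void)
	{
		/* The 32-bit PFN boundary is now supplied by the caller,
		 * in units of its own IOMMU page size. */
		init_iova_domain(&my_iovad, MY_IOVA_PFN(DMA_32BIT_MASK));
	}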
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dmar.c        |  1
-rw-r--r--  drivers/pci/intel-iommu.c |  4
-rw-r--r--  drivers/pci/intel-iommu.h | 14
-rw-r--r--  drivers/pci/iova.c        |  8
-rw-r--r--  drivers/pci/iova.h        | 16
5 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 91b2dc956be5..8ed26480371f 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include "iova.h"
+#include "intel-iommu.h"
 
 #undef PREFIX
 #define PREFIX "DMAR:"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4e01df99681a..31fa6c92aa5e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1088,7 +1088,7 @@ static void dmar_init_reserved_ranges(void)
 	int i;
 	u64 addr, size;
 
-	init_iova_domain(&reserved_iova_list);
+	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
 	/* IOAPIC ranges shouldn't be accessed by DMA */
 	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
@@ -1142,7 +1142,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	int adjust_width, agaw;
 	unsigned long sagaw;
 
-	init_iova_domain(&domain->iovad);
+	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
 	spin_lock_init(&domain->mapping_lock);
 
 	domain_reserve_special_ranges(domain);
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index 459ad1f9dc54..0e4862675ad2 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -23,10 +23,24 @@
 
 #include <linux/types.h>
 #include <linux/msi.h>
+#include <linux/sysdev.h>
 #include "iova.h"
 #include <linux/io.h>
 
 /*
+ * We need a fixed PAGE_SIZE of 4K irrespective of
+ * arch PAGE_SIZE for IOMMU page tables.
+ */
+#define PAGE_SHIFT_4K		(12)
+#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
+#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
+#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
+
+/*
  * Intel IOMMU register specification per version 1.0 public spec.
  */
 
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index a84571c29360..8de7ab6c6d0c 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -9,19 +9,19 @@
9#include "iova.h" 9#include "iova.h"
10 10
11void 11void
12init_iova_domain(struct iova_domain *iovad) 12init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
13{ 13{
14 spin_lock_init(&iovad->iova_alloc_lock); 14 spin_lock_init(&iovad->iova_alloc_lock);
15 spin_lock_init(&iovad->iova_rbtree_lock); 15 spin_lock_init(&iovad->iova_rbtree_lock);
16 iovad->rbroot = RB_ROOT; 16 iovad->rbroot = RB_ROOT;
17 iovad->cached32_node = NULL; 17 iovad->cached32_node = NULL;
18 18 iovad->dma_32bit_pfn = pfn_32bit;
19} 19}
20 20
21static struct rb_node * 21static struct rb_node *
22__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) 22__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
23{ 23{
24 if ((*limit_pfn != DMA_32BIT_PFN) || 24 if ((*limit_pfn != iovad->dma_32bit_pfn) ||
25 (iovad->cached32_node == NULL)) 25 (iovad->cached32_node == NULL))
26 return rb_last(&iovad->rbroot); 26 return rb_last(&iovad->rbroot);
27 else { 27 else {
@@ -37,7 +37,7 @@ static void
 __cached_rbnode_insert_update(struct iova_domain *iovad,
 	unsigned long limit_pfn, struct iova *new)
 {
-	if (limit_pfn != DMA_32BIT_PFN)
+	if (limit_pfn != iovad->dma_32bit_pfn)
 		return;
 	iovad->cached32_node = &new->node;
 }
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index ae3028d5a941..d521b5b7319c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -15,22 +15,9 @@
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h>
 
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
-
 /* iova structure */
 struct iova {
 	struct rb_node	node;
@@ -44,6 +31,7 @@ struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
+	unsigned long	dma_32bit_pfn;
 };
 
 struct iova *alloc_iova_mem(void);
@@ -56,7 +44,7 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad);
+void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 
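As a closing illustration (again hypothetical, building on the sketch above rather than on anything in this patch), the rest of the exported iova.h interface already works purely in page frame numbers, so reserving a range the allocator must never hand out looks the same whatever page size the caller picked:

	/* Hypothetical: keep a firmware-owned IOVA window out of the allocator,
	 * using the MY_IOVA_PFN()/my_iovad names introduced earlier. */
	static void my_iommu_reserve_window(u64 base, u64 size)
	{
		struct iova *iova;

		iova = reserve_iova(&my_iovad, MY_IOVA_PFN(base),
				    MY_IOVA_PFN(base + size - 1));
		if (!iova)
			printk(KERN_ERR "my_iommu: failed to reserve IOVA window\n");
	}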