diff options
author | Keshavamurthy, Anil S <anil.s.keshavamurthy@intel.com> | 2007-10-21 19:41:58 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-22 11:13:19 -0400 |
commit | f76aec76ec7f68829a66624d11a50ed6cb404185 (patch) | |
tree | e470670d6c26aa06ca1fa28e97b25717e80dfa8a /drivers/pci/iova.c | |
parent | 49a0429e53f29109cbf1eadd89497286ba81f1ae (diff) |
intel-iommu: optimize sg map/unmap calls
This patch adds PageSelectiveInvalidation support replacing existing
DomainSelectiveInvalidation for intel_{map/unmap}_sg() calls and also
enables mapping one big contiguous DMA virtual address which is mapped
to discontiguous physical address for SG map/unmap calls.
"Domain selective invalidations" wipes out the IOMMU address translation
cache based on domain ID, whereas "Page selective invalidations" wipes out
the IOMMU address translation cache for that address mask range which is
more cache friendly when compared to Domain selective invalidations.
Here is how it is done.
1) changes to iova.c
alloc_iova() now takes a bool size_aligned argument, which
when set, returns the io virtual address that is
naturally aligned to 2 ^ x, where x is the order
of the size requested.
Returning this io virtual address which is naturally
aligned helps iommu to do the "page selective
invalidations" which is IOMMU cache friendly
over "domain selective invalidations".
2) Changes to driver/pci/intel-iommu.c
Clean up intel_{map/unmap}_{single/sg} () calls so that
s/g map/unmap calls are no longer dependent on
intel_{map/unmap}_single()
intel_map_sg() now computes the total DMA virtual address
required and allocates the size aligned total DMA virtual address
and maps the discontiguous physical address to the allocated
contiguous DMA virtual address.
In the intel_unmap_sg() case since the DMA virtual address
is contiguous and size_aligned, PageSelectiveInvalidation
is used replacing earlier DomainSelectiveInvalidations.
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Greg KH <greg@kroah.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Suresh B <suresh.b.siddha@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r-- | drivers/pci/iova.c | 63 |
1 files changed, 50 insertions, 13 deletions
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 717fafaa7e02..a84571c29360 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c | |||
@@ -57,12 +57,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
57 | iovad->cached32_node = rb_next(&free->node); | 57 | iovad->cached32_node = rb_next(&free->node); |
58 | } | 58 | } |
59 | 59 | ||
60 | static int __alloc_iova_range(struct iova_domain *iovad, | 60 | /* Computes the padding size required, to make the |
61 | unsigned long size, unsigned long limit_pfn, struct iova *new) | 61 | * the start address naturally aligned on its size |
62 | */ | ||
63 | static int | ||
64 | iova_get_pad_size(int size, unsigned int limit_pfn) | ||
65 | { | ||
66 | unsigned int pad_size = 0; | ||
67 | unsigned int order = ilog2(size); | ||
68 | |||
69 | if (order) | ||
70 | pad_size = (limit_pfn + 1) % (1 << order); | ||
71 | |||
72 | return pad_size; | ||
73 | } | ||
74 | |||
75 | static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size, | ||
76 | unsigned long limit_pfn, struct iova *new, bool size_aligned) | ||
62 | { | 77 | { |
63 | struct rb_node *curr = NULL; | 78 | struct rb_node *curr = NULL; |
64 | unsigned long flags; | 79 | unsigned long flags; |
65 | unsigned long saved_pfn; | 80 | unsigned long saved_pfn; |
81 | unsigned int pad_size = 0; | ||
66 | 82 | ||
67 | /* Walk the tree backwards */ | 83 | /* Walk the tree backwards */ |
68 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | 84 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
@@ -72,22 +88,32 @@ static int __alloc_iova_range(struct iova_domain *iovad, | |||
72 | struct iova *curr_iova = container_of(curr, struct iova, node); | 88 | struct iova *curr_iova = container_of(curr, struct iova, node); |
73 | if (limit_pfn < curr_iova->pfn_lo) | 89 | if (limit_pfn < curr_iova->pfn_lo) |
74 | goto move_left; | 90 | goto move_left; |
75 | if (limit_pfn < curr_iova->pfn_hi) | 91 | else if (limit_pfn < curr_iova->pfn_hi) |
76 | goto adjust_limit_pfn; | 92 | goto adjust_limit_pfn; |
77 | if ((curr_iova->pfn_hi + size) <= limit_pfn) | 93 | else { |
78 | break; /* found a free slot */ | 94 | if (size_aligned) |
95 | pad_size = iova_get_pad_size(size, limit_pfn); | ||
96 | if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) | ||
97 | break; /* found a free slot */ | ||
98 | } | ||
79 | adjust_limit_pfn: | 99 | adjust_limit_pfn: |
80 | limit_pfn = curr_iova->pfn_lo - 1; | 100 | limit_pfn = curr_iova->pfn_lo - 1; |
81 | move_left: | 101 | move_left: |
82 | curr = rb_prev(curr); | 102 | curr = rb_prev(curr); |
83 | } | 103 | } |
84 | 104 | ||
85 | if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) { | 105 | if (!curr) { |
86 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 106 | if (size_aligned) |
87 | return -ENOMEM; | 107 | pad_size = iova_get_pad_size(size, limit_pfn); |
108 | if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { | ||
109 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
110 | return -ENOMEM; | ||
111 | } | ||
88 | } | 112 | } |
89 | new->pfn_hi = limit_pfn; | 113 | |
90 | new->pfn_lo = limit_pfn - size + 1; | 114 | /* pfn_lo will point to size aligned address if size_aligned is set */ |
115 | new->pfn_lo = limit_pfn - (size + pad_size) + 1; | ||
116 | new->pfn_hi = new->pfn_lo + size - 1; | ||
91 | 117 | ||
92 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 118 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
93 | return 0; | 119 | return 0; |
@@ -119,12 +145,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
119 | * @iovad - iova domain in question | 145 | * @iovad - iova domain in question |
120 | * @size - size of page frames to allocate | 146 | * @size - size of page frames to allocate |
121 | * @limit_pfn - max limit address | 147 | * @limit_pfn - max limit address |
148 | * @size_aligned - set if size_aligned address range is required | ||
122 | * This function allocates an iova in the range limit_pfn to IOVA_START_PFN | 149 | * This function allocates an iova in the range limit_pfn to IOVA_START_PFN |
123 | * looking from limit_pfn instead from IOVA_START_PFN. | 150 | * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned |
151 | * flag is set then the allocated address iova->pfn_lo will be naturally | ||
152 | * aligned on roundup_power_of_two(size). | ||
124 | */ | 153 | */ |
125 | struct iova * | 154 | struct iova * |
126 | alloc_iova(struct iova_domain *iovad, unsigned long size, | 155 | alloc_iova(struct iova_domain *iovad, unsigned long size, |
127 | unsigned long limit_pfn) | 156 | unsigned long limit_pfn, |
157 | bool size_aligned) | ||
128 | { | 158 | { |
129 | unsigned long flags; | 159 | unsigned long flags; |
130 | struct iova *new_iova; | 160 | struct iova *new_iova; |
@@ -134,8 +164,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
134 | if (!new_iova) | 164 | if (!new_iova) |
135 | return NULL; | 165 | return NULL; |
136 | 166 | ||
167 | /* If size aligned is set then round the size to | ||
168 | * to next power of two. | ||
169 | */ | ||
170 | if (size_aligned) | ||
171 | size = __roundup_pow_of_two(size); | ||
172 | |||
137 | spin_lock_irqsave(&iovad->iova_alloc_lock, flags); | 173 | spin_lock_irqsave(&iovad->iova_alloc_lock, flags); |
138 | ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova); | 174 | ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova, |
175 | size_aligned); | ||
139 | 176 | ||
140 | if (ret) { | 177 | if (ret) { |
141 | spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); | 178 | spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); |