about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorMuli Ben-Yehuda <muli@il.ibm.com>2006-09-26 04:52:33 -0400
committerAndi Kleen <andi@basil.nowhere.org>2006-09-26 04:52:33 -0400
commit796e4390e0378e1e57c033349610cfc741696a3d (patch)
tree7669081e84ad3d3f39f8e15963dd2105b2ffa022 /arch
parentde684652f34f57cb60d4d78d09139a0e0c5e7b1b (diff)
[PATCH] only verify the allocation bitmap if CONFIG_IOMMU_DEBUG is on
Introduce new function verify_bit_range(). Define two versions, one for CONFIG_IOMMU_DEBUG enabled and one for disabled. Previously we were checking that the bitmap was consistent every time we allocated or freed an entry in the TCE table, which is good for debugging but incurs an unnecessary penalty on non-debug builds.

Signed-off-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Jon Mason <jdmason@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch')
-rw-r--r-- arch/x86_64/kernel/pci-calgary.c 44
1 file changed, 35 insertions, 9 deletions
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index ebe4e930b64d..7c43cb0f71a3 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -133,12 +133,35 @@ static inline void tce_cache_blast_stress(struct iommu_table *tbl)
133{ 133{
134 tce_cache_blast(tbl); 134 tce_cache_blast(tbl);
135} 135}
136
137static inline unsigned long verify_bit_range(unsigned long* bitmap,
138 int expected, unsigned long start, unsigned long end)
139{
140 unsigned long idx = start;
141
142 BUG_ON(start >= end);
143
144 while (idx < end) {
145 if (!!test_bit(idx, bitmap) != expected)
146 return idx;
147 ++idx;
148 }
149
150 /* all bits have the expected value */
151 return ~0UL;
152}
136#else /* debugging is disabled */ 153#else /* debugging is disabled */
137int debugging __read_mostly = 0; 154int debugging __read_mostly = 0;
138 155
139static inline void tce_cache_blast_stress(struct iommu_table *tbl) 156static inline void tce_cache_blast_stress(struct iommu_table *tbl)
140{ 157{
141} 158}
159
160static inline unsigned long verify_bit_range(unsigned long* bitmap,
161 int expected, unsigned long start, unsigned long end)
162{
163 return ~0UL;
164}
142#endif /* CONFIG_IOMMU_DEBUG */ 165#endif /* CONFIG_IOMMU_DEBUG */
143 166
144static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) 167static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
@@ -162,6 +185,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
162{ 185{
163 unsigned long index; 186 unsigned long index;
164 unsigned long end; 187 unsigned long end;
188 unsigned long badbit;
165 189
166 index = start_addr >> PAGE_SHIFT; 190 index = start_addr >> PAGE_SHIFT;
167 191
@@ -173,14 +197,15 @@ static void iommu_range_reserve(struct iommu_table *tbl,
173 if (end > tbl->it_size) /* don't go off the table */ 197 if (end > tbl->it_size) /* don't go off the table */
174 end = tbl->it_size; 198 end = tbl->it_size;
175 199
176 while (index < end) { 200 badbit = verify_bit_range(tbl->it_map, 0, index, end);
177 if (test_bit(index, tbl->it_map)) 201 if (badbit != ~0UL) {
202 if (printk_ratelimit())
178 printk(KERN_ERR "Calgary: entry already allocated at " 203 printk(KERN_ERR "Calgary: entry already allocated at "
179 "0x%lx tbl %p dma 0x%lx npages %u\n", 204 "0x%lx tbl %p dma 0x%lx npages %u\n",
180 index, tbl, start_addr, npages); 205 badbit, tbl, start_addr, npages);
181 ++index;
182 } 206 }
183 set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, npages); 207
208 set_bit_string(tbl->it_map, index, npages);
184} 209}
185 210
186static unsigned long iommu_range_alloc(struct iommu_table *tbl, 211static unsigned long iommu_range_alloc(struct iommu_table *tbl,
@@ -247,7 +272,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
247 unsigned int npages) 272 unsigned int npages)
248{ 273{
249 unsigned long entry; 274 unsigned long entry;
250 unsigned long i; 275 unsigned long badbit;
251 276
252 entry = dma_addr >> PAGE_SHIFT; 277 entry = dma_addr >> PAGE_SHIFT;
253 278
@@ -255,11 +280,12 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
255 280
256 tce_free(tbl, entry, npages); 281 tce_free(tbl, entry, npages);
257 282
258 for (i = 0; i < npages; ++i) { 283 badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
259 if (!test_bit(entry + i, tbl->it_map)) 284 if (badbit != ~0UL) {
285 if (printk_ratelimit())
260 printk(KERN_ERR "Calgary: bit is off at 0x%lx " 286 printk(KERN_ERR "Calgary: bit is off at 0x%lx "
261 "tbl %p dma 0x%Lx entry 0x%lx npages %u\n", 287 "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
262 entry + i, tbl, dma_addr, entry, npages); 288 badbit, tbl, dma_addr, entry, npages);
263 } 289 }
264 290
265 __clear_bit_string(tbl->it_map, entry, npages); 291 __clear_bit_string(tbl->it_map, entry, npages);