author    Muli Ben-Yehuda <muli@il.ibm.com>    2007-07-21 11:11:04 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-21 21:37:11 -0400
commit    820a149705c2c2a37989554a4f4a34e3d0b0df1f
tree      d6f13abb1b2640b598907c00d8eb4d2c17743b68 /arch/x86_64/kernel/pci-calgary.c
parent    7354b07595b2e43b75fe353fcf18e73eb0427c9b
x86_64: Calgary - tighten up the bitmap locking
Currently the IOMMU table's lock protects both the bitmap and access to the hardware's TCE table. Access to the TCE table is synchronized through the bitmap; therefore, only hold the lock while modifying the bitmap. This gives a yummy 10-15% reduction in CPU utilization for netperf on a large SMP machine.

Signed-off-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
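To illustrate the invariant the patch relies on, here is a minimal user-space sketch of the same locking pattern. It is not the kernel code: range_alloc, map_pages, TABLE_SIZE, and the byte-per-entry it_map are made-up stand-ins for iommu_range_alloc, iommu_alloc, tbl->it_size, and the real bitmap. The point it demonstrates is that the lock covers only the bitmap search-and-claim; once a thread owns a run of bits, it has exclusive ownership of the corresponding table entries, so the TCE writes (tce_build in the patch) can safely run unlocked.

	/*
	 * Illustrative sketch only -- not kernel code. The lock protects
	 * the allocation bitmap alone; table entries for a claimed range
	 * are written without the lock, since no other thread can
	 * allocate the same range while the bits are set.
	 */
	#include <pthread.h>
	#include <stdint.h>
	#include <string.h>

	#define TABLE_SIZE 1024

	static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint8_t it_map[TABLE_SIZE];      /* one byte per entry, for simplicity */
	static uint64_t tce_table[TABLE_SIZE];  /* stands in for the hardware TCE table */

	/* Find and claim npages consecutive free entries; bitmap work only. */
	static long range_alloc(unsigned int npages)
	{
		long offset = -1;
		unsigned int run = 0;

		if (npages == 0 || npages > TABLE_SIZE)
			return -1;

		pthread_mutex_lock(&map_lock);
		for (long i = 0; i < TABLE_SIZE; i++) {
			run = it_map[i] ? 0 : run + 1;
			if (run == npages) {
				offset = i - npages + 1;
				memset(&it_map[offset], 1, npages); /* claim the range */
				break;
			}
		}
		pthread_mutex_unlock(&map_lock);
		return offset;
	}

	static long map_pages(uint64_t paddr, unsigned int npages)
	{
		long entry = range_alloc(npages);

		if (entry < 0)
			return -1;
		/*
		 * Unlocked on purpose: owning the claimed bits guarantees
		 * exclusive access to tce_table[entry .. entry + npages - 1].
		 */
		for (unsigned int i = 0; i < npages; i++)
			tce_table[entry + i] = paddr + ((uint64_t)i << 12);
		return entry;
	}

This mirrors what the diff below does: iommu_range_alloc and __iommu_free now take it_lock themselves around the bitmap operations, while iommu_alloc calls tce_build, and __iommu_free calls tce_free, entirely outside the lock.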
Diffstat (limited to 'arch/x86_64/kernel/pci-calgary.c')
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c  40
1 file changed, 17 insertions(+), 23 deletions(-)
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index a2f9c5369292..fa50d6a1ce46 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -233,6 +233,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 	unsigned long index;
 	unsigned long end;
 	unsigned long badbit;
+	unsigned long flags;
 
 	index = start_addr >> PAGE_SHIFT;
 
@@ -244,6 +245,8 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 	if (end > tbl->it_size) /* don't go off the table */
 		end = tbl->it_size;
 
+	spin_lock_irqsave(&tbl->it_lock, flags);
+
 	badbit = verify_bit_range(tbl->it_map, 0, index, end);
 	if (badbit != ~0UL) {
 		if (printk_ratelimit())
@@ -253,15 +256,20 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 	}
 
 	set_bit_string(tbl->it_map, index, npages);
+
+	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	unsigned int npages)
 {
+	unsigned long flags;
 	unsigned long offset;
 
 	BUG_ON(npages == 0);
 
+	spin_lock_irqsave(&tbl->it_lock, flags);
+
 	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
 				       tbl->it_size, npages);
 	if (offset == ~0UL) {
@@ -270,6 +278,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 					       tbl->it_size, npages);
 		if (offset == ~0UL) {
 			printk(KERN_WARNING "Calgary: IOMMU full.\n");
+			spin_unlock_irqrestore(&tbl->it_lock, flags);
 			if (panic_on_overflow)
 				panic("Calgary: fix the allocator.\n");
 			else
@@ -281,17 +290,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	tbl->it_hint = offset + npages;
 	BUG_ON(tbl->it_hint > tbl->it_size);
 
+	spin_unlock_irqrestore(&tbl->it_lock, flags);
+
 	return offset;
 }
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
 	unsigned int npages, int direction)
 {
-	unsigned long entry, flags;
+	unsigned long entry;
 	dma_addr_t ret = bad_dma_address;
 
-	spin_lock_irqsave(&tbl->it_lock, flags);
-
 	entry = iommu_range_alloc(tbl, npages);
 
 	if (unlikely(entry == bad_dma_address))
@@ -304,12 +313,9 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
 	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
 		  direction);
 
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
-
 	return ret;
 
 error:
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
 	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
 	       "iommu %p\n", npages, tbl);
 	return bad_dma_address;
@@ -321,6 +327,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long entry;
 	unsigned long badbit;
 	unsigned long badend;
+	unsigned long flags;
 
 	/* were we called with bad_dma_address? */
 	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
@@ -337,6 +344,8 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
 	tce_free(tbl, entry, npages);
 
+	spin_lock_irqsave(&tbl->it_lock, flags);
+
 	badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
 	if (badbit != ~0UL) {
 		if (printk_ratelimit())
@@ -346,18 +355,14 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	}
 
 	__clear_bit_string(tbl->it_map, entry, npages);
+
+	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned int npages)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tbl->it_lock, flags);
-
 	__iommu_free(tbl, dma_addr, npages);
-
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
 static inline struct iommu_table *find_iommu_table(struct device *dev)
@@ -402,17 +407,12 @@ static void __calgary_unmap_sg(struct iommu_table *tbl,
 void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	int nelems, int direction)
 {
-	unsigned long flags;
 	struct iommu_table *tbl = find_iommu_table(dev);
 
 	if (!translate_phb(to_pci_dev(dev)))
 		return;
 
-	spin_lock_irqsave(&tbl->it_lock, flags);
-
 	__calgary_unmap_sg(tbl, sglist, nelems, direction);
-
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
 static int calgary_nontranslate_map_sg(struct device* dev,
@@ -433,7 +433,6 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	int nelems, int direction)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
-	unsigned long flags;
 	unsigned long vaddr;
 	unsigned int npages;
 	unsigned long entry;
@@ -442,8 +441,6 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!translate_phb(to_pci_dev(dev)))
 		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
 
-	spin_lock_irqsave(&tbl->it_lock, flags);
-
 	for (i = 0; i < nelems; i++ ) {
 		struct scatterlist *s = &sg[i];
 		BUG_ON(!s->page);
@@ -467,8 +464,6 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		s->dma_length = s->length;
 	}
 
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
-
 	return nelems;
 error:
 	__calgary_unmap_sg(tbl, sg, nelems, direction);
@@ -476,7 +471,6 @@ error:
 		sg[i].dma_address = bad_dma_address;
 		sg[i].dma_length = 0;
 	}
-	spin_unlock_irqrestore(&tbl->it_lock, flags);
 	return 0;
 }
 