author     Eric Sesterhenn <snakebyte@gmx.de>    2006-03-24 12:47:11 -0500
committer  Adrian Bunk <bunk@stusta.de>          2006-03-24 12:47:11 -0500
commit     34814545890db603b7648ea2ea477d1f83b61297 (patch)
tree       dcf8b60a50bbcbe8bf0ee29ea0d428c2435a895e  /lib/swiotlb.c
parent     6978c7052f2e22c6c40781cdd4eba5c4bce9a789 (diff)
BUG_ON() Conversion in lib/swiotlb.c
This changes if() BUG(); constructs to BUG_ON(), which is cleaner, contains unlikely(), and can be better optimized away.

Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
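For reference, the pattern applied throughout the file looks like the minimal sketch below. The BUG_ON() expansion shown is roughly the generic fallback from include/asm-generic/bug.h; individual architectures may provide their own definition.

	/* Open-coded check, as it appeared before the patch: no branch hint. */
	if (dir == DMA_NONE)
		BUG();

	/*
	 * Converted form: BUG_ON() wraps the test. The generic fallback is
	 * roughly equivalent to
	 *     #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
	 * so the failure path is marked unlikely(), and the whole check can be
	 * compiled away when CONFIG_BUG is disabled.
	 */
	BUG_ON(dir == DMA_NONE);

Because the condition lives inside the macro, callers get the unlikely() hint for free instead of open-coding it at every site.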
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  |  32
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0af497b6b9a8..10625785eefd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	else
 		stride = 1;
 
-	if (!nslots)
-		BUG();
+	BUG_ON(!nslots);
 
 	/*
 	 * Find suitable number of IO TLB entries size that will fit this
@@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(buffer, dma_addr, size);
-		else if (dir != DMA_TO_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(dma_addr, buffer, size);
-		else if (dir != DMA_FROM_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
 	default:
 		BUG();
@@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	/*
 	 * If the pointer passed in happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
@@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	unsigned long dev_addr;
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
@@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))