author		Eric Sesterhenn <snakebyte@gmx.de>	2006-03-10 05:55:20 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:10:35 -0500
commit		30d4d1ffed7098afe2641536d67eef150499da02 (patch)
tree		e651ab7e4199ba03e2db7d1918baf8569b86a36f /arch/sparc
parent		94bbc1763b6b6d20d5cfa70c41cda23af27f8b55 (diff)
[SPARC]: BUG_ON() Conversion in arch/sparc/kernel/ioport.c
This changes if() BUG(); constructs to BUG_ON(), which is cleaner and can be better optimized away.

Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
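For context, a minimal sketch of the pattern this commit applies, assuming a recent kernel tree for the header locations; the helper names sample_check_old() and sample_check_new() are hypothetical and not part of ioport.c. In the generic header, BUG_ON(cond) expands roughly to do { if (unlikely(cond)) BUG(); } while (0), so the conversion keeps the semantics while the unlikely() hint marks the failure branch as cold.

#include <linux/bug.h>	/* BUG(), BUG_ON(); header location assumes a recent tree */
#include <asm/page.h>	/* PAGE_SIZE */

/* Before: open-coded check, two statements for the reader to parse. */
static void sample_check_old(unsigned long plen)
{
	if ((plen & (PAGE_SIZE - 1)) != 0)
		BUG();
}

/* After: the same check as a single assertion; the unlikely() inside
 * the BUG_ON() macro lets the compiler move the failure path out of
 * the hot path. */
static void sample_check_new(unsigned long plen)
{
	BUG_ON((plen & (PAGE_SIZE - 1)) != 0);
}

Beyond readability, architectures are free to override BUG_ON() with a cheaper trapping sequence, which is part of why the one-line form can be optimized better than the open-coded if ().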
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/kernel/ioport.c	40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index d39c9f206271..460f72e640e6 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -217,7 +217,7 @@ static void _sparc_free_io(struct resource *res)
 	unsigned long plen;
 
 	plen = res->end - res->start + 1;
-	if ((plen & (PAGE_SIZE-1)) != 0) BUG();
+	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
 	sparc_unmapiorange(res->start, plen);
 	release_resource(res);
 }
@@ -512,8 +512,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
     int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return virt_to_phys(ptr);
 }
@@ -528,8 +527,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
     int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -542,8 +540,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
 dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
 	unsigned long offset, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
@@ -551,8 +548,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
 void pci_unmap_page(struct pci_dev *hwdev,
 	dma_addr_t dma_address, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* mmu_inval_dma_area XXX */
 }
 
@@ -576,11 +572,10 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for (n = 0; n < nents; n++) {
-		if (page_address(sg->page) == NULL) BUG();
+		BUG_ON(page_address(sg->page) == NULL);
 		sg->dvma_address = virt_to_phys(page_address(sg->page));
 		sg->dvma_length = sg->length;
 		sg++;
@@ -597,11 +592,10 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -622,8 +616,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  */
 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -632,8 +625,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si
 
 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -650,11 +642,10 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -667,11 +658,10 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, i
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);