author	Denis Kirjanov <dkirjanov@kernel.org>	2010-10-20 00:21:13 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-21 04:26:46 -0400
commit	88426f2acae0cf887446013db9eab776871610e7 (patch)
tree	f02697838c11110b25671a1dfcc56b8f8394b68b /drivers
parent	27b75c95f10d249574d9c4cb9dab878107faede8 (diff)
ibmveth: Cleanup error handling inside ibmveth_open
Remove duplicated code in one place.

Signed-off-by: Denis Kirjanov <dkirjanov@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
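The patch collapses eight copies of the same unwind sequence (ibmveth_cleanup() followed by napi_disable() before an early return) into a single err_out label at the end of ibmveth_open(); each failure path now just sets rc and jumps there. A minimal user-space sketch of that pattern, assuming only standard C (the demo_* names are illustrative, not taken from the driver):

	/*
	 * Illustrative sketch of the goto-based error unwinding this
	 * patch introduces; demo_* names are made up, not from the
	 * driver. Every failure path sets rc and jumps to one err_out
	 * label, so the cleanup code exists in exactly one place.
	 */
	#include <errno.h>
	#include <stdlib.h>

	struct demo_adapter {
		void *buffer_list;
		void *rx_queue;
	};

	/* single unwind point: free(NULL) is a no-op, so this is safe
	 * to call no matter how far setup got before failing */
	static void demo_cleanup(struct demo_adapter *a)
	{
		free(a->buffer_list);
		free(a->rx_queue);
	}

	static int demo_open(struct demo_adapter *a)
	{
		int rc;

		a->buffer_list = NULL;
		a->rx_queue = NULL;

		a->buffer_list = malloc(4096);
		if (!a->buffer_list) {
			rc = -ENOMEM;
			goto err_out;	/* was: duplicated cleanup + return */
		}

		a->rx_queue = malloc(4096);
		if (!a->rx_queue) {
			rc = -ENOMEM;
			goto err_out;
		}

		return 0;

	err_out:
		demo_cleanup(a);
		return rc;
	}

	int main(void)
	{
		struct demo_adapter a;
		int rc = demo_open(&a);

		if (rc == 0)
			demo_cleanup(&a);	/* normal teardown reuses the helper */
		return rc ? 1 : 0;
	}

The payoff is maintainability: a later change to the unwind sequence touches one block instead of eight separate error paths.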
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ibmveth.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index b3e157ed6776..2ae8336478b3 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -546,9 +546,8 @@ static int ibmveth_open(struct net_device *netdev)
 	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 		netdev_err(netdev, "unable to allocate filter or buffer list "
 			   "pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
@@ -558,9 +557,8 @@ static int ibmveth_open(struct net_device *netdev)
 
 	if (!adapter->rx_queue.queue_addr) {
 		netdev_err(netdev, "unable to allocate rx queue pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -578,9 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
 	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 		netdev_err(netdev, "unable to map filter or buffer list "
 			   "pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -611,9 +608,8 @@ static int ibmveth_open(struct net_device *netdev)
 				     adapter->filter_list_dma,
 				     rxq_desc.desc,
 				     mac_address);
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENONET;
+		rc = -ENONET;
+		goto err_out;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -622,9 +618,8 @@ static int ibmveth_open(struct net_device *netdev)
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
-			ibmveth_cleanup(adapter);
-			napi_disable(&adapter->napi);
-			return -ENOMEM ;
+			rc = -ENOMEM;
+			goto err_out;
 		}
 	}
 
@@ -638,27 +633,23 @@ static int ibmveth_open(struct net_device *netdev)
 			rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return rc;
+		goto err_out;
 	}
 
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 	if (!adapter->bounce_buffer) {
 		netdev_err(netdev, "unable to allocate bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -669,6 +660,11 @@ static int ibmveth_open(struct net_device *netdev)
 	netdev_dbg(netdev, "open complete\n");
 
 	return 0;
+
+err_out:
+	ibmveth_cleanup(adapter);
+	napi_disable(&adapter->napi);
+	return rc;
 }
 
 static int ibmveth_close(struct net_device *netdev)