author	Santiago Leon <santil@linux.vnet.ibm.com>	2010-09-03 14:29:19 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-06 21:21:53 -0400
commit	21c2decea0f52980a34c79167fe69df3a84d2788 (patch)
tree	97bf41ad57f230c73630f6a4d952745da20a4b4d /drivers
parent	c43ced18a515bef29166f22c01a6d9062aa3008e (diff)
ibmveth: Convert driver specific error functions to netdev_err
Use netdev_err to standardise the error output.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
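The same pattern is applied at every error site; a minimal before/after sketch, taken from the ibmveth_open() hunk below (the netdev pointer is already in scope in the surrounding code):

    /* before: driver-specific macro built on printk(KERN_ERR ...) with __FILE__/__LINE__ */
    ibmveth_error_printk("unable to allocate rx queue pages\n");

    /* after: standard helper that tags the message with the associated net_device */
    netdev_err(netdev, "unable to allocate rx queue pages\n");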
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ibmveth.c	88
1 file changed, 47 insertions, 41 deletions
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 03eca309f326..98873678e597 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -57,12 +57,6 @@
 
 #undef DEBUG
 
-#define ibmveth_printk(fmt, args...) \
-  printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
-
-#define ibmveth_error_printk(fmt, args...) \
-  printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-
 #ifdef DEBUG
 #define ibmveth_assert(expr) \
   if(!(expr)) { \
@@ -558,7 +552,8 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 
 	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
+		netdev_err(netdev, "unable to allocate filter or buffer list "
+			   "pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
 		return -ENOMEM;
@@ -568,7 +563,7 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
 
 	if(!adapter->rx_queue.queue_addr) {
-		ibmveth_error_printk("unable to allocate rx queue pages\n");
+		netdev_err(netdev, "unable to allocate rx queue pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
 		return -ENOMEM;
@@ -587,7 +582,8 @@ static int ibmveth_open(struct net_device *netdev)
 	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
 	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
 	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
-		ibmveth_error_printk("unable to map filter or buffer list pages\n");
+		netdev_err(netdev, "unable to map filter or buffer list "
+			   "pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
 		return -ENOMEM;
@@ -612,8 +608,10 @@ static int ibmveth_open(struct net_device *netdev)
 	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
 	if(lpar_rc != H_SUCCESS) {
-		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
-		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
+		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+			   lpar_rc);
+		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+			   "desc:0x%llx MAC:0x%llx\n",
 				     adapter->buffer_list_dma,
 				     adapter->filter_list_dma,
 				     rxq_desc.desc,
@@ -627,7 +625,7 @@ static int ibmveth_open(struct net_device *netdev)
 		if(!adapter->rx_buff_pool[i].active)
 			continue;
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
-			ibmveth_error_printk("unable to alloc pool\n");
+			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
 			ibmveth_cleanup(adapter);
 			napi_disable(&adapter->napi);
@@ -637,7 +635,8 @@ static int ibmveth_open(struct net_device *netdev)
 
 	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
 	if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
-		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+			   netdev->irq, rc);
 		do {
 			rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
@@ -650,7 +649,7 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 	if (!adapter->bounce_buffer) {
-		ibmveth_error_printk("unable to allocate bounce buffer\n");
+		netdev_err(netdev, "unable to allocate bounce buffer\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
 		return -ENOMEM;
@@ -659,7 +658,7 @@ static int ibmveth_open(struct net_device *netdev)
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-		ibmveth_error_printk("unable to map bounce buffer\n");
+		netdev_err(netdev, "unable to map bounce buffer\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
 		return -ENOMEM;
@@ -695,8 +694,8 @@ static int ibmveth_close(struct net_device *netdev)
 
 	if(lpar_rc != H_SUCCESS)
 	{
-		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
-				     lpar_rc);
+		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+			   "continuing with close\n", lpar_rc);
 	}
 
 	free_irq(netdev->irq, netdev);
@@ -806,9 +805,9 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 					 set_attr, &ret_attr);
 
 	if (ret != H_SUCCESS) {
-		ibmveth_error_printk("unable to change IPv4 checksum "
-				     "offload settings. %d rc=%ld\n",
-				     data, ret);
+		netdev_err(dev, "unable to change IPv4 checksum "
+			   "offload settings. %d rc=%ld\n",
+			   data, ret);
 
 		ret = h_illan_attributes(adapter->vdev->unit_address,
 					 set_attr, clr_attr, &ret_attr);
@@ -819,9 +818,9 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 					 clr_attr6, set_attr6, &ret_attr);
 
 		if (ret6 != H_SUCCESS) {
-			ibmveth_error_printk("unable to change IPv6 checksum "
-					     "offload settings. %d rc=%ld\n",
-					     data, ret);
+			netdev_err(dev, "unable to change IPv6 checksum "
+				   "offload settings. %d rc=%ld\n",
+				   data, ret);
 
 			ret = h_illan_attributes(adapter->vdev->unit_address,
 						 set_attr6, clr_attr6,
@@ -835,8 +834,9 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 			rc1 = -EIO;
 	} else {
 		rc1 = -EIO;
-		ibmveth_error_printk("unable to change checksum offload settings."
-				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
+		netdev_err(dev, "unable to change checksum offload settings."
+			   " %d rc=%ld ret_attr=%lx\n", data, ret,
+			   ret_attr);
 	}
 
 	if (restart)
@@ -952,8 +952,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
 	} while ((ret == H_BUSY) && (retry_count--));
 
 	if (ret != H_SUCCESS && ret != H_DROPPED) {
-		ibmveth_error_printk("tx: h_send_logical_lan failed with "
-				     "rc=%ld\n", ret);
+		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+			   "with rc=%ld\n", ret);
 		return 1;
 	}
 
@@ -986,7 +986,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 	     ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
 	    skb_checksum_help(skb)) {
 
-		ibmveth_error_printk("tx: failed to checksum packet\n");
+		netdev_err(netdev, "tx: failed to checksum packet\n");
 		netdev->stats.tx_dropped++;
 		goto out;
 	}
@@ -1082,7 +1082,7 @@ map_failed_frags:
 
 map_failed:
 	if (!firmware_has_feature(FW_FEATURE_CMO))
-		ibmveth_error_printk("tx: unable to map xmit buffer\n");
+		netdev_err(netdev, "tx: unable to map xmit buffer\n");
 	adapter->tx_map_failed++;
 	skb_linearize(skb);
 	force_bounce = 1;
@@ -1198,7 +1198,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 					   IbmVethMcastDisableFiltering,
 					   0);
 		if(lpar_rc != H_SUCCESS) {
-			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
+			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+				   "entering promisc mode\n", lpar_rc);
 		}
 	} else {
 		struct netdev_hw_addr *ha;
@@ -1209,7 +1210,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 					   IbmVethMcastClearFilterTable,
 					   0);
 		if(lpar_rc != H_SUCCESS) {
-			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
+			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+				   "attempting to clear filter table\n",
+				   lpar_rc);
 		}
 		/* add the addresses to the filter table */
 		netdev_for_each_mc_addr(ha, netdev) {
@@ -1220,7 +1223,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 						   IbmVethMcastAddFilter,
 						   mcast_addr);
 			if(lpar_rc != H_SUCCESS) {
-				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
+				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+					   "when adding an entry to the filter "
+					   "table\n", lpar_rc);
 			}
 		}
 
@@ -1229,7 +1234,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 					   IbmVethMcastEnableFiltering,
 					   0);
 		if(lpar_rc != H_SUCCESS) {
-			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
+			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+				   "enabling filtering\n", lpar_rc);
 		}
 	}
 }
@@ -1361,17 +1367,15 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
 						VETH_MAC_ADDR, NULL);
 	if(!mac_addr_p) {
-		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
-				"attribute\n", __FILE__, __LINE__);
+		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
 		return 0;
 	}
 
 	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
 						VETH_MCAST_FILTER_SIZE, NULL);
 	if(!mcastFilterSize_p) {
-		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
-				"VETH_MCAST_FILTER_SIZE attribute\n",
-				__FILE__, __LINE__);
+		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+			"attribute\n");
 		return 0;
 	}
 
@@ -1501,7 +1505,8 @@ const char * buf, size_t count)
 	if (value && !pool->active) {
 		if (netif_running(netdev)) {
 			if(ibmveth_alloc_buffer_pool(pool)) {
-				ibmveth_error_printk("unable to alloc pool\n");
+				netdev_err(netdev,
+					   "unable to alloc pool\n");
 				return -ENOMEM;
 			}
 			pool->active = 1;
@@ -1527,7 +1532,7 @@ const char * buf, size_t count)
 		}
 
 		if (i == IbmVethNumBufferPools) {
-			ibmveth_error_printk("no active pool >= MTU\n");
+			netdev_err(netdev, "no active pool >= MTU\n");
 			return -EPERM;
 		}
 
@@ -1635,7 +1640,8 @@ static struct vio_driver ibmveth_driver = {
 
 static int __init ibmveth_module_init(void)
 {
-	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
+	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+	       ibmveth_driver_string, ibmveth_driver_version);
 
 	return vio_register_driver(&ibmveth_driver);
 }