path: root/drivers/net
author     Joe Perches <joe@perches.com>            2010-07-22 08:33:31 -0400
committer  David S. Miller <davem@davemloft.net>    2010-07-26 16:15:21 -0400
commit     eddc5fbd80999444dd32aca3c90290c9d64da396 (patch)
tree       7aa1fc3333a3c530b80f3a29e3939b72fb4ce9de /drivers/net
parent     40b53d8a4edca018b8edb2fa99c5326642d450fa (diff)
drivers/net/qla3xxx.c: Update logging message style
Use pr_<level>
Use netdev_<level>
Use netif_<level>
Remove #define PFX
Improve a couple of loops to avoid deep indentation.
Compile tested only

$ size drivers/net/qla3xxx.o.*
   text    data     bss     dec     hex filename
  51603     212   13864   65679   1008f drivers/net/qla3xxx.o.old
  50413     212   13864   64489    fbe9 drivers/net/qla3xxx.o.new

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
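The conversion follows the standard kernel pattern: messages that have a struct net_device in hand drop the hand-rolled PFX/ndev->name prefix in favour of netdev_<level>(), and messages that were gated on netif_msg_link() become netif_<level>(), which folds the msg_enable test in. A condensed before/after sketch of the pattern (illustrative only, assembled from the hunks below rather than quoted verbatim):

    /* Before: manual prefix, explicit device name, open-coded msg_enable check */
    printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
           qdev->ndev->name);
    if (netif_msg_link(qdev))
            printk(KERN_WARNING PFX
                   "%s: Timed out waiting for management port to "
                   "get free before issuing command.\n",
                   qdev->ndev->name);

    /* After: netdev_<level>() prefixes the message with the driver and device
     * names itself, and netif_<level>() performs the netif_msg_link(qdev)
     * test internally before printing. */
    netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
    netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);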
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/qla3xxx.c  524
1 file changed, 200 insertions, 324 deletions
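Probe-time messages that fire before a net_device exists are switched to pr_<level>() instead; the pr_fmt() definition added at the top of the file (first hunk below) restores the module-name prefix that the removed PFX macro used to supply. A minimal sketch of the mechanism (illustrative only, not part of the patch text):

    /* Must be defined before the printk machinery pulled in via linux/kernel.h,
     * otherwise printk.h installs its identity default pr_fmt(). */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>

    /* For this driver KBUILD_MODNAME is "qla3xxx", so this expands to
     * printk(KERN_ERR "qla3xxx: " "%s cannot enable PCI device\n", ...),
     * and every pr_err()/pr_alert() in the file is prefixed automatically. */
    pr_err("%s cannot enable PCI device\n", pci_name(pdev));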
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 54ebb65ada18..74debf167c52 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -5,6 +5,8 @@
5 * See LICENSE.qla3xxx for copyright and licensing details. 5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
8#include <linux/kernel.h> 10#include <linux/kernel.h>
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/types.h> 12#include <linux/types.h>
@@ -39,11 +41,13 @@
39#define DRV_NAME "qla3xxx" 41#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver" 42#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.03.00-k5" 43#define DRV_VERSION "v2.03.00-k5"
42#define PFX DRV_NAME " "
43 44
44static const char ql3xxx_driver_name[] = DRV_NAME; 45static const char ql3xxx_driver_name[] = DRV_NAME;
45static const char ql3xxx_driver_version[] = DRV_VERSION; 46static const char ql3xxx_driver_version[] = DRV_VERSION;
46 47
48#define TIMED_OUT_MSG \
49"Timed out waiting for management port to get free before issuing command\n"
50
47MODULE_AUTHOR("QLogic Corporation"); 51MODULE_AUTHOR("QLogic Corporation");
48MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); 52MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
49MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
@@ -139,27 +143,22 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
139{ 143{
140 int i = 0; 144 int i = 0;
141 145
142 while (1) { 146 while (i < 10) {
143 if (!ql_sem_lock(qdev, 147 if (i)
144 QL_DRVR_SEM_MASK, 148 ssleep(1);
145 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 149
146 * 2) << 1)) { 150 if (ql_sem_lock(qdev,
147 if (i < 10) { 151 QL_DRVR_SEM_MASK,
148 ssleep(1); 152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
149 i++; 153 * 2) << 1)) {
150 } else { 154 netdev_printk(KERN_DEBUG, qdev->ndev,
151 printk(KERN_ERR PFX "%s: Timed out waiting for " 155 "driver lock acquired\n");
152 "driver lock...\n",
153 qdev->ndev->name);
154 return 0;
155 }
156 } else {
157 printk(KERN_DEBUG PFX
158 "%s: driver lock acquired.\n",
159 qdev->ndev->name);
160 return 1; 156 return 1;
161 } 157 }
162 } 158 }
159
160 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
161 return 0;
163} 162}
164 163
165static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) 164static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
@@ -308,8 +307,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
308 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 307 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
309 qdev->lrg_buffer_len); 308 qdev->lrg_buffer_len);
310 if (unlikely(!lrg_buf_cb->skb)) { 309 if (unlikely(!lrg_buf_cb->skb)) {
311 printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n", 310 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
312 qdev->ndev->name);
313 qdev->lrg_buf_skb_check++; 311 qdev->lrg_buf_skb_check++;
314 } else { 312 } else {
315 /* 313 /*
@@ -324,8 +322,9 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
324 PCI_DMA_FROMDEVICE); 322 PCI_DMA_FROMDEVICE);
325 err = pci_dma_mapping_error(qdev->pdev, map); 323 err = pci_dma_mapping_error(qdev->pdev, map);
326 if(err) { 324 if(err) {
327 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 325 netdev_err(qdev->ndev,
328 qdev->ndev->name, err); 326 "PCI mapping failed with error: %d\n",
327 err);
329 dev_kfree_skb(lrg_buf_cb->skb); 328 dev_kfree_skb(lrg_buf_cb->skb);
330 lrg_buf_cb->skb = NULL; 329 lrg_buf_cb->skb = NULL;
331 330
@@ -556,8 +555,7 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
556 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, 555 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
557 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 556 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
558 2) << 10)) { 557 2) << 10)) {
559 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", 558 pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
560 __func__);
561 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 559 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
562 return -1; 560 return -1;
563 } 561 }
@@ -570,8 +568,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
570 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); 568 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
571 569
572 if (checksum != 0) { 570 if (checksum != 0) {
573 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", 571 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
574 qdev->ndev->name, checksum); 572 checksum);
575 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 573 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
576 return -1; 574 return -1;
577 } 575 }
@@ -668,11 +666,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
668 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 666 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
669 667
670 if (ql_wait_for_mii_ready(qdev)) { 668 if (ql_wait_for_mii_ready(qdev)) {
671 if (netif_msg_link(qdev)) 669 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
672 printk(KERN_WARNING PFX
673 "%s Timed out waiting for management port to "
674 "get free before issuing command.\n",
675 qdev->ndev->name);
676 return -1; 670 return -1;
677 } 671 }
678 672
@@ -683,11 +677,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
683 677
684 /* Wait for write to complete 9/10/04 SJP */ 678 /* Wait for write to complete 9/10/04 SJP */
685 if (ql_wait_for_mii_ready(qdev)) { 679 if (ql_wait_for_mii_ready(qdev)) {
686 if (netif_msg_link(qdev)) 680 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
687 printk(KERN_WARNING PFX
688 "%s: Timed out waiting for management port to "
689 "get free before issuing command.\n",
690 qdev->ndev->name);
691 return -1; 681 return -1;
692 } 682 }
693 683
@@ -708,11 +698,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
708 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 698 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
709 699
710 if (ql_wait_for_mii_ready(qdev)) { 700 if (ql_wait_for_mii_ready(qdev)) {
711 if (netif_msg_link(qdev)) 701 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
712 printk(KERN_WARNING PFX
713 "%s: Timed out waiting for management port to "
714 "get free before issuing command.\n",
715 qdev->ndev->name);
716 return -1; 702 return -1;
717 } 703 }
718 704
@@ -727,11 +713,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
727 713
728 /* Wait for the read to complete */ 714 /* Wait for the read to complete */
729 if (ql_wait_for_mii_ready(qdev)) { 715 if (ql_wait_for_mii_ready(qdev)) {
730 if (netif_msg_link(qdev)) 716 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
731 printk(KERN_WARNING PFX
732 "%s: Timed out waiting for management port to "
733 "get free after issuing command.\n",
734 qdev->ndev->name);
735 return -1; 717 return -1;
736 } 718 }
737 719
@@ -752,11 +734,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
752 ql_mii_disable_scan_mode(qdev); 734 ql_mii_disable_scan_mode(qdev);
753 735
754 if (ql_wait_for_mii_ready(qdev)) { 736 if (ql_wait_for_mii_ready(qdev)) {
755 if (netif_msg_link(qdev)) 737 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
756 printk(KERN_WARNING PFX
757 "%s: Timed out waiting for management port to "
758 "get free before issuing command.\n",
759 qdev->ndev->name);
760 return -1; 738 return -1;
761 } 739 }
762 740
@@ -767,11 +745,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
767 745
768 /* Wait for write to complete. */ 746 /* Wait for write to complete. */
769 if (ql_wait_for_mii_ready(qdev)) { 747 if (ql_wait_for_mii_ready(qdev)) {
770 if (netif_msg_link(qdev)) 748 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
771 printk(KERN_WARNING PFX
772 "%s: Timed out waiting for management port to "
773 "get free before issuing command.\n",
774 qdev->ndev->name);
775 return -1; 749 return -1;
776 } 750 }
777 751
@@ -789,11 +763,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
789 ql_mii_disable_scan_mode(qdev); 763 ql_mii_disable_scan_mode(qdev);
790 764
791 if (ql_wait_for_mii_ready(qdev)) { 765 if (ql_wait_for_mii_ready(qdev)) {
792 if (netif_msg_link(qdev)) 766 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
793 printk(KERN_WARNING PFX
794 "%s: Timed out waiting for management port to "
795 "get free before issuing command.\n",
796 qdev->ndev->name);
797 return -1; 767 return -1;
798 } 768 }
799 769
@@ -808,11 +778,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
808 778
809 /* Wait for the read to complete */ 779 /* Wait for the read to complete */
810 if (ql_wait_for_mii_ready(qdev)) { 780 if (ql_wait_for_mii_ready(qdev)) {
811 if (netif_msg_link(qdev)) 781 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
812 printk(KERN_WARNING PFX
813 "%s: Timed out waiting for management port to "
814 "get free before issuing command.\n",
815 qdev->ndev->name);
816 return -1; 782 return -1;
817 } 783 }
818 784
@@ -898,7 +864,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
898 864
899static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) 865static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
900{ 866{
901 printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); 867 netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
902 /* power down device bit 11 = 1 */ 868 /* power down device bit 11 = 1 */
903 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); 869 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
904 /* enable diagnostic mode bit 2 = 1 */ 870 /* enable diagnostic mode bit 2 = 1 */
@@ -952,12 +918,12 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
952 918
953 /* Scan table for this PHY */ 919 /* Scan table for this PHY */
954 for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { 920 for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
955 if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) 921 if ((oui == PHY_DEVICES[i].phyIdOUI) &&
956 { 922 (model == PHY_DEVICES[i].phyIdModel)) {
957 result = PHY_DEVICES[i].phyDevice; 923 result = PHY_DEVICES[i].phyDevice;
958 924
959 printk(KERN_INFO "%s: Phy: %s\n", 925 netdev_info(qdev->ndev, "Phy: %s\n",
960 qdev->ndev->name, PHY_DEVICES[i].name); 926 PHY_DEVICES[i].name);
961 927
962 break; 928 break;
963 } 929 }
@@ -1041,15 +1007,13 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1041 /* Determine the PHY we are using by reading the ID's */ 1007 /* Determine the PHY we are using by reading the ID's */
1042 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); 1008 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
1043 if(err != 0) { 1009 if(err != 0) {
1044 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", 1010 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
1045 qdev->ndev->name);
1046 return err; 1011 return err;
1047 } 1012 }
1048 1013
1049 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); 1014 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
1050 if(err != 0) { 1015 if(err != 0) {
1051 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", 1016 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
1052 qdev->ndev->name);
1053 return err; 1017 return err;
1054 } 1018 }
1055 1019
@@ -1066,15 +1030,14 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1066 1030
1067 err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); 1031 err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1068 if(err != 0) { 1032 if(err != 0) {
1069 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", 1033 netdev_err(qdev->ndev,
1070 qdev->ndev->name); 1034 "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1071 return err; 1035 return err;
1072 } 1036 }
1073 1037
1074 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); 1038 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1075 if(err != 0) { 1039 if(err != 0) {
1076 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", 1040 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1077 qdev->ndev->name);
1078 return err; 1041 return err;
1079 } 1042 }
1080 1043
@@ -1090,7 +1053,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1090 /* need this here so address gets changed */ 1053 /* need this here so address gets changed */
1091 phyAgereSpecificInit(qdev, miiAddr); 1054 phyAgereSpecificInit(qdev, miiAddr);
1092 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1055 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1093 printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); 1056 netdev_err(qdev->ndev, "PHY is unknown\n");
1094 return -EIO; 1057 return -EIO;
1095 } 1058 }
1096 1059
@@ -1250,18 +1213,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1250 1213
1251 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1214 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1252 if (temp & bitToCheck) { 1215 if (temp & bitToCheck) {
1253 if (netif_msg_link(qdev)) 1216 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1254 printk(KERN_INFO PFX
1255 "%s: Auto-Negotiate complete.\n",
1256 qdev->ndev->name);
1257 return 1; 1217 return 1;
1258 } else {
1259 if (netif_msg_link(qdev))
1260 printk(KERN_WARNING PFX
1261 "%s: Auto-Negotiate incomplete.\n",
1262 qdev->ndev->name);
1263 return 0;
1264 } 1218 }
1219 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1220 return 0;
1265} 1221}
1266 1222
1267/* 1223/*
@@ -1387,16 +1343,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1387 1343
1388 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1344 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1389 if (temp & bitToCheck) { 1345 if (temp & bitToCheck) {
1390 if (netif_msg_link(qdev)) 1346 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1391 printk(KERN_DEBUG PFX 1347 "not link master\n");
1392 "%s: is not link master.\n", qdev->ndev->name);
1393 return 0; 1348 return 0;
1394 } else {
1395 if (netif_msg_link(qdev))
1396 printk(KERN_DEBUG PFX
1397 "%s: is link master.\n", qdev->ndev->name);
1398 return 1;
1399 } 1349 }
1350
1351 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1352 return 1;
1400} 1353}
1401 1354
1402static void ql_phy_reset_ex(struct ql3_adapter *qdev) 1355static void ql_phy_reset_ex(struct ql3_adapter *qdev)
@@ -1518,8 +1471,7 @@ static int ql_port_start(struct ql3_adapter *qdev)
1518 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1471 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1519 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1472 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1520 2) << 7)) { 1473 2) << 7)) {
1521 printk(KERN_ERR "%s: Could not get hw lock for GIO\n", 1474 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1522 qdev->ndev->name);
1523 return -1; 1475 return -1;
1524 } 1476 }
1525 1477
@@ -1545,11 +1497,8 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1545 if (!ql_auto_neg_error(qdev)) { 1497 if (!ql_auto_neg_error(qdev)) {
1546 if (test_bit(QL_LINK_MASTER,&qdev->flags)) { 1498 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1547 /* configure the MAC */ 1499 /* configure the MAC */
1548 if (netif_msg_link(qdev)) 1500 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1549 printk(KERN_DEBUG PFX 1501 "Configuring link\n");
1550 "%s: Configuring link.\n",
1551 qdev->ndev->
1552 name);
1553 ql_mac_cfg_soft_reset(qdev, 1); 1502 ql_mac_cfg_soft_reset(qdev, 1);
1554 ql_mac_cfg_gig(qdev, 1503 ql_mac_cfg_gig(qdev,
1555 (ql_get_link_speed 1504 (ql_get_link_speed
@@ -1564,34 +1513,24 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1564 ql_mac_cfg_soft_reset(qdev, 0); 1513 ql_mac_cfg_soft_reset(qdev, 0);
1565 1514
1566 /* enable the MAC */ 1515 /* enable the MAC */
1567 if (netif_msg_link(qdev)) 1516 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1568 printk(KERN_DEBUG PFX 1517 "Enabling mac\n");
1569 "%s: Enabling mac.\n",
1570 qdev->ndev->
1571 name);
1572 ql_mac_enable(qdev, 1); 1518 ql_mac_enable(qdev, 1);
1573 } 1519 }
1574 1520
1575 qdev->port_link_state = LS_UP; 1521 qdev->port_link_state = LS_UP;
1576 netif_start_queue(qdev->ndev); 1522 netif_start_queue(qdev->ndev);
1577 netif_carrier_on(qdev->ndev); 1523 netif_carrier_on(qdev->ndev);
1578 if (netif_msg_link(qdev)) 1524 netif_info(qdev, link, qdev->ndev,
1579 printk(KERN_INFO PFX 1525 "Link is up at %d Mbps, %s duplex\n",
1580 "%s: Link is up at %d Mbps, %s duplex.\n", 1526 ql_get_link_speed(qdev),
1581 qdev->ndev->name, 1527 ql_is_link_full_dup(qdev) ? "full" : "half");
1582 ql_get_link_speed(qdev),
1583 ql_is_link_full_dup(qdev)
1584 ? "full" : "half");
1585 1528
1586 } else { /* Remote error detected */ 1529 } else { /* Remote error detected */
1587 1530
1588 if (test_bit(QL_LINK_MASTER,&qdev->flags)) { 1531 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1589 if (netif_msg_link(qdev)) 1532 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1590 printk(KERN_DEBUG PFX 1533 "Remote error detected. Calling ql_port_start()\n");
1591 "%s: Remote error detected. "
1592 "Calling ql_port_start().\n",
1593 qdev->ndev->
1594 name);
1595 /* 1534 /*
1596 * ql_port_start() is shared code and needs 1535 * ql_port_start() is shared code and needs
1597 * to lock the PHY on it's own. 1536 * to lock the PHY on it's own.
@@ -1620,15 +1559,13 @@ static void ql_link_state_machine_work(struct work_struct *work)
1620 curr_link_state = ql_get_link_state(qdev); 1559 curr_link_state = ql_get_link_state(qdev);
1621 1560
1622 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { 1561 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1623 if (netif_msg_link(qdev)) 1562 netif_info(qdev, link, qdev->ndev,
1624 printk(KERN_INFO PFX 1563 "Reset in progress, skip processing link state\n");
1625 "%s: Reset in progress, skip processing link "
1626 "state.\n", qdev->ndev->name);
1627 1564
1628 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1565 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1629 1566
1630 /* Restart timer on 2 second interval. */ 1567 /* Restart timer on 2 second interval. */
1631 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ 1568 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1632 1569
1633 return; 1570 return;
1634 } 1571 }
@@ -1643,9 +1580,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1643 1580
1644 case LS_DOWN: 1581 case LS_DOWN:
1645 if (curr_link_state == LS_UP) { 1582 if (curr_link_state == LS_UP) {
1646 if (netif_msg_link(qdev)) 1583 netif_info(qdev, link, qdev->ndev, "Link is up\n");
1647 printk(KERN_INFO PFX "%s: Link is up.\n",
1648 qdev->ndev->name);
1649 if (ql_is_auto_neg_complete(qdev)) 1584 if (ql_is_auto_neg_complete(qdev))
1650 ql_finish_auto_neg(qdev); 1585 ql_finish_auto_neg(qdev);
1651 1586
@@ -1662,9 +1597,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1662 * back up 1597 * back up
1663 */ 1598 */
1664 if (curr_link_state == LS_DOWN) { 1599 if (curr_link_state == LS_DOWN) {
1665 if (netif_msg_link(qdev)) 1600 netif_info(qdev, link, qdev->ndev, "Link is down\n");
1666 printk(KERN_INFO PFX "%s: Link is down.\n",
1667 qdev->ndev->name);
1668 qdev->port_link_state = LS_DOWN; 1601 qdev->port_link_state = LS_DOWN;
1669 } 1602 }
1670 if (ql_link_down_detect(qdev)) 1603 if (ql_link_down_detect(qdev))
@@ -1888,9 +1821,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1888 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 1821 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1889 qdev->lrg_buffer_len); 1822 qdev->lrg_buffer_len);
1890 if (unlikely(!lrg_buf_cb->skb)) { 1823 if (unlikely(!lrg_buf_cb->skb)) {
1891 printk(KERN_DEBUG PFX 1824 netdev_printk(KERN_DEBUG, qdev->ndev,
1892 "%s: Failed netdev_alloc_skb().\n", 1825 "Failed netdev_alloc_skb()\n");
1893 qdev->ndev->name);
1894 break; 1826 break;
1895 } else { 1827 } else {
1896 /* 1828 /*
@@ -1906,8 +1838,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1906 1838
1907 err = pci_dma_mapping_error(qdev->pdev, map); 1839 err = pci_dma_mapping_error(qdev->pdev, map);
1908 if(err) { 1840 if(err) {
1909 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 1841 netdev_err(qdev->ndev,
1910 qdev->ndev->name, err); 1842 "PCI mapping failed with error: %d\n",
1843 err);
1911 dev_kfree_skb(lrg_buf_cb->skb); 1844 dev_kfree_skb(lrg_buf_cb->skb);
1912 lrg_buf_cb->skb = NULL; 1845 lrg_buf_cb->skb = NULL;
1913 break; 1846 break;
@@ -2012,14 +1945,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2012 int retval = 0; 1945 int retval = 0;
2013 1946
2014 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1947 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2015 printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); 1948 netdev_warn(qdev->ndev,
1949 "Frame too short but it was padded and sent\n");
2016 } 1950 }
2017 1951
2018 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1952 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
2019 1953
2020 /* Check the transmit response flags for any errors */ 1954 /* Check the transmit response flags for any errors */
2021 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1955 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2022 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); 1956 netdev_err(qdev->ndev,
1957 "Frame too short to be legal, frame not sent\n");
2023 1958
2024 qdev->ndev->stats.tx_errors++; 1959 qdev->ndev->stats.tx_errors++;
2025 retval = -EIO; 1960 retval = -EIO;
@@ -2027,7 +1962,8 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2027 } 1962 }
2028 1963
2029 if(tx_cb->seg_count == 0) { 1964 if(tx_cb->seg_count == 0) {
2030 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); 1965 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1966 mac_rsp->transaction_id);
2031 1967
2032 qdev->ndev->stats.tx_errors++; 1968 qdev->ndev->stats.tx_errors++;
2033 retval = -EIO; 1969 retval = -EIO;
@@ -2177,12 +2113,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2177 if (checksum & 2113 if (checksum &
2178 (IB_IP_IOCB_RSP_3032_ICE | 2114 (IB_IP_IOCB_RSP_3032_ICE |
2179 IB_IP_IOCB_RSP_3032_CE)) { 2115 IB_IP_IOCB_RSP_3032_CE)) {
2180 printk(KERN_ERR 2116 netdev_err(ndev,
2181 "%s: Bad checksum for this %s packet, checksum = %x.\n", 2117 "%s: Bad checksum for this %s packet, checksum = %x\n",
2182 __func__, 2118 __func__,
2183 ((checksum & 2119 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2184 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : 2120 "TCP" : "UDP"), checksum);
2185 "UDP"),checksum);
2186 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2121 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2187 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2122 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2188 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2123 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
@@ -2245,18 +2180,15 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2245 default: 2180 default:
2246 { 2181 {
2247 u32 *tmp = (u32 *) net_rsp; 2182 u32 *tmp = (u32 *) net_rsp;
2248 printk(KERN_ERR PFX 2183 netdev_err(ndev,
2249 "%s: Hit default case, not " 2184 "Hit default case, not handled!\n"
2250 "handled!\n" 2185 " dropping the packet, opcode = %x\n"
2251 " dropping the packet, opcode = " 2186 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2252 "%x.\n", 2187 net_rsp->opcode,
2253 ndev->name, net_rsp->opcode); 2188 (unsigned long int)tmp[0],
2254 printk(KERN_ERR PFX 2189 (unsigned long int)tmp[1],
2255 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2190 (unsigned long int)tmp[2],
2256 (unsigned long int)tmp[0], 2191 (unsigned long int)tmp[3]);
2257 (unsigned long int)tmp[1],
2258 (unsigned long int)tmp[2],
2259 (unsigned long int)tmp[3]);
2260 } 2192 }
2261 } 2193 }
2262 2194
@@ -2328,18 +2260,18 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2328 var = 2260 var =
2329 ql_read_page0_reg_l(qdev, 2261 ql_read_page0_reg_l(qdev,
2330 &port_regs->PortFatalErrStatus); 2262 &port_regs->PortFatalErrStatus);
2331 printk(KERN_WARNING PFX 2263 netdev_warn(ndev,
2332 "%s: Resetting chip. PortFatalErrStatus " 2264 "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2333 "register = 0x%x\n", ndev->name, var); 2265 var);
2334 set_bit(QL_RESET_START,&qdev->flags) ; 2266 set_bit(QL_RESET_START,&qdev->flags) ;
2335 } else { 2267 } else {
2336 /* 2268 /*
2337 * Soft Reset Requested. 2269 * Soft Reset Requested.
2338 */ 2270 */
2339 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; 2271 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
2340 printk(KERN_ERR PFX 2272 netdev_err(ndev,
2341 "%s: Another function issued a reset to the " 2273 "Another function issued a reset to the chip. ISR value = %x\n",
2342 "chip. ISR value = %x.\n", ndev->name, value); 2274 value);
2343 } 2275 }
2344 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2276 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2345 spin_unlock(&qdev->adapter_lock); 2277 spin_unlock(&qdev->adapter_lock);
@@ -2438,8 +2370,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
2438 2370
2439 err = pci_dma_mapping_error(qdev->pdev, map); 2371 err = pci_dma_mapping_error(qdev->pdev, map);
2440 if(err) { 2372 if(err) {
2441 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2373 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2442 qdev->ndev->name, err); 2374 err);
2443 2375
2444 return NETDEV_TX_BUSY; 2376 return NETDEV_TX_BUSY;
2445 } 2377 }
@@ -2472,8 +2404,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
2472 err = pci_dma_mapping_error(qdev->pdev, map); 2404 err = pci_dma_mapping_error(qdev->pdev, map);
2473 if(err) { 2405 if(err) {
2474 2406
2475 printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", 2407 netdev_err(qdev->ndev,
2476 qdev->ndev->name, err); 2408 "PCI mapping outbound address list with error: %d\n",
2409 err);
2477 goto map_error; 2410 goto map_error;
2478 } 2411 }
2479 2412
@@ -2498,8 +2431,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
2498 2431
2499 err = pci_dma_mapping_error(qdev->pdev, map); 2432 err = pci_dma_mapping_error(qdev->pdev, map);
2500 if(err) { 2433 if(err) {
2501 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", 2434 netdev_err(qdev->ndev,
2502 qdev->ndev->name, err); 2435 "PCI mapping frags failed with error: %d\n",
2436 err);
2503 goto map_error; 2437 goto map_error;
2504 } 2438 }
2505 2439
@@ -2582,7 +2516,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2582 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2516 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2583 if((tx_cb->seg_count = ql_get_seg_count(qdev, 2517 if((tx_cb->seg_count = ql_get_seg_count(qdev,
2584 (skb_shinfo(skb)->nr_frags))) == -1) { 2518 (skb_shinfo(skb)->nr_frags))) == -1) {
2585 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2519 netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2586 return NETDEV_TX_OK; 2520 return NETDEV_TX_OK;
2587 } 2521 }
2588 2522
@@ -2599,7 +2533,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2599 ql_hw_csum_setup(skb, mac_iocb_ptr); 2533 ql_hw_csum_setup(skb, mac_iocb_ptr);
2600 2534
2601 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { 2535 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
2602 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); 2536 netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2603 return NETDEV_TX_BUSY; 2537 return NETDEV_TX_BUSY;
2604 } 2538 }
2605 2539
@@ -2612,9 +2546,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2612 &port_regs->CommonRegs.reqQProducerIndex, 2546 &port_regs->CommonRegs.reqQProducerIndex,
2613 qdev->req_producer_index); 2547 qdev->req_producer_index);
2614 2548
2615 if (netif_msg_tx_queued(qdev)) 2549 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2616 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2550 "tx queued, slot %d, len %d\n",
2617 ndev->name, qdev->req_producer_index, skb->len); 2551 qdev->req_producer_index, skb->len);
2618 2552
2619 atomic_dec(&qdev->tx_count); 2553 atomic_dec(&qdev->tx_count);
2620 return NETDEV_TX_OK; 2554 return NETDEV_TX_OK;
@@ -2632,8 +2566,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2632 2566
2633 if ((qdev->req_q_virt_addr == NULL) || 2567 if ((qdev->req_q_virt_addr == NULL) ||
2634 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2568 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2635 printk(KERN_ERR PFX "%s: reqQ failed.\n", 2569 netdev_err(qdev->ndev, "reqQ failed\n");
2636 qdev->ndev->name);
2637 return -ENOMEM; 2570 return -ENOMEM;
2638 } 2571 }
2639 2572
@@ -2646,9 +2579,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2646 2579
2647 if ((qdev->rsp_q_virt_addr == NULL) || 2580 if ((qdev->rsp_q_virt_addr == NULL) ||
2648 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2581 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2649 printk(KERN_ERR PFX 2582 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2650 "%s: rspQ allocation failed\n",
2651 qdev->ndev->name);
2652 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2583 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2653 qdev->req_q_virt_addr, 2584 qdev->req_q_virt_addr,
2654 qdev->req_q_phy_addr); 2585 qdev->req_q_phy_addr);
@@ -2663,8 +2594,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2663static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2594static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2664{ 2595{
2665 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { 2596 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2666 printk(KERN_INFO PFX 2597 netdev_info(qdev->ndev, "Already done\n");
2667 "%s: Already done.\n", qdev->ndev->name);
2668 return; 2598 return;
2669 } 2599 }
2670 2600
@@ -2695,8 +2625,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2695 2625
2696 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); 2626 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
2697 if (qdev->lrg_buf == NULL) { 2627 if (qdev->lrg_buf == NULL) {
2698 printk(KERN_ERR PFX 2628 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2699 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2700 return -ENOMEM; 2629 return -ENOMEM;
2701 } 2630 }
2702 2631
@@ -2706,8 +2635,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2706 &qdev->lrg_buf_q_alloc_phy_addr); 2635 &qdev->lrg_buf_q_alloc_phy_addr);
2707 2636
2708 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2637 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2709 printk(KERN_ERR PFX 2638 netdev_err(qdev->ndev, "lBufQ failed\n");
2710 "%s: lBufQ failed\n", qdev->ndev->name);
2711 return -ENOMEM; 2639 return -ENOMEM;
2712 } 2640 }
2713 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2641 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2727,9 +2655,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2727 &qdev->small_buf_q_alloc_phy_addr); 2655 &qdev->small_buf_q_alloc_phy_addr);
2728 2656
2729 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2657 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2730 printk(KERN_ERR PFX 2658 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2731 "%s: Small Buffer Queue allocation failed.\n",
2732 qdev->ndev->name);
2733 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2659 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2734 qdev->lrg_buf_q_alloc_virt_addr, 2660 qdev->lrg_buf_q_alloc_virt_addr,
2735 qdev->lrg_buf_q_alloc_phy_addr); 2661 qdev->lrg_buf_q_alloc_phy_addr);
@@ -2745,8 +2671,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2745static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2671static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2746{ 2672{
2747 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { 2673 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2748 printk(KERN_INFO PFX 2674 netdev_info(qdev->ndev, "Already done\n");
2749 "%s: Already done.\n", qdev->ndev->name);
2750 return; 2675 return;
2751 } 2676 }
2752 if(qdev->lrg_buf) kfree(qdev->lrg_buf); 2677 if(qdev->lrg_buf) kfree(qdev->lrg_buf);
@@ -2783,9 +2708,7 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2783 &qdev->small_buf_phy_addr); 2708 &qdev->small_buf_phy_addr);
2784 2709
2785 if (qdev->small_buf_virt_addr == NULL) { 2710 if (qdev->small_buf_virt_addr == NULL) {
2786 printk(KERN_ERR PFX 2711 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2787 "%s: Failed to get small buffer memory.\n",
2788 qdev->ndev->name);
2789 return -ENOMEM; 2712 return -ENOMEM;
2790 } 2713 }
2791 2714
@@ -2811,8 +2734,7 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2811static void ql_free_small_buffers(struct ql3_adapter *qdev) 2734static void ql_free_small_buffers(struct ql3_adapter *qdev)
2812{ 2735{
2813 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { 2736 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2814 printk(KERN_INFO PFX 2737 netdev_info(qdev->ndev, "Already done\n");
2815 "%s: Already done.\n", qdev->ndev->name);
2816 return; 2738 return;
2817 } 2739 }
2818 if (qdev->small_buf_virt_addr != NULL) { 2740 if (qdev->small_buf_virt_addr != NULL) {
@@ -2874,11 +2796,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2874 qdev->lrg_buffer_len); 2796 qdev->lrg_buffer_len);
2875 if (unlikely(!skb)) { 2797 if (unlikely(!skb)) {
2876 /* Better luck next round */ 2798 /* Better luck next round */
2877 printk(KERN_ERR PFX 2799 netdev_err(qdev->ndev,
2878 "%s: large buff alloc failed, " 2800 "large buff alloc failed for %d bytes at index %d\n",
2879 "for %d bytes at index %d.\n", 2801 qdev->lrg_buffer_len * 2, i);
2880 qdev->ndev->name,
2881 qdev->lrg_buffer_len * 2, i);
2882 ql_free_large_buffers(qdev); 2802 ql_free_large_buffers(qdev);
2883 return -ENOMEM; 2803 return -ENOMEM;
2884 } else { 2804 } else {
@@ -2900,8 +2820,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2900 2820
2901 err = pci_dma_mapping_error(qdev->pdev, map); 2821 err = pci_dma_mapping_error(qdev->pdev, map);
2902 if(err) { 2822 if(err) {
2903 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2823 netdev_err(qdev->ndev,
2904 qdev->ndev->name, err); 2824 "PCI mapping failed with error: %d\n",
2825 err);
2905 ql_free_large_buffers(qdev); 2826 ql_free_large_buffers(qdev);
2906 return -ENOMEM; 2827 return -ENOMEM;
2907 } 2828 }
@@ -2968,9 +2889,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2968 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2889 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2969 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2890 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2970 } else { 2891 } else {
2971 printk(KERN_ERR PFX 2892 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2972 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", 2893 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2973 qdev->ndev->name);
2974 return -ENOMEM; 2894 return -ENOMEM;
2975 } 2895 }
2976 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2896 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
@@ -3001,34 +2921,27 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
3001 qdev->rsp_producer_index_phy_addr_low = 2921 qdev->rsp_producer_index_phy_addr_low =
3002 qdev->req_consumer_index_phy_addr_low + 8; 2922 qdev->req_consumer_index_phy_addr_low + 8;
3003 } else { 2923 } else {
3004 printk(KERN_ERR PFX 2924 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
3005 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3006 return -ENOMEM; 2925 return -ENOMEM;
3007 } 2926 }
3008 2927
3009 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2928 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3010 printk(KERN_ERR PFX 2929 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
3011 "%s: ql_alloc_net_req_rsp_queues failed.\n",
3012 qdev->ndev->name);
3013 goto err_req_rsp; 2930 goto err_req_rsp;
3014 } 2931 }
3015 2932
3016 if (ql_alloc_buffer_queues(qdev) != 0) { 2933 if (ql_alloc_buffer_queues(qdev) != 0) {
3017 printk(KERN_ERR PFX 2934 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
3018 "%s: ql_alloc_buffer_queues failed.\n",
3019 qdev->ndev->name);
3020 goto err_buffer_queues; 2935 goto err_buffer_queues;
3021 } 2936 }
3022 2937
3023 if (ql_alloc_small_buffers(qdev) != 0) { 2938 if (ql_alloc_small_buffers(qdev) != 0) {
3024 printk(KERN_ERR PFX 2939 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
3025 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3026 goto err_small_buffers; 2940 goto err_small_buffers;
3027 } 2941 }
3028 2942
3029 if (ql_alloc_large_buffers(qdev) != 0) { 2943 if (ql_alloc_large_buffers(qdev) != 0) {
3030 printk(KERN_ERR PFX 2944 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
3031 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3032 goto err_small_buffers; 2945 goto err_small_buffers;
3033 } 2946 }
3034 2947
@@ -3353,8 +3266,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3353 } while (--delay); 3266 } while (--delay);
3354 3267
3355 if (delay == 0) { 3268 if (delay == 0) {
3356 printk(KERN_ERR PFX 3269 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3357 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3358 status = -1; 3270 status = -1;
3359 goto out; 3271 goto out;
3360 } 3272 }
@@ -3396,17 +3308,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3396 /* 3308 /*
3397 * Issue soft reset to chip. 3309 * Issue soft reset to chip.
3398 */ 3310 */
3399 printk(KERN_DEBUG PFX 3311 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3400 "%s: Issue soft reset to chip.\n",
3401 qdev->ndev->name);
3402 ql_write_common_reg(qdev, 3312 ql_write_common_reg(qdev,
3403 &port_regs->CommonRegs.ispControlStatus, 3313 &port_regs->CommonRegs.ispControlStatus,
3404 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3314 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3405 3315
3406 /* Wait 3 seconds for reset to complete. */ 3316 /* Wait 3 seconds for reset to complete. */
3407 printk(KERN_DEBUG PFX 3317 netdev_printk(KERN_DEBUG, qdev->ndev,
3408 "%s: Wait 10 milliseconds for reset to complete.\n", 3318 "Wait 10 milliseconds for reset to complete\n");
3409 qdev->ndev->name);
3410 3319
3411 /* Wait until the firmware tells us the Soft Reset is done */ 3320 /* Wait until the firmware tells us the Soft Reset is done */
3412 max_wait_time = 5; 3321 max_wait_time = 5;
@@ -3427,8 +3336,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3427 value = 3336 value =
3428 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3337 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3429 if (value & ISP_CONTROL_RI) { 3338 if (value & ISP_CONTROL_RI) {
3430 printk(KERN_DEBUG PFX 3339 netdev_printk(KERN_DEBUG, qdev->ndev,
3431 "ql_adapter_reset: clearing RI after reset.\n"); 3340 "clearing RI after reset\n");
3432 ql_write_common_reg(qdev, 3341 ql_write_common_reg(qdev,
3433 &port_regs->CommonRegs. 3342 &port_regs->CommonRegs.
3434 ispControlStatus, 3343 ispControlStatus,
@@ -3503,9 +3412,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3503 case ISP_CONTROL_FN0_SCSI: 3412 case ISP_CONTROL_FN0_SCSI:
3504 case ISP_CONTROL_FN1_SCSI: 3413 case ISP_CONTROL_FN1_SCSI:
3505 default: 3414 default:
3506 printk(KERN_DEBUG PFX 3415 netdev_printk(KERN_DEBUG, qdev->ndev,
3507 "%s: Invalid function number, ispControlStatus = 0x%x\n", 3416 "Invalid function number, ispControlStatus = 0x%x\n",
3508 qdev->ndev->name,value); 3417 value);
3509 break; 3418 break;
3510 } 3419 }
3511 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3420 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
@@ -3516,32 +3425,26 @@ static void ql_display_dev_info(struct net_device *ndev)
3516 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3425 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3517 struct pci_dev *pdev = qdev->pdev; 3426 struct pci_dev *pdev = qdev->pdev;
3518 3427
3519 printk(KERN_INFO PFX 3428 netdev_info(ndev,
3520 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", 3429 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3521 DRV_NAME, qdev->index, qdev->chip_rev_id, 3430 DRV_NAME, qdev->index, qdev->chip_rev_id,
3522 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", 3431 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3523 qdev->pci_slot); 3432 qdev->pci_slot);
3524 printk(KERN_INFO PFX 3433 netdev_info(ndev, "%s Interface\n",
3525 "%s Interface.\n", 3434 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3526 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3527 3435
3528 /* 3436 /*
3529 * Print PCI bus width/type. 3437 * Print PCI bus width/type.
3530 */ 3438 */
3531 printk(KERN_INFO PFX 3439 netdev_info(ndev, "Bus interface is %s %s\n",
3532 "Bus interface is %s %s.\n", 3440 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3533 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3441 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3534 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3535 3442
3536 printk(KERN_INFO PFX 3443 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3537 "mem IO base address adjusted = 0x%p\n", 3444 qdev->mem_map_registers);
3538 qdev->mem_map_registers); 3445 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3539 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3540 3446
3541 if (netif_msg_probe(qdev)) 3447 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3542 printk(KERN_INFO PFX
3543 "%s: MAC address %pM\n",
3544 ndev->name, ndev->dev_addr);
3545} 3448}
3546 3449
3547static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3450static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
@@ -3560,8 +3463,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3560 free_irq(qdev->pdev->irq, ndev); 3463 free_irq(qdev->pdev->irq, ndev);
3561 3464
3562 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3465 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3563 printk(KERN_INFO PFX 3466 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3564 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3565 clear_bit(QL_MSI_ENABLED,&qdev->flags); 3467 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3566 pci_disable_msi(qdev->pdev); 3468 pci_disable_msi(qdev->pdev);
3567 } 3469 }
@@ -3577,16 +3479,14 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3577 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3479 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3578 if (ql_wait_for_drvr_lock(qdev)) { 3480 if (ql_wait_for_drvr_lock(qdev)) {
3579 if ((soft_reset = ql_adapter_reset(qdev))) { 3481 if ((soft_reset = ql_adapter_reset(qdev))) {
3580 printk(KERN_ERR PFX 3482 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3581 "%s: ql_adapter_reset(%d) FAILED!\n", 3483 qdev->index);
3582 ndev->name, qdev->index);
3583 } 3484 }
3584 printk(KERN_ERR PFX 3485 netdev_err(ndev,
3585 "%s: Releaseing driver lock via chip reset.\n",ndev->name); 3486 "Releasing driver lock via chip reset\n");
3586 } else { 3487 } else {
3587 printk(KERN_ERR PFX 3488 netdev_err(ndev,
3588 "%s: Could not acquire driver lock to do " 3489 "Could not acquire driver lock to do reset!\n");
3589 "reset!\n", ndev->name);
3590 retval = -1; 3490 retval = -1;
3591 } 3491 }
3592 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3492 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -3603,20 +3503,17 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3603 unsigned long hw_flags; 3503 unsigned long hw_flags;
3604 3504
3605 if (ql_alloc_mem_resources(qdev)) { 3505 if (ql_alloc_mem_resources(qdev)) {
3606 printk(KERN_ERR PFX 3506 netdev_err(ndev, "Unable to allocate buffers\n");
3607 "%s Unable to allocate buffers.\n", ndev->name);
3608 return -ENOMEM; 3507 return -ENOMEM;
3609 } 3508 }
3610 3509
3611 if (qdev->msi) { 3510 if (qdev->msi) {
3612 if (pci_enable_msi(qdev->pdev)) { 3511 if (pci_enable_msi(qdev->pdev)) {
3613 printk(KERN_ERR PFX 3512 netdev_err(ndev,
3614 "%s: User requested MSI, but MSI failed to " 3513 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
3615 "initialize. Continuing without MSI.\n",
3616 qdev->ndev->name);
3617 qdev->msi = 0; 3514 qdev->msi = 0;
3618 } else { 3515 } else {
3619 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); 3516 netdev_info(ndev, "MSI Enabled...\n");
3620 set_bit(QL_MSI_ENABLED,&qdev->flags); 3517 set_bit(QL_MSI_ENABLED,&qdev->flags);
3621 irq_flags &= ~IRQF_SHARED; 3518 irq_flags &= ~IRQF_SHARED;
3622 } 3519 }
@@ -3625,9 +3522,9 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3625 if ((err = request_irq(qdev->pdev->irq, 3522 if ((err = request_irq(qdev->pdev->irq,
3626 ql3xxx_isr, 3523 ql3xxx_isr,
3627 irq_flags, ndev->name, ndev))) { 3524 irq_flags, ndev->name, ndev))) {
3628 printk(KERN_ERR PFX 3525 netdev_err(ndev,
3629 "%s: Failed to reserve interrupt %d already in use.\n", 3526 "Failed to reserve interrupt %d already in use\n",
3630 ndev->name, qdev->pdev->irq); 3527 qdev->pdev->irq);
3631 goto err_irq; 3528 goto err_irq;
3632 } 3529 }
3633 3530
@@ -3635,18 +3532,13 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3635 3532
3636 if ((err = ql_wait_for_drvr_lock(qdev))) { 3533 if ((err = ql_wait_for_drvr_lock(qdev))) {
3637 if ((err = ql_adapter_initialize(qdev))) { 3534 if ((err = ql_adapter_initialize(qdev))) {
3638 printk(KERN_ERR PFX 3535 netdev_err(ndev, "Unable to initialize adapter\n");
3639 "%s: Unable to initialize adapter.\n",
3640 ndev->name);
3641 goto err_init; 3536 goto err_init;
3642 } 3537 }
3643 printk(KERN_ERR PFX 3538 netdev_err(ndev, "Releasing driver lock\n");
3644 "%s: Releaseing driver lock.\n",ndev->name);
3645 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3539 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3646 } else { 3540 } else {
3647 printk(KERN_ERR PFX 3541 netdev_err(ndev, "Could not acquire driver lock\n");
3648 "%s: Could not acquire driver lock.\n",
3649 ndev->name);
3650 goto err_lock; 3542 goto err_lock;
3651 } 3543 }
3652 3544
@@ -3667,9 +3559,7 @@ err_lock:
3667 free_irq(qdev->pdev->irq, ndev); 3559 free_irq(qdev->pdev->irq, ndev);
3668err_irq: 3560err_irq:
3669 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3561 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3670 printk(KERN_INFO PFX 3562 netdev_info(ndev, "calling pci_disable_msi()\n");
3671 "%s: calling pci_disable_msi().\n",
3672 qdev->ndev->name);
3673 clear_bit(QL_MSI_ENABLED,&qdev->flags); 3563 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3674 pci_disable_msi(qdev->pdev); 3564 pci_disable_msi(qdev->pdev);
3675 } 3565 }
@@ -3679,9 +3569,8 @@ err_irq:
3679static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3569static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3680{ 3570{
3681 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { 3571 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
3682 printk(KERN_ERR PFX 3572 netdev_err(qdev->ndev,
3683 "%s: Driver up/down cycle failed, " 3573 "Driver up/down cycle failed, closing device\n");
3684 "closing device\n",qdev->ndev->name);
3685 rtnl_lock(); 3574 rtnl_lock();
3686 dev_close(qdev->ndev); 3575 dev_close(qdev->ndev);
3687 rtnl_unlock(); 3576 rtnl_unlock();
@@ -3750,7 +3639,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
3750{ 3639{
3751 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3640 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3752 3641
3753 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); 3642 netdev_err(ndev, "Resetting...\n");
3754 /* 3643 /*
3755 * Stop the queues, we've got a problem. 3644 * Stop the queues, we've got a problem.
3756 */ 3645 */
@@ -3783,9 +3672,8 @@ static void ql_reset_work(struct work_struct *work)
3783 int j; 3672 int j;
3784 tx_cb = &qdev->tx_buf[i]; 3673 tx_cb = &qdev->tx_buf[i];
3785 if (tx_cb->skb) { 3674 if (tx_cb->skb) {
3786 printk(KERN_DEBUG PFX 3675 netdev_printk(KERN_DEBUG, ndev,
3787 "%s: Freeing lost SKB.\n", 3676 "Freeing lost SKB\n");
3788 qdev->ndev->name);
3789 pci_unmap_single(qdev->pdev, 3677 pci_unmap_single(qdev->pdev,
3790 dma_unmap_addr(&tx_cb->map[0], mapaddr), 3678 dma_unmap_addr(&tx_cb->map[0], mapaddr),
3791 dma_unmap_len(&tx_cb->map[0], maplen), 3679 dma_unmap_len(&tx_cb->map[0], maplen),
@@ -3801,8 +3689,7 @@ static void ql_reset_work(struct work_struct *work)
3801 } 3689 }
3802 } 3690 }
3803 3691
3804 printk(KERN_ERR PFX 3692 netdev_err(ndev, "Clearing NRI after reset\n");
3805 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3806 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3693 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3807 ql_write_common_reg(qdev, 3694 ql_write_common_reg(qdev,
3808 &port_regs->CommonRegs. 3695 &port_regs->CommonRegs.
@@ -3818,16 +3705,14 @@ static void ql_reset_work(struct work_struct *work)
3818 3705
3819 ispControlStatus); 3706 ispControlStatus);
3820 if ((value & ISP_CONTROL_SR) == 0) { 3707 if ((value & ISP_CONTROL_SR) == 0) {
3821 printk(KERN_DEBUG PFX 3708 netdev_printk(KERN_DEBUG, ndev,
3822 "%s: reset completed.\n", 3709 "reset completed\n");
3823 qdev->ndev->name);
3824 break; 3710 break;
3825 } 3711 }
3826 3712
3827 if (value & ISP_CONTROL_RI) { 3713 if (value & ISP_CONTROL_RI) {
3828 printk(KERN_DEBUG PFX 3714 netdev_printk(KERN_DEBUG, ndev,
3829 "%s: clearing NRI after reset.\n", 3715 "clearing NRI after reset\n");
3830 qdev->ndev->name);
3831 ql_write_common_reg(qdev, 3716 ql_write_common_reg(qdev,
3832 &port_regs-> 3717 &port_regs->
3833 CommonRegs. 3718 CommonRegs.
@@ -3848,11 +3733,9 @@ static void ql_reset_work(struct work_struct *work)
3848 * Set the reset flags and clear the board again. 3733 * Set the reset flags and clear the board again.
3849 * Nothing else to do... 3734 * Nothing else to do...
3850 */ 3735 */
3851 printk(KERN_ERR PFX 3736 netdev_err(ndev,
3852 "%s: Timed out waiting for reset to " 3737 "Timed out waiting for reset to complete\n");
3853 "complete.\n", ndev->name); 3738 netdev_err(ndev, "Do a reset\n");
3854 printk(KERN_ERR PFX
3855 "%s: Do a reset.\n", ndev->name);
3856 clear_bit(QL_RESET_PER_SCSI,&qdev->flags); 3739 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3857 clear_bit(QL_RESET_START,&qdev->flags); 3740 clear_bit(QL_RESET_START,&qdev->flags);
3858 ql_cycle_adapter(qdev,QL_DO_RESET); 3741 ql_cycle_adapter(qdev,QL_DO_RESET);
@@ -3920,15 +3803,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3920 3803
3921 err = pci_enable_device(pdev); 3804 err = pci_enable_device(pdev);
3922 if (err) { 3805 if (err) {
3923 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3806 pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3924 pci_name(pdev));
3925 goto err_out; 3807 goto err_out;
3926 } 3808 }
3927 3809
3928 err = pci_request_regions(pdev, DRV_NAME); 3810 err = pci_request_regions(pdev, DRV_NAME);
3929 if (err) { 3811 if (err) {
3930 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3812 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3931 pci_name(pdev));
3932 goto err_out_disable_pdev; 3813 goto err_out_disable_pdev;
3933 } 3814 }
3934 3815
@@ -3943,15 +3824,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3943 } 3824 }
3944 3825
3945 if (err) { 3826 if (err) {
3946 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3827 pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3947 pci_name(pdev));
3948 goto err_out_free_regions; 3828 goto err_out_free_regions;
3949 } 3829 }
3950 3830
3951 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3831 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3952 if (!ndev) { 3832 if (!ndev) {
3953 printk(KERN_ERR PFX "%s could not alloc etherdev\n", 3833 pr_err("%s could not alloc etherdev\n", pci_name(pdev));
3954 pci_name(pdev));
3955 err = -ENOMEM; 3834 err = -ENOMEM;
3956 goto err_out_free_regions; 3835 goto err_out_free_regions;
3957 } 3836 }
@@ -3978,8 +3857,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3978 3857
3979 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3858 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3980 if (!qdev->mem_map_registers) { 3859 if (!qdev->mem_map_registers) {
3981 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3860 pr_err("%s: cannot map device registers\n", pci_name(pdev));
3982 pci_name(pdev));
3983 err = -EIO; 3861 err = -EIO;
3984 goto err_out_free_ndev; 3862 goto err_out_free_ndev;
3985 } 3863 }
@@ -3998,9 +3876,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3998 3876
3999 /* make sure the EEPROM is good */ 3877 /* make sure the EEPROM is good */
4000 if (ql_get_nvram_params(qdev)) { 3878 if (ql_get_nvram_params(qdev)) {
4001 printk(KERN_ALERT PFX 3879 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
4002 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", 3880 __func__, qdev->index);
4003 qdev->index);
4004 err = -EIO; 3881 err = -EIO;
4005 goto err_out_iounmap; 3882 goto err_out_iounmap;
4006 } 3883 }
@@ -4032,8 +3909,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4032 3909
4033 err = register_netdev(ndev); 3910 err = register_netdev(ndev);
4034 if (err) { 3911 if (err) {
4035 printk(KERN_ERR PFX "%s: cannot register net device\n", 3912 pr_err("%s: cannot register net device\n", pci_name(pdev));
4036 pci_name(pdev));
4037 goto err_out_iounmap; 3913 goto err_out_iounmap;
4038 } 3914 }
4039 3915
@@ -4052,10 +3928,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4052 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3928 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4053 qdev->adapter_timer.data = (unsigned long)qdev; 3929 qdev->adapter_timer.data = (unsigned long)qdev;
4054 3930
4055 if(!cards_found) { 3931 if (!cards_found) {
4056 printk(KERN_ALERT PFX "%s\n", DRV_STRING); 3932 pr_alert("%s\n", DRV_STRING);
4057 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", 3933 pr_alert("Driver name: %s, Version: %s\n",
4058 DRV_NAME, DRV_VERSION); 3934 DRV_NAME, DRV_VERSION);
4059 } 3935 }
4060 ql_display_dev_info(ndev); 3936 ql_display_dev_info(ndev);
4061 3937