Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--   drivers/net/qlge/qlge.h          |   8
-rw-r--r--   drivers/net/qlge/qlge_dbg.c      |  58
-rw-r--r--   drivers/net/qlge/qlge_ethtool.c  |  49
-rw-r--r--   drivers/net/qlge/qlge_main.c     | 841
-rw-r--r--   drivers/net/qlge/qlge_mpi.c      | 183
5 files changed, 580 insertions, 559 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index ebfd17776b53..57d135e3bfaf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
 #define DRV_VERSION "v1.00.00.23.00.00-01"
 
 #define PFX "qlge: "
-#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
-	do { \
-		if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
-			; \
-		else \
-			dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
-				   "%s: " fmt, __func__, ##args); \
-	} while (0)
 
 #define WQ_ADDR_ALIGN 0x3	/* 4 byte alignment */
 
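The hunk above removes the driver-private QPRINTK() macro; every hunk that follows converts its call sites to the generic netdev message helpers from <linux/netdevice.h>. As a minimal sketch of the mapping (the calls below are taken from hunks in this patch, not new code): the NETIF_MSG_<level> test becomes the lower-case message-type argument, the KERN_<level> prefix is implied by the helper name, and the helpers tag output with the device themselves, which is why the explicit ndev->name / "%s" arguments disappear in the converted sites.

	/* old form, built on the macro removed above */
	QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");

	/* new form used throughout this patch */
	netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");

	/* DEBUG-level sites keep an explicit KERN_DEBUG via netif_printk() */
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "lbq: updating prod idx = %d.\n", rx_ring->lbq_prod_idx);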
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 57df835147eb..ff8550d2ca82 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -443,8 +443,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
 		status = ql_get_mac_addr_reg(qdev,
 					MAC_ADDR_TYPE_CAM_MAC, i, value);
 		if (status) {
-			QPRINTK(qdev, DRV, ERR,
-				"Failed read of mac index register.\n");
+			netif_err(qdev, drv, qdev->ndev,
+				  "Failed read of mac index register.\n");
 			goto err;
 		}
 		*buf++ = value[0];	/* lower MAC address */
@@ -455,8 +455,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
 		status = ql_get_mac_addr_reg(qdev,
 					MAC_ADDR_TYPE_MULTI_MAC, i, value);
 		if (status) {
-			QPRINTK(qdev, DRV, ERR,
-				"Failed read of mac index register.\n");
+			netif_err(qdev, drv, qdev->ndev,
+				  "Failed read of mac index register.\n");
 			goto err;
 		}
 		*buf++ = value[0];	/* lower Mcast address */
@@ -479,8 +479,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
 	for (i = 0; i < 16; i++) {
 		status = ql_get_routing_reg(qdev, i, &value);
 		if (status) {
-			QPRINTK(qdev, DRV, ERR,
-				"Failed read of routing index register.\n");
+			netif_err(qdev, drv, qdev->ndev,
+				  "Failed read of routing index register.\n");
 			goto err;
 		} else {
 			*buf++ = value;
@@ -736,8 +736,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 	int i;
 
 	if (!mpi_coredump) {
-		QPRINTK(qdev, DRV, ERR,
-			"No memory available.\n");
+		netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
 		return -ENOMEM;
 	}
 
@@ -749,8 +748,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 
 	status = ql_pause_mpi_risc(qdev);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed RISC pause. Status = 0x%.08x\n", status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed RISC pause. Status = 0x%.08x\n", status);
 		goto err;
 	}
 
@@ -911,9 +910,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 
 	status = ql_get_serdes_regs(qdev, mpi_coredump);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed Dump of Serdes Registers. Status = 0x%.08x\n",
-			status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+			  status);
 		goto err;
 	}
 
@@ -1177,16 +1176,16 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 	/* clear the pause */
 	status = ql_unpause_mpi_risc(qdev);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed RISC unpause. Status = 0x%.08x\n", status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed RISC unpause. Status = 0x%.08x\n", status);
 		goto err;
 	}
 
 	/* Reset the RISC so we can dump RAM */
 	status = ql_hard_reset_mpi_risc(qdev);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed RISC reset. Status = 0x%.08x\n", status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed RISC reset. Status = 0x%.08x\n", status);
 		goto err;
 	}
 
@@ -1198,8 +1197,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
 					CODE_RAM_ADDR, CODE_RAM_CNT);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+			  status);
 		goto err;
 	}
 
@@ -1212,8 +1212,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
 					MEMC_RAM_ADDR, MEMC_RAM_CNT);
 	if (status) {
-		QPRINTK(qdev, DRV, ERR,
-			"Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+			  status);
 		goto err;
 	}
 err:
@@ -1225,21 +1226,19 @@ err:
 static void ql_get_core_dump(struct ql_adapter *qdev)
 {
 	if (!ql_own_firmware(qdev)) {
-		QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
-			qdev->ndev->name);
+		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
 		return;
 	}
 
 	if (!netif_running(qdev->ndev)) {
-		QPRINTK(qdev, IFUP, ERR,
-			"Force Coredump can only be done from interface "
-			"that is up.\n");
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Force Coredump can only be done from interface that is up.\n");
 		return;
 	}
 
 	if (ql_mb_sys_err(qdev)) {
-		QPRINTK(qdev, IFUP, ERR,
-			"Fail force coredump with ql_mb_sys_err().\n");
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Fail force coredump with ql_mb_sys_err().\n");
 		return;
 	}
 }
@@ -1334,7 +1333,8 @@ void ql_mpi_core_to_log(struct work_struct *work)
 
 	count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
 	tmp = (u32 *)qdev->mpi_coredump;
-	QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
+	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+		     "Core is dumping to log file!\n");
 
 	for (i = 0; i < count; i += 8) {
 		printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
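A side note on the DEBUG-level conversions above (an observation, not text from the patch): QPRINTK(..., DEBUG, ...) sites become netif_printk(..., KERN_DEBUG, ...) rather than netif_dbg(), presumably to keep the messages compiled in unconditionally instead of depending on DEBUG or dynamic debug. Either way the output stays gated on the adapter's msg_enable bits; the converted call just above reduces to roughly the following (the printk path inside netdev_printk() is simplified here):

	if (qdev->msg_enable & NETIF_MSG_DRV)
		dev_printk(KERN_DEBUG, &qdev->ndev->dev,
			   "Core is dumping to log file!\n");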
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6f..4f26afeb0f38 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -67,8 +67,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 		status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 				      CFG_LCQ, rx_ring->cq_id);
 		if (status) {
-			QPRINTK(qdev, IFUP, ERR,
-				"Failed to load CQICB.\n");
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to load CQICB.\n");
 			goto exit;
 		}
 	}
@@ -89,8 +89,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 		status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 				      CFG_LCQ, rx_ring->cq_id);
 		if (status) {
-			QPRINTK(qdev, IFUP, ERR,
-				"Failed to load CQICB.\n");
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to load CQICB.\n");
 			goto exit;
 		}
 	}
@@ -107,8 +107,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
 
 	spin_lock(&qdev->stats_lock);
 	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
-		QPRINTK(qdev, DRV, ERR,
-			"Couldn't get xgmac sem.\n");
+		netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get xgmac sem.\n");
 		goto quit;
 	}
 	/*
@@ -116,8 +116,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
 	 */
 	for (i = 0x200; i < 0x280; i += 8) {
 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
-			QPRINTK(qdev, DRV, ERR,
-				"Error reading status register 0x%.04x.\n", i);
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
 			goto end;
 		} else
 			*iter = data;
@@ -129,8 +130,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
 	 */
 	for (i = 0x300; i < 0x3d0; i += 8) {
 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
-			QPRINTK(qdev, DRV, ERR,
-				"Error reading status register 0x%.04x.\n", i);
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
 			goto end;
 		} else
 			*iter = data;
@@ -142,8 +144,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
 	 */
 	for (i = 0x500; i < 0x540; i += 8) {
 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
-			QPRINTK(qdev, DRV, ERR,
-				"Error reading status register 0x%.04x.\n", i);
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
 			goto end;
 		} else
 			*iter = data;
@@ -155,8 +158,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
 	 */
 	for (i = 0x568; i < 0x5a8; i += 8) {
 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
-			QPRINTK(qdev, DRV, ERR,
-				"Error reading status register 0x%.04x.\n", i);
+			netif_err(qdev, drv, qdev->ndev,
+				  "Error reading status register 0x%.04x.\n",
+				  i);
 			goto end;
 		} else
 			*iter = data;
@@ -167,8 +171,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
 	 * Get RX NIC FIFO DROP statistics.
 	 */
 	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
-		QPRINTK(qdev, DRV, ERR,
-			"Error reading status register 0x%.04x.\n", i);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Error reading status register 0x%.04x.\n", i);
 		goto end;
 	} else
 		*iter = data;
@@ -396,14 +400,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 		return -EINVAL;
 	qdev->wol = wol->wolopts;
 
-	QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
-		qdev->wol, ndev->name);
+	netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
 	if (!qdev->wol) {
 		u32 wol = 0;
 		status = ql_mb_wol_mode(qdev, wol);
-		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
-			(status == 0) ? "cleared sucessfully" : "clear failed",
-			wol, qdev->ndev->name);
+		netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+			  status == 0 ? "cleared sucessfully" : "clear failed",
+			  wol);
 	}
 
 	return 0;
@@ -534,8 +537,8 @@ static void ql_self_test(struct net_device *ndev,
 		}
 		clear_bit(QL_SELFTEST, &qdev->flags);
 	} else {
-		QPRINTK(qdev, DRV, ERR,
-			"%s: is down, Loopback test will fail.\n", ndev->name);
+		netif_err(qdev, drv, qdev->ndev,
+			  "is down, Loopback test will fail.\n");
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 	}
 }
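The helpers in the two files above all test the adapter's msg_enable mask, which users normally adjust at run time with something like `ethtool -s eth0 msglvl 0x4007`. A hypothetical sketch of how a driver seeds that mask at probe time follows; the module parameter name, the priv layout, the helper name, and the default bits are illustrative assumptions, not taken from the qlge sources:

	#include <linux/module.h>
	#include <linux/netdevice.h>

	/* Illustrative only: names and defaults are assumptions. */
	static int debug = -1;	/* -1: fall back to the default mask below */
	module_param(debug, int, 0);
	MODULE_PARM_DESC(debug, "debug level: number of low NETIF_MSG_* bits to enable (-1 for defaults)");

	struct example_priv {
		u32 msg_enable;	/* the bitmap netif_err()/netif_printk() test */
	};

	static void example_init_msg_enable(struct example_priv *priv)
	{
		/* netif_msg_init() treats the value as a count of low bits to
		 * enable and falls back to the default set when it is out of
		 * range (e.g. the -1 default above). */
		priv->msg_enable = netif_msg_init(debug,
						  NETIF_MSG_DRV | NETIF_MSG_PROBE |
						  NETIF_MSG_LINK | NETIF_MSG_IFUP);
	}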
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index dd3e0f1b2965..2c052caee884 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -128,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 		break;
 	default:
-		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
+		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
 		return -EINVAL;
 	}
 
@@ -168,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 
 		/* check for errors */
 		if (temp & err_bit) {
-			QPRINTK(qdev, PROBE, ALERT,
-				"register 0x%.08x access error, value = 0x%.08x!.\n",
-				reg, temp);
+			netif_alert(qdev, probe, qdev->ndev,
+				    "register 0x%.08x access error, value = 0x%.08x!.\n",
+				    reg, temp);
 			return -EIO;
 		} else if (temp & bit)
 			return 0;
 		udelay(UDELAY_DELAY);
 		count--;
 	}
-	QPRINTK(qdev, PROBE, ALERT,
-		"Timed out waiting for reg %x to come ready.\n", reg);
+	netif_alert(qdev, probe, qdev->ndev,
+		    "Timed out waiting for reg %x to come ready.\n", reg);
 	return -ETIMEDOUT;
 }
 
@@ -221,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 
 	map = pci_map_single(qdev->pdev, ptr, size, direction);
 	if (pci_dma_mapping_error(qdev->pdev, map)) {
-		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 		return -ENOMEM;
 	}
 
@@ -231,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 
 	status = ql_wait_cfg(qdev, bit);
 	if (status) {
-		QPRINTK(qdev, IFUP, ERR,
-			"Timed out waiting for CFG to come ready.\n");
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Timed out waiting for CFG to come ready.\n");
 		goto exit;
 	}
 
@@ -313,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 	case MAC_ADDR_TYPE_VLAN:
 	case MAC_ADDR_TYPE_MULTI_FLTR:
 	default:
-		QPRINTK(qdev, IFUP, CRIT,
-			"Address type %d not yet supported.\n", type);
+		netif_crit(qdev, ifup, qdev->ndev,
+			   "Address type %d not yet supported.\n", type);
 		status = -EPERM;
 	}
 exit:
@@ -371,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 			(addr[5]);
 
-		QPRINTK(qdev, IFUP, DEBUG,
-			"Adding %s address %pM"
-			" at index %d in the CAM.\n",
-			((type ==
-			  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
-			 "UNICAST"), addr, index);
+		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+			     "Adding %s address %pM at index %d in the CAM.\n",
+			     type == MAC_ADDR_TYPE_MULTI_MAC ?
+			     "MULTICAST" : "UNICAST",
+			     addr, index);
 
 		status =
 		    ql_wait_reg_rdy(qdev,
@@ -426,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 		 * addressing. It's either MAC_ADDR_E on or off.
 		 * That's bit-27 we're talking about.
 		 */
-		QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
-			(enable_bit ? "Adding" : "Removing"),
-			index, (enable_bit ? "to" : "from"));
+		netif_info(qdev, ifup, qdev->ndev,
+			   "%s VLAN ID %d %s the CAM.\n",
+			   enable_bit ? "Adding" : "Removing",
+			   index,
+			   enable_bit ? "to" : "from");
 
 		status =
 		    ql_wait_reg_rdy(qdev,
@@ -443,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 	}
 	case MAC_ADDR_TYPE_MULTI_FLTR:
 	default:
-		QPRINTK(qdev, IFUP, CRIT,
-			"Address type %d not yet supported.\n", type);
+		netif_crit(qdev, ifup, qdev->ndev,
+			   "Address type %d not yet supported.\n", type);
 		status = -EPERM;
 	}
 exit:
@@ -463,14 +464,13 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 
 	if (set) {
 		addr = &qdev->ndev->dev_addr[0];
-		QPRINTK(qdev, IFUP, DEBUG,
-			"Set Mac addr %pM\n", addr);
+		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+			     "Set Mac addr %pM\n", addr);
 	} else {
 		memset(zero_mac_addr, 0, ETH_ALEN);
 		addr = &zero_mac_addr[0];
-		QPRINTK(qdev, IFUP, DEBUG,
-			"Clearing MAC address on %s\n",
-			qdev->ndev->name);
+		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+			     "Clearing MAC address\n");
 	}
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
@@ -479,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
-		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
-			"address.\n");
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Failed to init mac address.\n");
 	return status;
 }
 
 void ql_link_on(struct ql_adapter *qdev)
 {
-	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
-		qdev->ndev->name);
+	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 	netif_carrier_on(qdev->ndev);
 	ql_set_mac_addr(qdev, 1);
 }
 
 void ql_link_off(struct ql_adapter *qdev)
 {
-	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
-		qdev->ndev->name);
+	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 	netif_carrier_off(qdev->ndev);
 	ql_set_mac_addr(qdev, 0);
 }
@@ -532,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 	int status = -EINVAL; /* Return error if no mask match. */
 	u32 value = 0;
 
-	QPRINTK(qdev, IFUP, DEBUG,
-		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
-		(enable ? "Adding" : "Removing"),
-		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
-		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
-		((index ==
-		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
-		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
-		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
-		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
-		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
-		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
-		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
-		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
-		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
-		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
-		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
-		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
-		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
-		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
-		(enable ? "to" : "from"));
+	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+		     "%s %s mask %s the routing reg.\n",
+		     enable ? "Adding" : "Removing",
+		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
+		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
+		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+		     "(Bad index != RT_IDX)",
+		     enable ? "to" : "from");
 
 	switch (mask) {
 	case RT_IDX_CAM_HIT:
@@ -612,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 		break;
 	}
 	default:
-		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
-			mask);
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Mask type %d not yet supported.\n", mask);
 		status = -EPERM;
 		goto exit;
 	}
@@ -719,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 
 	status = strncmp((char *)&qdev->flash, str, 4);
 	if (status) {
-		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 		return status;
 	}
 
@@ -727,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 		csum += le16_to_cpu(*flash++);
 
 	if (csum)
-		QPRINTK(qdev, IFUP, ERR,
-			"Invalid flash checksum, csum = 0x%.04x.\n", csum);
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 
 	return csum;
 }
@@ -780,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 	for (i = 0; i < size; i++, p++) {
 		status = ql_read_flash_word(qdev, i+offset, p);
 		if (status) {
-			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Error reading flash.\n");
 			goto exit;
 		}
 	}
@@ -789,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 			sizeof(struct flash_params_8000) / sizeof(u16),
 			"8000");
 	if (status) {
-		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 		status = -EINVAL;
 		goto exit;
 	}
@@ -807,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 			qdev->ndev->addr_len);
 
 	if (!is_valid_ether_addr(mac_addr)) {
-		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 		status = -EINVAL;
 		goto exit;
 	}
@@ -841,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 	for (i = 0; i < size; i++, p++) {
 		status = ql_read_flash_word(qdev, i+offset, p);
 		if (status) {
-			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Error reading flash.\n");
 			goto exit;
 		}
 
@@ -851,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 			sizeof(struct flash_params_8012) / sizeof(u16),
 			"8012");
 	if (status) {
-		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 		status = -EINVAL;
 		goto exit;
 	}
@@ -969,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
 		/* Another function has the semaphore, so
 		 * wait for the port init bit to come ready.
 		 */
-		QPRINTK(qdev, LINK, INFO,
-			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
+		netif_info(qdev, link, qdev->ndev,
+			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 		if (status) {
-			QPRINTK(qdev, LINK, CRIT,
-				"Port initialize timed out.\n");
+			netif_crit(qdev, link, qdev->ndev,
+				   "Port initialize timed out.\n");
 		}
 		return status;
 	}
 
-	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
+	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
 	/* Set the core reset. */
 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 	if (status)
@@ -1109,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 						GFP_ATOMIC,
 						qdev->lbq_buf_order);
 		if (unlikely(!rx_ring->pg_chunk.page)) {
-			QPRINTK(qdev, DRV, ERR,
-				"page allocation failed.\n");
+			netif_err(qdev, drv, qdev->ndev,
+				  "page allocation failed.\n");
 			return -ENOMEM;
 		}
 		rx_ring->pg_chunk.offset = 0;
@@ -1120,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 		if (pci_dma_mapping_error(qdev->pdev, map)) {
 			__free_pages(rx_ring->pg_chunk.page,
 					qdev->lbq_buf_order);
-			QPRINTK(qdev, DRV, ERR,
-				"PCI mapping failed.\n");
+			netif_err(qdev, drv, qdev->ndev,
+				  "PCI mapping failed.\n");
 			return -ENOMEM;
 		}
 		rx_ring->pg_chunk.map = map;
@@ -1158,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 
 	while (rx_ring->lbq_free_cnt > 32) {
 		for (i = 0; i < 16; i++) {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"lbq: try cleaning clean_idx = %d.\n",
-				clean_idx);
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "lbq: try cleaning clean_idx = %d.\n",
+				     clean_idx);
 			lbq_desc = &rx_ring->lbq[clean_idx];
 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
-				QPRINTK(qdev, IFUP, ERR,
-					"Could not get a page chunk.\n");
+				netif_err(qdev, ifup, qdev->ndev,
+					  "Could not get a page chunk.\n");
 				return;
 			}
 
 			map = lbq_desc->p.pg_chunk.map +
 				lbq_desc->p.pg_chunk.offset;
@@ -1191,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	}
 
 	if (start_idx != clean_idx) {
-		QPRINTK(qdev, RX_STATUS, DEBUG,
-			"lbq: updating prod idx = %d.\n",
-			rx_ring->lbq_prod_idx);
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "lbq: updating prod idx = %d.\n",
+			     rx_ring->lbq_prod_idx);
 		ql_write_db_reg(rx_ring->lbq_prod_idx,
 				rx_ring->lbq_prod_idx_db_reg);
 	}
@@ -1211,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	while (rx_ring->sbq_free_cnt > 16) {
 		for (i = 0; i < 16; i++) {
 			sbq_desc = &rx_ring->sbq[clean_idx];
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"sbq: try cleaning clean_idx = %d.\n",
-				clean_idx);
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "sbq: try cleaning clean_idx = %d.\n",
+				     clean_idx);
 			if (sbq_desc->p.skb == NULL) {
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"sbq: getting new skb for index %d.\n",
-					sbq_desc->index);
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "sbq: getting new skb for index %d.\n",
+					     sbq_desc->index);
 				sbq_desc->p.skb =
 				    netdev_alloc_skb(qdev->ndev,
 						     SMALL_BUFFER_SIZE);
 				if (sbq_desc->p.skb == NULL) {
-					QPRINTK(qdev, PROBE, ERR,
-						"Couldn't get an skb.\n");
+					netif_err(qdev, probe, qdev->ndev,
+						  "Couldn't get an skb.\n");
 					rx_ring->sbq_clean_idx = clean_idx;
 					return;
 				}
@@ -1233,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 						rx_ring->sbq_buf_size,
 						PCI_DMA_FROMDEVICE);
 				if (pci_dma_mapping_error(qdev->pdev, map)) {
-					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+					netif_err(qdev, ifup, qdev->ndev,
+						  "PCI mapping failed.\n");
 					rx_ring->sbq_clean_idx = clean_idx;
 					dev_kfree_skb_any(sbq_desc->p.skb);
 					sbq_desc->p.skb = NULL;
@@ -1257,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	}
 
 	if (start_idx != clean_idx) {
-		QPRINTK(qdev, RX_STATUS, DEBUG,
-			"sbq: updating prod idx = %d.\n",
-			rx_ring->sbq_prod_idx);
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "sbq: updating prod idx = %d.\n",
+			     rx_ring->sbq_prod_idx);
 		ql_write_db_reg(rx_ring->sbq_prod_idx,
 				rx_ring->sbq_prod_idx_db_reg);
 	}
@@ -1291,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
 			 * then its an OAL.
 			 */
 			if (i == 7) {
-				QPRINTK(qdev, TX_DONE, DEBUG,
-					"unmapping OAL area.\n");
+				netif_printk(qdev, tx_done, KERN_DEBUG,
+					     qdev->ndev,
+					     "unmapping OAL area.\n");
 			}
 			pci_unmap_single(qdev->pdev,
 					 pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1301,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
 					 maplen),
 					 PCI_DMA_TODEVICE);
 		} else {
-			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
-				i);
+			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+				     "unmapping frag %d.\n", i);
 			pci_unmap_page(qdev->pdev,
 				       pci_unmap_addr(&tx_ring_desc->map[i],
 						      mapaddr),
@@ -1327,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
 	int frag_cnt = skb_shinfo(skb)->nr_frags;
 
 	if (frag_cnt) {
-		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+			     "frag_cnt = %d.\n", frag_cnt);
 	}
 	/*
 	 * Map the skb buffer first.
@@ -1336,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
 
 	err = pci_dma_mapping_error(qdev->pdev, map);
 	if (err) {
-		QPRINTK(qdev, TX_QUEUED, ERR,
-			"PCI mapping failed with error: %d\n", err);
+		netif_err(qdev, tx_queued, qdev->ndev,
+			  "PCI mapping failed with error: %d\n", err);
 
 		return NETDEV_TX_BUSY;
 	}
@@ -1383,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
 			     PCI_DMA_TODEVICE);
 		err = pci_dma_mapping_error(qdev->pdev, map);
 		if (err) {
-			QPRINTK(qdev, TX_QUEUED, ERR,
-				"PCI mapping outbound address list with error: %d\n",
-				err);
+			netif_err(qdev, tx_queued, qdev->ndev,
+				  "PCI mapping outbound address list with error: %d\n",
+				  err);
 			goto map_error;
 		}
 
@@ -1413,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
 
 		err = pci_dma_mapping_error(qdev->pdev, map);
 		if (err) {
-			QPRINTK(qdev, TX_QUEUED, ERR,
-				"PCI mapping frags failed with error: %d.\n",
-				err);
+			netif_err(qdev, tx_queued, qdev->ndev,
+				  "PCI mapping frags failed with error: %d.\n",
+				  err);
 			goto map_error;
 		}
 
@@ -1460,7 +1464,8 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 
 	skb = napi_get_frags(napi);
 	if (!skb) {
-		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
+		netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get an skb, exiting.\n");
 		rx_ring->rx_dropped++;
 		put_page(lbq_desc->p.pg_chunk.page);
 		return;
@@ -1503,8 +1508,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 
 	skb = netdev_alloc_skb(ndev, length);
 	if (!skb) {
-		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
-			"need to unwind!.\n");
+		netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get an skb, need to unwind!.\n");
 		rx_ring->rx_dropped++;
 		put_page(lbq_desc->p.pg_chunk.page);
 		return;
@@ -1516,8 +1521,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 
 	/* Frame error, so drop the packet. */
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
-			ib_mac_rsp->flags2);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 		rx_ring->rx_errors++;
 		goto err_out;
 	}
@@ -1526,14 +1531,15 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	 * MTU since FCoE uses 2k frames.
 	 */
 	if (skb->len > ndev->mtu + ETH_HLEN) {
-		QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
+		netif_err(qdev, drv, qdev->ndev,
+			  "Segment too small, dropping.\n");
 		rx_ring->rx_dropped++;
 		goto err_out;
 	}
 	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
-	QPRINTK(qdev, RX_STATUS, DEBUG,
-		"%d bytes of headers and data in large. Chain "
-		"page to new skb and pull tail.\n", length);
+	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+		     length);
 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
 				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
 				length-ETH_HLEN);
@@ -1550,8 +1556,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"TCP checksum done!\n");
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1560,8 +1566,9 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 			if (!(iph->frag_off &
 				cpu_to_be16(IP_MF|IP_OFFSET))) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"TCP checksum done!\n");
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "TCP checksum done!\n");
 			}
 		}
 	}
@@ -1600,8 +1607,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	/* Allocate new_skb and copy */
 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
 	if (new_skb == NULL) {
-		QPRINTK(qdev, PROBE, ERR,
-			"No skb available, drop the packet.\n");
+		netif_err(qdev, probe, qdev->ndev,
+			  "No skb available, drop the packet.\n");
 		rx_ring->rx_dropped++;
 		return;
 	}
@@ -1611,8 +1618,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 
 	/* Frame error, so drop the packet. */
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
-			ib_mac_rsp->flags2);
+		netif_err(qdev, drv, qdev->ndev,
+			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
 		rx_ring->rx_errors++;
 		return;
@@ -1637,16 +1644,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	prefetch(skb->data);
 	skb->dev = ndev;
 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
-		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
-			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
-			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
-			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
-			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "%s Multicast.\n",
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 	}
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
-		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Promiscuous Packet.\n");
 
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
@@ -1660,8 +1669,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"TCP checksum done!\n");
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1670,8 +1679,9 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 			if (!(iph->frag_off &
 				cpu_to_be16(IP_MF|IP_OFFSET))) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"TCP checksum done!\n");
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "TCP checksum done!\n");
 			}
 		}
 	}
@@ -1725,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1725 | */ | 1735 | */ |
1726 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && | 1736 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && |
1727 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | 1737 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { |
1728 | QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len); | 1738 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1739 | "Header of %d bytes in small buffer.\n", hdr_len); | ||
1729 | /* | 1740 | /* |
1730 | * Headers fit nicely into a small buffer. | 1741 | * Headers fit nicely into a small buffer. |
1731 | */ | 1742 | */ |
@@ -1744,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1744 | * Handle the data buffer(s). | 1755 | * Handle the data buffer(s). |
1745 | */ | 1756 | */ |
1746 | if (unlikely(!length)) { /* Is there data too? */ | 1757 | if (unlikely(!length)) { /* Is there data too? */ |
1747 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1758 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1748 | "No Data buffer in this packet.\n"); | 1759 | "No Data buffer in this packet.\n"); |
1749 | return skb; | 1760 | return skb; |
1750 | } | 1761 | } |
1751 | 1762 | ||
1752 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { | 1763 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { |
1753 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | 1764 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { |
1754 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1765 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1755 | "Headers in small, data of %d bytes in small, combine them.\n", length); | 1766 | "Headers in small, data of %d bytes in small, combine them.\n", |
1767 | length); | ||
1756 | /* | 1768 | /* |
1757 | * Data is less than small buffer size so it's | 1769 | * Data is less than small buffer size so it's |
1758 | * stuffed in a small buffer. | 1770 | * stuffed in a small buffer. |
@@ -1778,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1778 | maplen), | 1790 | maplen), |
1779 | PCI_DMA_FROMDEVICE); | 1791 | PCI_DMA_FROMDEVICE); |
1780 | } else { | 1792 | } else { |
1781 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1793 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1782 | "%d bytes in a single small buffer.\n", length); | 1794 | "%d bytes in a single small buffer.\n", |
1795 | length); | ||
1783 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1796 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1784 | skb = sbq_desc->p.skb; | 1797 | skb = sbq_desc->p.skb; |
1785 | ql_realign_skb(skb, length); | 1798 | ql_realign_skb(skb, length); |
@@ -1794,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1794 | } | 1807 | } |
1795 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { | 1808 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { |
1796 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | 1809 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { |
1797 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1810 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1798 | "Header in small, %d bytes in large. Chain large to small!\n", length); | 1811 | "Header in small, %d bytes in large. Chain large to small!\n", |
1812 | length); | ||
1799 | /* | 1813 | /* |
1800 | * The data is in a single large buffer. We | 1814 | * The data is in a single large buffer. We |
1801 | * chain it to the header buffer's skb and let | 1815 | * chain it to the header buffer's skb and let |
1802 | * it rip. | 1816 | * it rip. |
1803 | */ | 1817 | */ |
1804 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | 1818 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1805 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1819 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1806 | "Chaining page at offset = %d," | 1820 | "Chaining page at offset = %d, for %d bytes to skb.\n", |
1807 | "for %d bytes to skb.\n", | 1821 | lbq_desc->p.pg_chunk.offset, length); |
1808 | lbq_desc->p.pg_chunk.offset, length); | ||
1809 | skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, | 1822 | skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, |
1810 | lbq_desc->p.pg_chunk.offset, | 1823 | lbq_desc->p.pg_chunk.offset, |
1811 | length); | 1824 | length); |
@@ -1821,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1821 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | 1834 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1822 | skb = netdev_alloc_skb(qdev->ndev, length); | 1835 | skb = netdev_alloc_skb(qdev->ndev, length); |
1823 | if (skb == NULL) { | 1836 | if (skb == NULL) { |
1824 | QPRINTK(qdev, PROBE, DEBUG, | 1837 | netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, |
1825 | "No skb available, drop the packet.\n"); | 1838 | "No skb available, drop the packet.\n"); |
1826 | return NULL; | 1839 | return NULL; |
1827 | } | 1840 | } |
1828 | pci_unmap_page(qdev->pdev, | 1841 | pci_unmap_page(qdev->pdev, |
@@ -1831,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1831 | pci_unmap_len(lbq_desc, maplen), | 1844 | pci_unmap_len(lbq_desc, maplen), |
1832 | PCI_DMA_FROMDEVICE); | 1845 | PCI_DMA_FROMDEVICE); |
1833 | skb_reserve(skb, NET_IP_ALIGN); | 1846 | skb_reserve(skb, NET_IP_ALIGN); |
1834 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1847 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1835 | "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); | 1848 | "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", |
1849 | length); | ||
1836 | skb_fill_page_desc(skb, 0, | 1850 | skb_fill_page_desc(skb, 0, |
1837 | lbq_desc->p.pg_chunk.page, | 1851 | lbq_desc->p.pg_chunk.page, |
1838 | lbq_desc->p.pg_chunk.offset, | 1852 | lbq_desc->p.pg_chunk.offset, |
@@ -1873,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1873 | * a local buffer and use it to find the | 1887 | * a local buffer and use it to find the |
1874 | * pages to chain. | 1888 | * pages to chain. |
1875 | */ | 1889 | */ |
1876 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1890 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1877 | "%d bytes of headers & data in chain of large.\n", length); | 1891 | "%d bytes of headers & data in chain of large.\n", |
1892 | length); | ||
1878 | skb = sbq_desc->p.skb; | 1893 | skb = sbq_desc->p.skb; |
1879 | sbq_desc->p.skb = NULL; | 1894 | sbq_desc->p.skb = NULL; |
1880 | skb_reserve(skb, NET_IP_ALIGN); | 1895 | skb_reserve(skb, NET_IP_ALIGN); |
@@ -1884,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1884 | size = (length < rx_ring->lbq_buf_size) ? length : | 1899 | size = (length < rx_ring->lbq_buf_size) ? length : |
1885 | rx_ring->lbq_buf_size; | 1900 | rx_ring->lbq_buf_size; |
1886 | 1901 | ||
1887 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1902 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1888 | "Adding page %d to skb for %d bytes.\n", | 1903 | "Adding page %d to skb for %d bytes.\n", |
1889 | i, size); | 1904 | i, size); |
1890 | skb_fill_page_desc(skb, i, | 1905 | skb_fill_page_desc(skb, i, |
1891 | lbq_desc->p.pg_chunk.page, | 1906 | lbq_desc->p.pg_chunk.page, |
1892 | lbq_desc->p.pg_chunk.offset, | 1907 | lbq_desc->p.pg_chunk.offset, |
@@ -1916,16 +1931,16 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1916 | 1931 | ||
1917 | skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); | 1932 | skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); |
1918 | if (unlikely(!skb)) { | 1933 | if (unlikely(!skb)) { |
1919 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1934 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1920 | "No skb available, drop packet.\n"); | 1935 | "No skb available, drop packet.\n"); |
1921 | rx_ring->rx_dropped++; | 1936 | rx_ring->rx_dropped++; |
1922 | return; | 1937 | return; |
1923 | } | 1938 | } |
1924 | 1939 | ||
1925 | /* Frame error, so drop the packet. */ | 1940 | /* Frame error, so drop the packet. */ |
1926 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | 1941 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { |
1927 | QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", | 1942 | netif_err(qdev, drv, qdev->ndev, |
1928 | ib_mac_rsp->flags2); | 1943 | "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); |
1929 | dev_kfree_skb_any(skb); | 1944 | dev_kfree_skb_any(skb); |
1930 | rx_ring->rx_errors++; | 1945 | rx_ring->rx_errors++; |
1931 | return; | 1946 | return; |
@@ -1950,17 +1965,18 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1950 | prefetch(skb->data); | 1965 | prefetch(skb->data); |
1951 | skb->dev = ndev; | 1966 | skb->dev = ndev; |
1952 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { | 1967 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { |
1953 | QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n", | 1968 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", |
1954 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1969 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
1955 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", | 1970 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : |
1956 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1971 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
1957 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", | 1972 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : |
1958 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1973 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
1959 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | 1974 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); |
1960 | rx_ring->rx_multicast++; | 1975 | rx_ring->rx_multicast++; |
1961 | } | 1976 | } |
1962 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { | 1977 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { |
1963 | QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); | 1978 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1979 | "Promiscuous Packet.\n"); | ||
1964 | } | 1980 | } |
1965 | 1981 | ||
1966 | skb->protocol = eth_type_trans(skb, ndev); | 1982 | skb->protocol = eth_type_trans(skb, ndev); |
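The hunk above also tightens the multicast log line: the old "%s%s%s" format passed three strings of which at most one was non-empty, while the new single "%s" is fed by a chained conditional that selects exactly one label. A standalone sketch of that selection pattern, using made-up flag values rather than the driver's IB_MAC_IOCB_RSP_* constants:

    #include <stdio.h>

    /* Made-up flag encoding, for illustration only. */
    #define M_MASK 0x3
    #define M_HASH 0x1
    #define M_REG  0x2
    #define M_PROM 0x3

    static const char *mcast_kind(unsigned int flags1)
    {
        /* Chained conditional: exactly one label is selected. */
        return (flags1 & M_MASK) == M_HASH ? "Hash" :
               (flags1 & M_MASK) == M_REG  ? "Registered" :
               (flags1 & M_MASK) == M_PROM ? "Promiscuous" : "";
    }

    int main(void)
    {
        printf("%s Multicast.\n", mcast_kind(M_REG));   /* "Registered Multicast." */
        return 0;
    }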
@@ -1973,8 +1989,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1973 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { | 1989 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { |
1974 | /* TCP frame. */ | 1990 | /* TCP frame. */ |
1975 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { | 1991 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { |
1976 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1992 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1977 | "TCP checksum done!\n"); | 1993 | "TCP checksum done!\n"); |
1978 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1994 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1979 | } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && | 1995 | } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && |
1980 | (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { | 1996 | (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { |
@@ -1983,8 +1999,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1983 | if (!(iph->frag_off & | 1999 | if (!(iph->frag_off & |
1984 | cpu_to_be16(IP_MF|IP_OFFSET))) { | 2000 | cpu_to_be16(IP_MF|IP_OFFSET))) { |
1985 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2001 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1986 | QPRINTK(qdev, RX_STATUS, DEBUG, | 2002 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1987 | "TCP checksum done!\n"); | 2003 | "TCP checksum done!\n"); |
1988 | } | 2004 | } |
1989 | } | 2005 | } |
1990 | } | 2006 | } |
@@ -2054,8 +2070,9 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, | |||
2054 | 2070 | ||
2055 | /* Free small buffer that holds the IAL */ | 2071 | /* Free small buffer that holds the IAL */ |
2056 | lbq_desc = ql_get_curr_sbuf(rx_ring); | 2072 | lbq_desc = ql_get_curr_sbuf(rx_ring); |
2057 | QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n", | 2073 | netif_err(qdev, rx_err, qdev->ndev, |
2058 | length, qdev->ndev->mtu); | 2074 | "Dropping frame, len %d > mtu %d\n", |
2075 | length, qdev->ndev->mtu); | ||
2059 | 2076 | ||
2060 | /* Unwind the large buffers for this frame. */ | 2077 | /* Unwind the large buffers for this frame. */ |
2061 | while (length > 0) { | 2078 | while (length > 0) { |
@@ -2090,20 +2107,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev, | |||
2090 | OB_MAC_IOCB_RSP_L | | 2107 | OB_MAC_IOCB_RSP_L | |
2091 | OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { | 2108 | OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { |
2092 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { | 2109 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { |
2093 | QPRINTK(qdev, TX_DONE, WARNING, | 2110 | netif_warn(qdev, tx_done, qdev->ndev, |
2094 | "Total descriptor length did not match transfer length.\n"); | 2111 | "Total descriptor length did not match transfer length.\n"); |
2095 | } | 2112 | } |
2096 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { | 2113 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { |
2097 | QPRINTK(qdev, TX_DONE, WARNING, | 2114 | netif_warn(qdev, tx_done, qdev->ndev, |
2098 | "Frame too short to be legal, not sent.\n"); | 2115 | "Frame too short to be valid, not sent.\n"); |
2099 | } | 2116 | } |
2100 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { | 2117 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { |
2101 | QPRINTK(qdev, TX_DONE, WARNING, | 2118 | netif_warn(qdev, tx_done, qdev->ndev, |
2102 | "Frame too long, but sent anyway.\n"); | 2119 | "Frame too long, but sent anyway.\n"); |
2103 | } | 2120 | } |
2104 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { | 2121 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { |
2105 | QPRINTK(qdev, TX_DONE, WARNING, | 2122 | netif_warn(qdev, tx_done, qdev->ndev, |
2106 | "PCI backplane error. Frame not sent.\n"); | 2123 | "PCI backplane error. Frame not sent.\n"); |
2107 | } | 2124 | } |
2108 | } | 2125 | } |
2109 | atomic_inc(&tx_ring->tx_count); | 2126 | atomic_inc(&tx_ring->tx_count); |
@@ -2133,33 +2150,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev, | |||
2133 | { | 2150 | { |
2134 | switch (ib_ae_rsp->event) { | 2151 | switch (ib_ae_rsp->event) { |
2135 | case MGMT_ERR_EVENT: | 2152 | case MGMT_ERR_EVENT: |
2136 | QPRINTK(qdev, RX_ERR, ERR, | 2153 | netif_err(qdev, rx_err, qdev->ndev, |
2137 | "Management Processor Fatal Error.\n"); | 2154 | "Management Processor Fatal Error.\n"); |
2138 | ql_queue_fw_error(qdev); | 2155 | ql_queue_fw_error(qdev); |
2139 | return; | 2156 | return; |
2140 | 2157 | ||
2141 | case CAM_LOOKUP_ERR_EVENT: | 2158 | case CAM_LOOKUP_ERR_EVENT: |
2142 | QPRINTK(qdev, LINK, ERR, | 2159 | netif_err(qdev, link, qdev->ndev, |
2143 | "Multiple CAM hits lookup occurred.\n"); | 2160 | "Multiple CAM hits lookup occurred.\n"); |
2144 | QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n"); | 2161 | netif_err(qdev, drv, qdev->ndev, |
2162 | "This event shouldn't occur.\n"); | ||
2145 | ql_queue_asic_error(qdev); | 2163 | ql_queue_asic_error(qdev); |
2146 | return; | 2164 | return; |
2147 | 2165 | ||
2148 | case SOFT_ECC_ERROR_EVENT: | 2166 | case SOFT_ECC_ERROR_EVENT: |
2149 | QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n"); | 2167 | netif_err(qdev, rx_err, qdev->ndev, |
2168 | "Soft ECC error detected.\n"); | ||
2150 | ql_queue_asic_error(qdev); | 2169 | ql_queue_asic_error(qdev); |
2151 | break; | 2170 | break; |
2152 | 2171 | ||
2153 | case PCI_ERR_ANON_BUF_RD: | 2172 | case PCI_ERR_ANON_BUF_RD: |
2154 | QPRINTK(qdev, RX_ERR, ERR, | 2173 | netif_err(qdev, rx_err, qdev->ndev, |
2155 | "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", | 2174 | "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", |
2156 | ib_ae_rsp->q_id); | 2175 | ib_ae_rsp->q_id); |
2157 | ql_queue_asic_error(qdev); | 2176 | ql_queue_asic_error(qdev); |
2158 | break; | 2177 | break; |
2159 | 2178 | ||
2160 | default: | 2179 | default: |
2161 | QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n", | 2180 | netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", |
2162 | ib_ae_rsp->event); | 2181 | ib_ae_rsp->event); |
2163 | ql_queue_asic_error(qdev); | 2182 | ql_queue_asic_error(qdev); |
2164 | break; | 2183 | break; |
2165 | } | 2184 | } |
@@ -2176,9 +2195,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |||
2176 | /* While there are entries in the completion queue. */ | 2195 | /* While there are entries in the completion queue. */ |
2177 | while (prod != rx_ring->cnsmr_idx) { | 2196 | while (prod != rx_ring->cnsmr_idx) { |
2178 | 2197 | ||
2179 | QPRINTK(qdev, RX_STATUS, DEBUG, | 2198 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2180 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | 2199 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", |
2181 | prod, rx_ring->cnsmr_idx); | 2200 | rx_ring->cq_id, prod, rx_ring->cnsmr_idx); |
2182 | 2201 | ||
2183 | net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; | 2202 | net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; |
2184 | rmb(); | 2203 | rmb(); |
@@ -2189,9 +2208,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |||
2189 | ql_process_mac_tx_intr(qdev, net_rsp); | 2208 | ql_process_mac_tx_intr(qdev, net_rsp); |
2190 | break; | 2209 | break; |
2191 | default: | 2210 | default: |
2192 | QPRINTK(qdev, RX_STATUS, DEBUG, | 2211 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2193 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | 2212 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", |
2194 | net_rsp->opcode); | 2213 | net_rsp->opcode); |
2195 | } | 2214 | } |
2196 | count++; | 2215 | count++; |
2197 | ql_update_cq(rx_ring); | 2216 | ql_update_cq(rx_ring); |
@@ -2223,9 +2242,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | |||
2223 | /* While there are entries in the completion queue. */ | 2242 | /* While there are entries in the completion queue. */ |
2224 | while (prod != rx_ring->cnsmr_idx) { | 2243 | while (prod != rx_ring->cnsmr_idx) { |
2225 | 2244 | ||
2226 | QPRINTK(qdev, RX_STATUS, DEBUG, | 2245 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2227 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | 2246 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", |
2228 | prod, rx_ring->cnsmr_idx); | 2247 | rx_ring->cq_id, prod, rx_ring->cnsmr_idx); |
2229 | 2248 | ||
2230 | net_rsp = rx_ring->curr_entry; | 2249 | net_rsp = rx_ring->curr_entry; |
2231 | rmb(); | 2250 | rmb(); |
@@ -2241,11 +2260,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | |||
2241 | net_rsp); | 2260 | net_rsp); |
2242 | break; | 2261 | break; |
2243 | default: | 2262 | default: |
2244 | { | 2263 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2245 | QPRINTK(qdev, RX_STATUS, DEBUG, | 2264 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", |
2246 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | 2265 | net_rsp->opcode); |
2247 | net_rsp->opcode); | 2266 | break; |
2248 | } | ||
2249 | } | 2267 | } |
2250 | count++; | 2268 | count++; |
2251 | ql_update_cq(rx_ring); | 2269 | ql_update_cq(rx_ring); |
@@ -2266,8 +2284,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |||
2266 | int i, work_done = 0; | 2284 | int i, work_done = 0; |
2267 | struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; | 2285 | struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; |
2268 | 2286 | ||
2269 | QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n", | 2287 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2270 | rx_ring->cq_id); | 2288 | "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); |
2271 | 2289 | ||
2272 | /* Service the TX rings first. They start | 2290 | /* Service the TX rings first. They start |
2273 | * right after the RSS rings. */ | 2291 | * right after the RSS rings. */ |
@@ -2279,9 +2297,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |||
2279 | if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && | 2297 | if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && |
2280 | (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != | 2298 | (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != |
2281 | trx_ring->cnsmr_idx)) { | 2299 | trx_ring->cnsmr_idx)) { |
2282 | QPRINTK(qdev, INTR, DEBUG, | 2300 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2283 | "%s: Servicing TX completion ring %d.\n", | 2301 | "%s: Servicing TX completion ring %d.\n", |
2284 | __func__, trx_ring->cq_id); | 2302 | __func__, trx_ring->cq_id); |
2285 | ql_clean_outbound_rx_ring(trx_ring); | 2303 | ql_clean_outbound_rx_ring(trx_ring); |
2286 | } | 2304 | } |
2287 | } | 2305 | } |
@@ -2291,9 +2309,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |||
2291 | */ | 2309 | */ |
2292 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != | 2310 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != |
2293 | rx_ring->cnsmr_idx) { | 2311 | rx_ring->cnsmr_idx) { |
2294 | QPRINTK(qdev, INTR, DEBUG, | 2312 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2295 | "%s: Servicing RX completion ring %d.\n", | 2313 | "%s: Servicing RX completion ring %d.\n", |
2296 | __func__, rx_ring->cq_id); | 2314 | __func__, rx_ring->cq_id); |
2297 | work_done = ql_clean_inbound_rx_ring(rx_ring, budget); | 2315 | work_done = ql_clean_inbound_rx_ring(rx_ring, budget); |
2298 | } | 2316 | } |
2299 | 2317 | ||
@@ -2310,12 +2328,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr | |||
2310 | 2328 | ||
2311 | qdev->vlgrp = grp; | 2329 | qdev->vlgrp = grp; |
2312 | if (grp) { | 2330 | if (grp) { |
2313 | QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n"); | 2331 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
2332 | "Turning on VLAN in NIC_RCV_CFG.\n"); | ||
2314 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | | 2333 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | |
2315 | NIC_RCV_CFG_VLAN_MATCH_AND_NON); | 2334 | NIC_RCV_CFG_VLAN_MATCH_AND_NON); |
2316 | } else { | 2335 | } else { |
2317 | QPRINTK(qdev, IFUP, DEBUG, | 2336 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
2318 | "Turning off VLAN in NIC_RCV_CFG.\n"); | 2337 | "Turning off VLAN in NIC_RCV_CFG.\n"); |
2319 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); | 2338 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); |
2320 | } | 2339 | } |
2321 | } | 2340 | } |
@@ -2331,7 +2350,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) | |||
2331 | return; | 2350 | return; |
2332 | if (ql_set_mac_addr_reg | 2351 | if (ql_set_mac_addr_reg |
2333 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 2352 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
2334 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); | 2353 | netif_err(qdev, ifup, qdev->ndev, |
2354 | "Failed to init vlan address.\n"); | ||
2335 | } | 2355 | } |
2336 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 2356 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
2337 | } | 2357 | } |
@@ -2348,7 +2368,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) | |||
2348 | 2368 | ||
2349 | if (ql_set_mac_addr_reg | 2369 | if (ql_set_mac_addr_reg |
2350 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 2370 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
2351 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); | 2371 | netif_err(qdev, ifup, qdev->ndev, |
2372 | "Failed to clear vlan address.\n"); | ||
2352 | } | 2373 | } |
2353 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 2374 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
2354 | 2375 | ||
@@ -2377,7 +2398,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
2377 | 2398 | ||
2378 | spin_lock(&qdev->hw_lock); | 2399 | spin_lock(&qdev->hw_lock); |
2379 | if (atomic_read(&qdev->intr_context[0].irq_cnt)) { | 2400 | if (atomic_read(&qdev->intr_context[0].irq_cnt)) { |
2380 | QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n"); | 2401 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2402 | "Shared Interrupt, Not ours!\n"); | ||
2381 | spin_unlock(&qdev->hw_lock); | 2403 | spin_unlock(&qdev->hw_lock); |
2382 | return IRQ_NONE; | 2404 | return IRQ_NONE; |
2383 | } | 2405 | } |
@@ -2390,10 +2412,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
2390 | */ | 2412 | */ |
2391 | if (var & STS_FE) { | 2413 | if (var & STS_FE) { |
2392 | ql_queue_asic_error(qdev); | 2414 | ql_queue_asic_error(qdev); |
2393 | QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var); | 2415 | netif_err(qdev, intr, qdev->ndev, |
2416 | "Got fatal error, STS = %x.\n", var); | ||
2394 | var = ql_read32(qdev, ERR_STS); | 2417 | var = ql_read32(qdev, ERR_STS); |
2395 | QPRINTK(qdev, INTR, ERR, | 2418 | netif_err(qdev, intr, qdev->ndev, |
2396 | "Resetting chip. Error Status Register = 0x%x\n", var); | 2419 | "Resetting chip. Error Status Register = 0x%x\n", var); |
2397 | return IRQ_HANDLED; | 2420 | return IRQ_HANDLED; |
2398 | } | 2421 | } |
2399 | 2422 | ||
@@ -2406,7 +2429,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
2406 | * We've got an async event or mailbox completion. | 2429 | * We've got an async event or mailbox completion. |
2407 | * Handle it and clear the source of the interrupt. | 2430 | * Handle it and clear the source of the interrupt. |
2408 | */ | 2431 | */ |
2409 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); | 2432 | netif_err(qdev, intr, qdev->ndev, |
2433 | "Got MPI processor interrupt.\n"); | ||
2410 | ql_disable_completion_interrupt(qdev, intr_context->intr); | 2434 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
2411 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); | 2435 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
2412 | queue_delayed_work_on(smp_processor_id(), | 2436 | queue_delayed_work_on(smp_processor_id(), |
@@ -2421,8 +2445,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
2421 | */ | 2445 | */ |
2422 | var = ql_read32(qdev, ISR1); | 2446 | var = ql_read32(qdev, ISR1); |
2423 | if (var & intr_context->irq_mask) { | 2447 | if (var & intr_context->irq_mask) { |
2424 | QPRINTK(qdev, INTR, INFO, | 2448 | netif_info(qdev, intr, qdev->ndev, |
2425 | "Waking handler for rx_ring[0].\n"); | 2449 | "Waking handler for rx_ring[0].\n"); |
2426 | ql_disable_completion_interrupt(qdev, intr_context->intr); | 2450 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
2427 | napi_schedule(&rx_ring->napi); | 2451 | napi_schedule(&rx_ring->napi); |
2428 | work_done++; | 2452 | work_done++; |
@@ -2519,9 +2543,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) | |||
2519 | return NETDEV_TX_OK; | 2543 | return NETDEV_TX_OK; |
2520 | 2544 | ||
2521 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { | 2545 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { |
2522 | QPRINTK(qdev, TX_QUEUED, INFO, | 2546 | netif_info(qdev, tx_queued, qdev->ndev, |
2523 | "%s: shutting down tx queue %d du to lack of resources.\n", | 2547 | "%s: shutting down tx queue %d du to lack of resources.\n", |
2524 | __func__, tx_ring_idx); | 2548 | __func__, tx_ring_idx); |
2525 | netif_stop_subqueue(ndev, tx_ring->wq_id); | 2549 | netif_stop_subqueue(ndev, tx_ring->wq_id); |
2526 | atomic_inc(&tx_ring->queue_stopped); | 2550 | atomic_inc(&tx_ring->queue_stopped); |
2527 | tx_ring->tx_errors++; | 2551 | tx_ring->tx_errors++; |
@@ -2542,8 +2566,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) | |||
2542 | mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); | 2566 | mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); |
2543 | 2567 | ||
2544 | if (qdev->vlgrp && vlan_tx_tag_present(skb)) { | 2568 | if (qdev->vlgrp && vlan_tx_tag_present(skb)) { |
2545 | QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n", | 2569 | netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, |
2546 | vlan_tx_tag_get(skb)); | 2570 | "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); |
2547 | mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; | 2571 | mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; |
2548 | mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); | 2572 | mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); |
2549 | } | 2573 | } |
@@ -2557,8 +2581,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) | |||
2557 | } | 2581 | } |
2558 | if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != | 2582 | if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != |
2559 | NETDEV_TX_OK) { | 2583 | NETDEV_TX_OK) { |
2560 | QPRINTK(qdev, TX_QUEUED, ERR, | 2584 | netif_err(qdev, tx_queued, qdev->ndev, |
2561 | "Could not map the segments.\n"); | 2585 | "Could not map the segments.\n"); |
2562 | tx_ring->tx_errors++; | 2586 | tx_ring->tx_errors++; |
2563 | return NETDEV_TX_BUSY; | 2587 | return NETDEV_TX_BUSY; |
2564 | } | 2588 | } |
@@ -2569,8 +2593,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) | |||
2569 | wmb(); | 2593 | wmb(); |
2570 | 2594 | ||
2571 | ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); | 2595 | ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); |
2572 | QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", | 2596 | netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, |
2573 | tx_ring->prod_idx, skb->len); | 2597 | "tx queued, slot %d, len %d\n", |
2598 | tx_ring->prod_idx, skb->len); | ||
2574 | 2599 | ||
2575 | atomic_dec(&tx_ring->tx_count); | 2600 | atomic_dec(&tx_ring->tx_count); |
2576 | return NETDEV_TX_OK; | 2601 | return NETDEV_TX_OK; |
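In the transmit path above, the driver finishes building the IOCB, issues wmb(), and only then writes the new producer index to the doorbell, so the hardware cannot observe the updated index before the descriptor contents are visible. A rough userspace analogy of that publish ordering, with a C11 release store standing in for the barrier plus doorbell write (ring, prod_idx and submit are invented names, not driver code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct entry { int len; };

    static struct entry ring[RING_SIZE];
    static atomic_uint prod_idx;        /* what the consumer side polls */

    static void submit(int len)
    {
        unsigned int p = atomic_load_explicit(&prod_idx, memory_order_relaxed);

        ring[p % RING_SIZE].len = len;  /* fill the "descriptor" */
        /* Release store: the descriptor write cannot be reordered past the
         * index update; this is the analogue of wmb() before the doorbell. */
        atomic_store_explicit(&prod_idx, p + 1, memory_order_release);
    }

    int main(void)
    {
        submit(64);
        printf("prod_idx is now %u\n", atomic_load(&prod_idx));
        return 0;
    }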
@@ -2601,8 +2626,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev) | |||
2601 | pci_alloc_consistent(qdev->pdev, | 2626 | pci_alloc_consistent(qdev->pdev, |
2602 | PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); | 2627 | PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); |
2603 | if (qdev->rx_ring_shadow_reg_area == NULL) { | 2628 | if (qdev->rx_ring_shadow_reg_area == NULL) { |
2604 | QPRINTK(qdev, IFUP, ERR, | 2629 | netif_err(qdev, ifup, qdev->ndev, |
2605 | "Allocation of RX shadow space failed.\n"); | 2630 | "Allocation of RX shadow space failed.\n"); |
2606 | return -ENOMEM; | 2631 | return -ENOMEM; |
2607 | } | 2632 | } |
2608 | memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); | 2633 | memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); |
@@ -2610,8 +2635,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev) | |||
2610 | pci_alloc_consistent(qdev->pdev, PAGE_SIZE, | 2635 | pci_alloc_consistent(qdev->pdev, PAGE_SIZE, |
2611 | &qdev->tx_ring_shadow_reg_dma); | 2636 | &qdev->tx_ring_shadow_reg_dma); |
2612 | if (qdev->tx_ring_shadow_reg_area == NULL) { | 2637 | if (qdev->tx_ring_shadow_reg_area == NULL) { |
2613 | QPRINTK(qdev, IFUP, ERR, | 2638 | netif_err(qdev, ifup, qdev->ndev, |
2614 | "Allocation of TX shadow space failed.\n"); | 2639 | "Allocation of TX shadow space failed.\n"); |
2615 | goto err_wqp_sh_area; | 2640 | goto err_wqp_sh_area; |
2616 | } | 2641 | } |
2617 | memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); | 2642 | memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); |
@@ -2665,7 +2690,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev, | |||
2665 | 2690 | ||
2666 | if ((tx_ring->wq_base == NULL) || | 2691 | if ((tx_ring->wq_base == NULL) || |
2667 | tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { | 2692 | tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { |
2668 | QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); | 2693 | netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); |
2669 | return -ENOMEM; | 2694 | return -ENOMEM; |
2670 | } | 2695 | } |
2671 | tx_ring->q = | 2696 | tx_ring->q = |
@@ -2716,7 +2741,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring | |||
2716 | for (i = 0; i < rx_ring->sbq_len; i++) { | 2741 | for (i = 0; i < rx_ring->sbq_len; i++) { |
2717 | sbq_desc = &rx_ring->sbq[i]; | 2742 | sbq_desc = &rx_ring->sbq[i]; |
2718 | if (sbq_desc == NULL) { | 2743 | if (sbq_desc == NULL) { |
2719 | QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i); | 2744 | netif_err(qdev, ifup, qdev->ndev, |
2745 | "sbq_desc %d is NULL.\n", i); | ||
2720 | return; | 2746 | return; |
2721 | } | 2747 | } |
2722 | if (sbq_desc->p.skb) { | 2748 | if (sbq_desc->p.skb) { |
@@ -2843,7 +2869,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |||
2843 | &rx_ring->cq_base_dma); | 2869 | &rx_ring->cq_base_dma); |
2844 | 2870 | ||
2845 | if (rx_ring->cq_base == NULL) { | 2871 | if (rx_ring->cq_base == NULL) { |
2846 | QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n"); | 2872 | netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); |
2847 | return -ENOMEM; | 2873 | return -ENOMEM; |
2848 | } | 2874 | } |
2849 | 2875 | ||
@@ -2856,8 +2882,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |||
2856 | &rx_ring->sbq_base_dma); | 2882 | &rx_ring->sbq_base_dma); |
2857 | 2883 | ||
2858 | if (rx_ring->sbq_base == NULL) { | 2884 | if (rx_ring->sbq_base == NULL) { |
2859 | QPRINTK(qdev, IFUP, ERR, | 2885 | netif_err(qdev, ifup, qdev->ndev, |
2860 | "Small buffer queue allocation failed.\n"); | 2886 | "Small buffer queue allocation failed.\n"); |
2861 | goto err_mem; | 2887 | goto err_mem; |
2862 | } | 2888 | } |
2863 | 2889 | ||
@@ -2868,8 +2894,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |||
2868 | kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), | 2894 | kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), |
2869 | GFP_KERNEL); | 2895 | GFP_KERNEL); |
2870 | if (rx_ring->sbq == NULL) { | 2896 | if (rx_ring->sbq == NULL) { |
2871 | QPRINTK(qdev, IFUP, ERR, | 2897 | netif_err(qdev, ifup, qdev->ndev, |
2872 | "Small buffer queue control block allocation failed.\n"); | 2898 | "Small buffer queue control block allocation failed.\n"); |
2873 | goto err_mem; | 2899 | goto err_mem; |
2874 | } | 2900 | } |
2875 | 2901 | ||
@@ -2885,8 +2911,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |||
2885 | &rx_ring->lbq_base_dma); | 2911 | &rx_ring->lbq_base_dma); |
2886 | 2912 | ||
2887 | if (rx_ring->lbq_base == NULL) { | 2913 | if (rx_ring->lbq_base == NULL) { |
2888 | QPRINTK(qdev, IFUP, ERR, | 2914 | netif_err(qdev, ifup, qdev->ndev, |
2889 | "Large buffer queue allocation failed.\n"); | 2915 | "Large buffer queue allocation failed.\n"); |
2890 | goto err_mem; | 2916 | goto err_mem; |
2891 | } | 2917 | } |
2892 | /* | 2918 | /* |
@@ -2896,8 +2922,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |||
2896 | kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), | 2922 | kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), |
2897 | GFP_KERNEL); | 2923 | GFP_KERNEL); |
2898 | if (rx_ring->lbq == NULL) { | 2924 | if (rx_ring->lbq == NULL) { |
2899 | QPRINTK(qdev, IFUP, ERR, | 2925 | netif_err(qdev, ifup, qdev->ndev, |
2900 | "Large buffer queue control block allocation failed.\n"); | 2926 | "Large buffer queue control block allocation failed.\n"); |
2901 | goto err_mem; | 2927 | goto err_mem; |
2902 | } | 2928 | } |
2903 | 2929 | ||
@@ -2926,10 +2952,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev) | |||
2926 | for (i = 0; i < tx_ring->wq_len; i++) { | 2952 | for (i = 0; i < tx_ring->wq_len; i++) { |
2927 | tx_ring_desc = &tx_ring->q[i]; | 2953 | tx_ring_desc = &tx_ring->q[i]; |
2928 | if (tx_ring_desc && tx_ring_desc->skb) { | 2954 | if (tx_ring_desc && tx_ring_desc->skb) { |
2929 | QPRINTK(qdev, IFDOWN, ERR, | 2955 | netif_err(qdev, ifdown, qdev->ndev, |
2930 | "Freeing lost SKB %p, from queue %d, index %d.\n", | 2956 | "Freeing lost SKB %p, from queue %d, index %d.\n", |
2931 | tx_ring_desc->skb, j, | 2957 | tx_ring_desc->skb, j, |
2932 | tx_ring_desc->index); | 2958 | tx_ring_desc->index); |
2933 | ql_unmap_send(qdev, tx_ring_desc, | 2959 | ql_unmap_send(qdev, tx_ring_desc, |
2934 | tx_ring_desc->map_cnt); | 2960 | tx_ring_desc->map_cnt); |
2935 | dev_kfree_skb(tx_ring_desc->skb); | 2961 | dev_kfree_skb(tx_ring_desc->skb); |
@@ -2960,16 +2986,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev) | |||
2960 | 2986 | ||
2961 | for (i = 0; i < qdev->rx_ring_count; i++) { | 2987 | for (i = 0; i < qdev->rx_ring_count; i++) { |
2962 | if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { | 2988 | if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { |
2963 | QPRINTK(qdev, IFUP, ERR, | 2989 | netif_err(qdev, ifup, qdev->ndev, |
2964 | "RX resource allocation failed.\n"); | 2990 | "RX resource allocation failed.\n"); |
2965 | goto err_mem; | 2991 | goto err_mem; |
2966 | } | 2992 | } |
2967 | } | 2993 | } |
2968 | /* Allocate tx queue resources */ | 2994 | /* Allocate tx queue resources */ |
2969 | for (i = 0; i < qdev->tx_ring_count; i++) { | 2995 | for (i = 0; i < qdev->tx_ring_count; i++) { |
2970 | if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { | 2996 | if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { |
2971 | QPRINTK(qdev, IFUP, ERR, | 2997 | netif_err(qdev, ifup, qdev->ndev, |
2972 | "TX resource allocation failed.\n"); | 2998 | "TX resource allocation failed.\n"); |
2973 | goto err_mem; | 2999 | goto err_mem; |
2974 | } | 3000 | } |
2975 | } | 3001 | } |
@@ -3104,14 +3130,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
3104 | cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); | 3130 | cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); |
3105 | break; | 3131 | break; |
3106 | default: | 3132 | default: |
3107 | QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n", | 3133 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3108 | rx_ring->type); | 3134 | "Invalid rx_ring->type = %d.\n", rx_ring->type); |
3109 | } | 3135 | } |
3110 | QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n"); | 3136 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3137 | "Initializing rx work queue.\n"); | ||
3111 | err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), | 3138 | err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), |
3112 | CFG_LCQ, rx_ring->cq_id); | 3139 | CFG_LCQ, rx_ring->cq_id); |
3113 | if (err) { | 3140 | if (err) { |
3114 | QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n"); | 3141 | netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); |
3115 | return err; | 3142 | return err; |
3116 | } | 3143 | } |
3117 | return err; | 3144 | return err; |
@@ -3157,10 +3184,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |||
3157 | err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, | 3184 | err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, |
3158 | (u16) tx_ring->wq_id); | 3185 | (u16) tx_ring->wq_id); |
3159 | if (err) { | 3186 | if (err) { |
3160 | QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); | 3187 | netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); |
3161 | return err; | 3188 | return err; |
3162 | } | 3189 | } |
3163 | QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n"); | 3190 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3191 | "Successfully loaded WQICB.\n"); | ||
3164 | return err; | 3192 | return err; |
3165 | } | 3193 | } |
3166 | 3194 | ||
@@ -3214,15 +3242,15 @@ static void ql_enable_msix(struct ql_adapter *qdev) | |||
3214 | if (err < 0) { | 3242 | if (err < 0) { |
3215 | kfree(qdev->msi_x_entry); | 3243 | kfree(qdev->msi_x_entry); |
3216 | qdev->msi_x_entry = NULL; | 3244 | qdev->msi_x_entry = NULL; |
3217 | QPRINTK(qdev, IFUP, WARNING, | 3245 | netif_warn(qdev, ifup, qdev->ndev, |
3218 | "MSI-X Enable failed, trying MSI.\n"); | 3246 | "MSI-X Enable failed, trying MSI.\n"); |
3219 | qdev->intr_count = 1; | 3247 | qdev->intr_count = 1; |
3220 | qlge_irq_type = MSI_IRQ; | 3248 | qlge_irq_type = MSI_IRQ; |
3221 | } else if (err == 0) { | 3249 | } else if (err == 0) { |
3222 | set_bit(QL_MSIX_ENABLED, &qdev->flags); | 3250 | set_bit(QL_MSIX_ENABLED, &qdev->flags); |
3223 | QPRINTK(qdev, IFUP, INFO, | 3251 | netif_info(qdev, ifup, qdev->ndev, |
3224 | "MSI-X Enabled, got %d vectors.\n", | 3252 | "MSI-X Enabled, got %d vectors.\n", |
3225 | qdev->intr_count); | 3253 | qdev->intr_count); |
3226 | return; | 3254 | return; |
3227 | } | 3255 | } |
3228 | } | 3256 | } |
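ql_enable_msix() above, together with the msi: path that follows, forms a fallback ladder: try MSI-X vectors first, drop back to single-vector MSI, and finally settle for a legacy interrupt. A toy sketch of that control flow, where enable_msix()/enable_msi() are placeholders and not the real pci_enable_msix()/pci_enable_msi() calls:

    #include <stdbool.h>
    #include <stdio.h>

    enum irq_type { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

    /* Placeholders standing in for the PCI enable calls. */
    static bool enable_msix(void) { return false; } /* pretend MSI-X is unavailable */
    static bool enable_msi(void)  { return true; }  /* pretend MSI succeeds */

    static enum irq_type pick_irq_type(void)
    {
        if (enable_msix())
            return IRQ_MSIX;
        if (enable_msi())
            return IRQ_MSI;
        return IRQ_LEGACY;
    }

    int main(void)
    {
        printf("running with irq type %d\n", pick_irq_type());
        return 0;
    }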
@@ -3231,13 +3259,14 @@ msi: | |||
3231 | if (qlge_irq_type == MSI_IRQ) { | 3259 | if (qlge_irq_type == MSI_IRQ) { |
3232 | if (!pci_enable_msi(qdev->pdev)) { | 3260 | if (!pci_enable_msi(qdev->pdev)) { |
3233 | set_bit(QL_MSI_ENABLED, &qdev->flags); | 3261 | set_bit(QL_MSI_ENABLED, &qdev->flags); |
3234 | QPRINTK(qdev, IFUP, INFO, | 3262 | netif_info(qdev, ifup, qdev->ndev, |
3235 | "Running with MSI interrupts.\n"); | 3263 | "Running with MSI interrupts.\n"); |
3236 | return; | 3264 | return; |
3237 | } | 3265 | } |
3238 | } | 3266 | } |
3239 | qlge_irq_type = LEG_IRQ; | 3267 | qlge_irq_type = LEG_IRQ; |
3240 | QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); | 3268 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3269 | "Running with legacy interrupts.\n"); | ||
3241 | } | 3270 | } |
3242 | 3271 | ||
3243 | /* Each vector services 1 RSS ring and 1 or more | 3272 | /* Each vector services 1 RSS ring and 1 or more
@@ -3409,12 +3438,12 @@ static void ql_free_irq(struct ql_adapter *qdev) | |||
3409 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | 3438 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { |
3410 | free_irq(qdev->msi_x_entry[i].vector, | 3439 | free_irq(qdev->msi_x_entry[i].vector, |
3411 | &qdev->rx_ring[i]); | 3440 | &qdev->rx_ring[i]); |
3412 | QPRINTK(qdev, IFDOWN, DEBUG, | 3441 | netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, |
3413 | "freeing msix interrupt %d.\n", i); | 3442 | "freeing msix interrupt %d.\n", i); |
3414 | } else { | 3443 | } else { |
3415 | free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); | 3444 | free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); |
3416 | QPRINTK(qdev, IFDOWN, DEBUG, | 3445 | netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, |
3417 | "freeing msi interrupt %d.\n", i); | 3446 | "freeing msi interrupt %d.\n", i); |
3418 | } | 3447 | } |
3419 | } | 3448 | } |
3420 | } | 3449 | } |
@@ -3439,32 +3468,33 @@ static int ql_request_irq(struct ql_adapter *qdev) | |||
3439 | intr_context->name, | 3468 | intr_context->name, |
3440 | &qdev->rx_ring[i]); | 3469 | &qdev->rx_ring[i]); |
3441 | if (status) { | 3470 | if (status) { |
3442 | QPRINTK(qdev, IFUP, ERR, | 3471 | netif_err(qdev, ifup, qdev->ndev, |
3443 | "Failed request for MSIX interrupt %d.\n", | 3472 | "Failed request for MSIX interrupt %d.\n", |
3444 | i); | 3473 | i); |
3445 | goto err_irq; | 3474 | goto err_irq; |
3446 | } else { | 3475 | } else { |
3447 | QPRINTK(qdev, IFUP, DEBUG, | 3476 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3448 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | 3477 | "Hooked intr %d, queue type %s, with name %s.\n", |
3449 | i, | 3478 | i, |
3450 | qdev->rx_ring[i].type == | 3479 | qdev->rx_ring[i].type == DEFAULT_Q ? |
3451 | DEFAULT_Q ? "DEFAULT_Q" : "", | 3480 | "DEFAULT_Q" : |
3452 | qdev->rx_ring[i].type == | 3481 | qdev->rx_ring[i].type == TX_Q ? |
3453 | TX_Q ? "TX_Q" : "", | 3482 | "TX_Q" : |
3454 | qdev->rx_ring[i].type == | 3483 | qdev->rx_ring[i].type == RX_Q ? |
3455 | RX_Q ? "RX_Q" : "", intr_context->name); | 3484 | "RX_Q" : "", |
3485 | intr_context->name); | ||
3456 | } | 3486 | } |
3457 | } else { | 3487 | } else { |
3458 | QPRINTK(qdev, IFUP, DEBUG, | 3488 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3459 | "trying msi or legacy interrupts.\n"); | 3489 | "trying msi or legacy interrupts.\n"); |
3460 | QPRINTK(qdev, IFUP, DEBUG, | 3490 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3461 | "%s: irq = %d.\n", __func__, pdev->irq); | 3491 | "%s: irq = %d.\n", __func__, pdev->irq); |
3462 | QPRINTK(qdev, IFUP, DEBUG, | 3492 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3463 | "%s: context->name = %s.\n", __func__, | 3493 | "%s: context->name = %s.\n", __func__, |
3464 | intr_context->name); | 3494 | intr_context->name); |
3465 | QPRINTK(qdev, IFUP, DEBUG, | 3495 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3466 | "%s: dev_id = 0x%p.\n", __func__, | 3496 | "%s: dev_id = 0x%p.\n", __func__, |
3467 | &qdev->rx_ring[0]); | 3497 | &qdev->rx_ring[0]); |
3468 | status = | 3498 | status = |
3469 | request_irq(pdev->irq, qlge_isr, | 3499 | request_irq(pdev->irq, qlge_isr, |
3470 | test_bit(QL_MSI_ENABLED, | 3500 | test_bit(QL_MSI_ENABLED, |
@@ -3474,20 +3504,20 @@ static int ql_request_irq(struct ql_adapter *qdev) | |||
3474 | if (status) | 3504 | if (status) |
3475 | goto err_irq; | 3505 | goto err_irq; |
3476 | 3506 | ||
3477 | QPRINTK(qdev, IFUP, ERR, | 3507 | netif_err(qdev, ifup, qdev->ndev, |
3478 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | 3508 | "Hooked intr %d, queue type %s, with name %s.\n", |
3479 | i, | 3509 | i, |
3480 | qdev->rx_ring[0].type == | 3510 | qdev->rx_ring[0].type == DEFAULT_Q ? |
3481 | DEFAULT_Q ? "DEFAULT_Q" : "", | 3511 | "DEFAULT_Q" : |
3482 | qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "", | 3512 | qdev->rx_ring[0].type == TX_Q ? "TX_Q" : |
3483 | qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", | 3513 | qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", |
3484 | intr_context->name); | 3514 | intr_context->name); |
3485 | } | 3515 | } |
3486 | intr_context->hooked = 1; | 3516 | intr_context->hooked = 1; |
3487 | } | 3517 | } |
3488 | return status; | 3518 | return status; |
3489 | err_irq: | 3519 | err_irq: |
3490 | QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n"); | 3520 | netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3491 | ql_free_irq(qdev); | 3521 | ql_free_irq(qdev); |
3492 | return status; | 3522 | return status; |
3493 | } | 3523 | } |
@@ -3521,14 +3551,15 @@ static int ql_start_rss(struct ql_adapter *qdev) | |||
3521 | memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); | 3551 | memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); |
3522 | memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); | 3552 | memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); |
3523 | 3553 | ||
3524 | QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); | 3554 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n"); |
3525 | 3555 | ||
3526 | status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); | 3556 | status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); |
3527 | if (status) { | 3557 | if (status) { |
3528 | QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); | 3558 | netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); |
3529 | return status; | 3559 | return status; |
3530 | } | 3560 | } |
3531 | QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n"); | 3561 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3562 | "Successfully loaded RICB.\n"); | ||
3532 | return status; | 3563 | return status; |
3533 | } | 3564 | } |
3534 | 3565 | ||
@@ -3543,9 +3574,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev) | |||
3543 | for (i = 0; i < 16; i++) { | 3574 | for (i = 0; i < 16; i++) { |
3544 | status = ql_set_routing_reg(qdev, i, 0, 0); | 3575 | status = ql_set_routing_reg(qdev, i, 0, 0); |
3545 | if (status) { | 3576 | if (status) { |
3546 | QPRINTK(qdev, IFUP, ERR, | 3577 | netif_err(qdev, ifup, qdev->ndev, |
3547 | "Failed to init routing register for CAM " | 3578 | "Failed to init routing register for CAM packets.\n"); |
3548 | "packets.\n"); | ||
3549 | break; | 3579 | break; |
3550 | } | 3580 | } |
3551 | } | 3581 | } |
@@ -3569,14 +3599,14 @@ static int ql_route_initialize(struct ql_adapter *qdev) | |||
3569 | 3599 | ||
3570 | status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); | 3600 | status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); |
3571 | if (status) { | 3601 | if (status) { |
3572 | QPRINTK(qdev, IFUP, ERR, | 3602 | netif_err(qdev, ifup, qdev->ndev, |
3573 | "Failed to init routing register for error packets.\n"); | 3603 | "Failed to init routing register for error packets.\n"); |
3574 | goto exit; | 3604 | goto exit; |
3575 | } | 3605 | } |
3576 | status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); | 3606 | status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); |
3577 | if (status) { | 3607 | if (status) { |
3578 | QPRINTK(qdev, IFUP, ERR, | 3608 | netif_err(qdev, ifup, qdev->ndev, |
3579 | "Failed to init routing register for broadcast packets.\n"); | 3609 | "Failed to init routing register for broadcast packets.\n"); |
3580 | goto exit; | 3610 | goto exit; |
3581 | } | 3611 | } |
3582 | /* If we have more than one inbound queue, then turn on RSS in the | 3612 | /* If we have more than one inbound queue, then turn on RSS in the |
@@ -3586,8 +3616,8 @@ static int ql_route_initialize(struct ql_adapter *qdev) | |||
3586 | status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, | 3616 | status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, |
3587 | RT_IDX_RSS_MATCH, 1); | 3617 | RT_IDX_RSS_MATCH, 1); |
3588 | if (status) { | 3618 | if (status) { |
3589 | QPRINTK(qdev, IFUP, ERR, | 3619 | netif_err(qdev, ifup, qdev->ndev, |
3590 | "Failed to init routing register for MATCH RSS packets.\n"); | 3620 | "Failed to init routing register for MATCH RSS packets.\n"); |
3591 | goto exit; | 3621 | goto exit; |
3592 | } | 3622 | } |
3593 | } | 3623 | } |
@@ -3595,8 +3625,8 @@ static int ql_route_initialize(struct ql_adapter *qdev) | |||
3595 | status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, | 3625 | status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, |
3596 | RT_IDX_CAM_HIT, 1); | 3626 | RT_IDX_CAM_HIT, 1); |
3597 | if (status) | 3627 | if (status) |
3598 | QPRINTK(qdev, IFUP, ERR, | 3628 | netif_err(qdev, ifup, qdev->ndev, |
3599 | "Failed to init routing register for CAM packets.\n"); | 3629 | "Failed to init routing register for CAM packets.\n"); |
3600 | exit: | 3630 | exit: |
3601 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | 3631 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
3602 | return status; | 3632 | return status; |
@@ -3614,13 +3644,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev) | |||
3614 | set &= qdev->port_link_up; | 3644 | set &= qdev->port_link_up; |
3615 | status = ql_set_mac_addr(qdev, set); | 3645 | status = ql_set_mac_addr(qdev, set); |
3616 | if (status) { | 3646 | if (status) { |
3617 | QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); | 3647 | netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); |
3618 | return status; | 3648 | return status; |
3619 | } | 3649 | } |
3620 | 3650 | ||
3621 | status = ql_route_initialize(qdev); | 3651 | status = ql_route_initialize(qdev); |
3622 | if (status) | 3652 | if (status) |
3623 | QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n"); | 3653 | netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); |
3624 | 3654 | ||
3625 | return status; | 3655 | return status; |
3626 | } | 3656 | } |
@@ -3685,8 +3715,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | |||
3685 | for (i = 0; i < qdev->rx_ring_count; i++) { | 3715 | for (i = 0; i < qdev->rx_ring_count; i++) { |
3686 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); | 3716 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); |
3687 | if (status) { | 3717 | if (status) { |
3688 | QPRINTK(qdev, IFUP, ERR, | 3718 | netif_err(qdev, ifup, qdev->ndev, |
3689 | "Failed to start rx ring[%d].\n", i); | 3719 | "Failed to start rx ring[%d].\n", i); |
3690 | return status; | 3720 | return status; |
3691 | } | 3721 | } |
3692 | } | 3722 | } |
@@ -3697,7 +3727,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | |||
3697 | if (qdev->rss_ring_count > 1) { | 3727 | if (qdev->rss_ring_count > 1) { |
3698 | status = ql_start_rss(qdev); | 3728 | status = ql_start_rss(qdev); |
3699 | if (status) { | 3729 | if (status) { |
3700 | QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n"); | 3730 | netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); |
3701 | return status; | 3731 | return status; |
3702 | } | 3732 | } |
3703 | } | 3733 | } |
@@ -3706,8 +3736,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | |||
3706 | for (i = 0; i < qdev->tx_ring_count; i++) { | 3736 | for (i = 0; i < qdev->tx_ring_count; i++) { |
3707 | status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); | 3737 | status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); |
3708 | if (status) { | 3738 | if (status) { |
3709 | QPRINTK(qdev, IFUP, ERR, | 3739 | netif_err(qdev, ifup, qdev->ndev, |
3710 | "Failed to start tx ring[%d].\n", i); | 3740 | "Failed to start tx ring[%d].\n", i); |
3711 | return status; | 3741 | return status; |
3712 | } | 3742 | } |
3713 | } | 3743 | } |
@@ -3715,20 +3745,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | |||
3715 | /* Initialize the port and set the max framesize. */ | 3745 | /* Initialize the port and set the max framesize. */ |
3716 | status = qdev->nic_ops->port_initialize(qdev); | 3746 | status = qdev->nic_ops->port_initialize(qdev); |
3717 | if (status) | 3747 | if (status) |
3718 | QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); | 3748 | netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); |
3719 | 3749 | ||
3720 | /* Set up the MAC address and frame routing filter. */ | 3750 | /* Set up the MAC address and frame routing filter. */ |
3721 | status = ql_cam_route_initialize(qdev); | 3751 | status = ql_cam_route_initialize(qdev); |
3722 | if (status) { | 3752 | if (status) { |
3723 | QPRINTK(qdev, IFUP, ERR, | 3753 | netif_err(qdev, ifup, qdev->ndev, |
3724 | "Failed to init CAM/Routing tables.\n"); | 3754 | "Failed to init CAM/Routing tables.\n"); |
3725 | return status; | 3755 | return status; |
3726 | } | 3756 | } |
3727 | 3757 | ||
3728 | /* Start NAPI for the RSS queues. */ | 3758 | /* Start NAPI for the RSS queues. */ |
3729 | for (i = 0; i < qdev->rss_ring_count; i++) { | 3759 | for (i = 0; i < qdev->rss_ring_count; i++) { |
3730 | QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n", | 3760 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3731 | i); | 3761 | "Enabling NAPI for rx_ring[%d].\n", i); |
3732 | napi_enable(&qdev->rx_ring[i].napi); | 3762 | napi_enable(&qdev->rx_ring[i].napi); |
3733 | } | 3763 | } |
3734 | 3764 | ||
@@ -3745,7 +3775,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev) | |||
3745 | /* Clear all the entries in the routing table. */ | 3775 | /* Clear all the entries in the routing table. */ |
3746 | status = ql_clear_routing_entries(qdev); | 3776 | status = ql_clear_routing_entries(qdev); |
3747 | if (status) { | 3777 | if (status) { |
3748 | QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n"); | 3778 | netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); |
3749 | return status; | 3779 | return status; |
3750 | } | 3780 | } |
3751 | 3781 | ||
@@ -3768,8 +3798,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev) | |||
3768 | } while (time_before(jiffies, end_jiffies)); | 3798 | } while (time_before(jiffies, end_jiffies)); |
3769 | 3799 | ||
3770 | if (value & RST_FO_FR) { | 3800 | if (value & RST_FO_FR) { |
3771 | QPRINTK(qdev, IFDOWN, ERR, | 3801 | netif_err(qdev, ifdown, qdev->ndev, |
3772 | "ETIMEDOUT!!! errored out of resetting the chip!\n"); | 3802 | "ETIMEDOUT!!! errored out of resetting the chip!\n"); |
3773 | status = -ETIMEDOUT; | 3803 | status = -ETIMEDOUT; |
3774 | } | 3804 | } |
3775 | 3805 | ||
@@ -3782,16 +3812,17 @@ static void ql_display_dev_info(struct net_device *ndev) | |||
3782 | { | 3812 | { |
3783 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | 3813 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); |
3784 | 3814 | ||
3785 | QPRINTK(qdev, PROBE, INFO, | 3815 | netif_info(qdev, probe, qdev->ndev, |
3786 | "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " | 3816 | "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " |
3787 | "XG Roll = %d, XG Rev = %d.\n", | 3817 | "XG Roll = %d, XG Rev = %d.\n", |
3788 | qdev->func, | 3818 | qdev->func, |
3789 | qdev->port, | 3819 | qdev->port, |
3790 | qdev->chip_rev_id & 0x0000000f, | 3820 | qdev->chip_rev_id & 0x0000000f, |
3791 | qdev->chip_rev_id >> 4 & 0x0000000f, | 3821 | qdev->chip_rev_id >> 4 & 0x0000000f, |
3792 | qdev->chip_rev_id >> 8 & 0x0000000f, | 3822 | qdev->chip_rev_id >> 8 & 0x0000000f, |
3793 | qdev->chip_rev_id >> 12 & 0x0000000f); | 3823 | qdev->chip_rev_id >> 12 & 0x0000000f); |
3794 | QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); | 3824 | netif_info(qdev, probe, qdev->ndev, |
3825 | "MAC address %pM\n", ndev->dev_addr); | ||
3795 | } | 3826 | } |
3796 | 3827 | ||
3797 | int ql_wol(struct ql_adapter *qdev) | 3828 | int ql_wol(struct ql_adapter *qdev) |
@@ -3808,23 +3839,23 @@ int ql_wol(struct ql_adapter *qdev) | |||
3808 | 3839 | ||
3809 | if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | | 3840 | if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | |
3810 | WAKE_MCAST | WAKE_BCAST)) { | 3841 | WAKE_MCAST | WAKE_BCAST)) { |
3811 | QPRINTK(qdev, IFDOWN, ERR, | 3842 | netif_err(qdev, ifdown, qdev->ndev, |
3812 | "Unsupported WOL paramter. qdev->wol = 0x%x.\n", | 3843 | "Unsupported WOL paramter. qdev->wol = 0x%x.\n", |
3813 | qdev->wol); | 3844 | qdev->wol); |
3814 | return -EINVAL; | 3845 | return -EINVAL; |
3815 | } | 3846 | } |
3816 | 3847 | ||
3817 | if (qdev->wol & WAKE_MAGIC) { | 3848 | if (qdev->wol & WAKE_MAGIC) { |
3818 | status = ql_mb_wol_set_magic(qdev, 1); | 3849 | status = ql_mb_wol_set_magic(qdev, 1); |
3819 | if (status) { | 3850 | if (status) { |
3820 | QPRINTK(qdev, IFDOWN, ERR, | 3851 | netif_err(qdev, ifdown, qdev->ndev, |
3821 | "Failed to set magic packet on %s.\n", | 3852 | "Failed to set magic packet on %s.\n", |
3822 | qdev->ndev->name); | 3853 | qdev->ndev->name); |
3823 | return status; | 3854 | return status; |
3824 | } else | 3855 | } else |
3825 | QPRINTK(qdev, DRV, INFO, | 3856 | netif_info(qdev, drv, qdev->ndev, |
3826 | "Enabled magic packet successfully on %s.\n", | 3857 | "Enabled magic packet successfully on %s.\n", |
3827 | qdev->ndev->name); | 3858 | qdev->ndev->name); |
3828 | 3859 | ||
3829 | wol |= MB_WOL_MAGIC_PKT; | 3860 | wol |= MB_WOL_MAGIC_PKT; |
3830 | } | 3861 | } |
@@ -3832,9 +3863,10 @@ int ql_wol(struct ql_adapter *qdev) | |||
3832 | if (qdev->wol) { | 3863 | if (qdev->wol) { |
3833 | wol |= MB_WOL_MODE_ON; | 3864 | wol |= MB_WOL_MODE_ON; |
3834 | status = ql_mb_wol_mode(qdev, wol); | 3865 | status = ql_mb_wol_mode(qdev, wol); |
3835 | QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", | 3866 | netif_err(qdev, drv, qdev->ndev, |
3836 | (status == 0) ? "Sucessfully set" : "Failed", wol, | 3867 | "WOL %s (wol code 0x%x) on %s\n", |
3837 | qdev->ndev->name); | 3868 | (status == 0) ? "Successfully set" : "Failed",
3869 | wol, qdev->ndev->name); | ||
3838 | } | 3870 | } |
3839 | 3871 | ||
3840 | return status; | 3872 | return status; |
@@ -3875,8 +3907,8 @@ static int ql_adapter_down(struct ql_adapter *qdev) | |||
3875 | 3907 | ||
3876 | status = ql_adapter_reset(qdev); | 3908 | status = ql_adapter_reset(qdev); |
3877 | if (status) | 3909 | if (status) |
3878 | QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", | 3910 | netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", |
3879 | qdev->func); | 3911 | qdev->func); |
3880 | return status; | 3912 | return status; |
3881 | } | 3913 | } |
3882 | 3914 | ||
@@ -3886,7 +3918,7 @@ static int ql_adapter_up(struct ql_adapter *qdev) | |||
3886 | 3918 | ||
3887 | err = ql_adapter_initialize(qdev); | 3919 | err = ql_adapter_initialize(qdev); |
3888 | if (err) { | 3920 | if (err) { |
3889 | QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n"); | 3921 | netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); |
3890 | goto err_init; | 3922 | goto err_init; |
3891 | } | 3923 | } |
3892 | set_bit(QL_ADAPTER_UP, &qdev->flags); | 3924 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
@@ -3918,7 +3950,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev) | |||
3918 | int status = 0; | 3950 | int status = 0; |
3919 | 3951 | ||
3920 | if (ql_alloc_mem_resources(qdev)) { | 3952 | if (ql_alloc_mem_resources(qdev)) { |
3921 | QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n"); | 3953 | netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); |
3922 | return -ENOMEM; | 3954 | return -ENOMEM; |
3923 | } | 3955 | } |
3924 | status = ql_request_irq(qdev); | 3956 | status = ql_request_irq(qdev); |
@@ -3934,7 +3966,7 @@ static int qlge_close(struct net_device *ndev) | |||
3934 | * brought the adapter down. | 3966 | * brought the adapter down. |
3935 | */ | 3967 | */ |
3936 | if (test_bit(QL_EEH_FATAL, &qdev->flags)) { | 3968 | if (test_bit(QL_EEH_FATAL, &qdev->flags)) { |
3937 | QPRINTK(qdev, DRV, ERR, "EEH fatal did unload.\n"); | 3969 | netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); |
3938 | clear_bit(QL_EEH_FATAL, &qdev->flags); | 3970 | clear_bit(QL_EEH_FATAL, &qdev->flags); |
3939 | return 0; | 3971 | return 0; |
3940 | } | 3972 | } |
@@ -4008,9 +4040,10 @@ static int ql_configure_rings(struct ql_adapter *qdev) | |||
4008 | rx_ring->lbq_size = | 4040 | rx_ring->lbq_size = |
4009 | rx_ring->lbq_len * sizeof(__le64); | 4041 | rx_ring->lbq_len * sizeof(__le64); |
4010 | rx_ring->lbq_buf_size = (u16)lbq_buf_len; | 4042 | rx_ring->lbq_buf_size = (u16)lbq_buf_len; |
4011 | QPRINTK(qdev, IFUP, DEBUG, | 4043 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
4012 | "lbq_buf_size %d, order = %d\n", | 4044 | "lbq_buf_size %d, order = %d\n", |
4013 | rx_ring->lbq_buf_size, qdev->lbq_buf_order); | 4045 | rx_ring->lbq_buf_size, |
4046 | qdev->lbq_buf_order); | ||
4014 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; | 4047 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; |
4015 | rx_ring->sbq_size = | 4048 | rx_ring->sbq_size = |
4016 | rx_ring->sbq_len * sizeof(__le64); | 4049 | rx_ring->sbq_len * sizeof(__le64); |
@@ -4074,14 +4107,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) | |||
4074 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4107 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { |
4075 | int i = 3; | 4108 | int i = 3; |
4076 | while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4109 | while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { |
4077 | QPRINTK(qdev, IFUP, ERR, | 4110 | netif_err(qdev, ifup, qdev->ndev, |
4078 | "Waiting for adapter UP...\n"); | 4111 | "Waiting for adapter UP...\n"); |
4079 | ssleep(1); | 4112 | ssleep(1); |
4080 | } | 4113 | } |
4081 | 4114 | ||
4082 | if (!i) { | 4115 | if (!i) { |
4083 | QPRINTK(qdev, IFUP, ERR, | 4116 | netif_err(qdev, ifup, qdev->ndev, |
4084 | "Timed out waiting for adapter UP\n"); | 4117 | "Timed out waiting for adapter UP\n"); |
4085 | return -ETIMEDOUT; | 4118 | return -ETIMEDOUT; |
4086 | } | 4119 | } |
4087 | } | 4120 | } |
@@ -4107,8 +4140,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) | |||
4107 | 4140 | ||
4108 | return status; | 4141 | return status; |
4109 | error: | 4142 | error: |
4110 | QPRINTK(qdev, IFUP, ALERT, | 4143 | netif_alert(qdev, ifup, qdev->ndev, |
4111 | "Driver up/down cycle failed, closing device.\n"); | 4144 | "Driver up/down cycle failed, closing device.\n"); |
4112 | set_bit(QL_ADAPTER_UP, &qdev->flags); | 4145 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
4113 | dev_close(qdev->ndev); | 4146 | dev_close(qdev->ndev); |
4114 | return status; | 4147 | return status; |
@@ -4120,9 +4153,9 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu) | |||
4120 | int status; | 4153 | int status; |
4121 | 4154 | ||
4122 | if (ndev->mtu == 1500 && new_mtu == 9000) { | 4155 | if (ndev->mtu == 1500 && new_mtu == 9000) { |
4123 | QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); | 4156 | netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); |
4124 | } else if (ndev->mtu == 9000 && new_mtu == 1500) { | 4157 | } else if (ndev->mtu == 9000 && new_mtu == 1500) { |
4125 | QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); | 4158 | netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); |
4126 | } else | 4159 | } else |
4127 | return -EINVAL; | 4160 | return -EINVAL; |
4128 | 4161 | ||
@@ -4137,8 +4170,8 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu) | |||
4137 | 4170 | ||
4138 | status = ql_change_rx_buffers(qdev); | 4171 | status = ql_change_rx_buffers(qdev); |
4139 | if (status) { | 4172 | if (status) { |
4140 | QPRINTK(qdev, IFUP, ERR, | 4173 | netif_err(qdev, ifup, qdev->ndev, |
4141 | "Changing MTU failed.\n"); | 4174 | "Changing MTU failed.\n"); |
4142 | } | 4175 | } |
4143 | 4176 | ||
4144 | return status; | 4177 | return status; |
@@ -4198,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4198 | if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { | 4231 | if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { |
4199 | if (ql_set_routing_reg | 4232 | if (ql_set_routing_reg |
4200 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { | 4233 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { |
4201 | QPRINTK(qdev, HW, ERR, | 4234 | netif_err(qdev, hw, qdev->ndev, |
4202 | "Failed to set promiscous mode.\n"); | 4235 | "Failed to set promiscous mode.\n"); |
4203 | } else { | 4236 | } else { |
4204 | set_bit(QL_PROMISCUOUS, &qdev->flags); | 4237 | set_bit(QL_PROMISCUOUS, &qdev->flags); |
4205 | } | 4238 | } |
@@ -4208,8 +4241,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4208 | if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { | 4241 | if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { |
4209 | if (ql_set_routing_reg | 4242 | if (ql_set_routing_reg |
4210 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { | 4243 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { |
4211 | QPRINTK(qdev, HW, ERR, | 4244 | netif_err(qdev, hw, qdev->ndev, |
4212 | "Failed to clear promiscous mode.\n"); | 4245 | "Failed to clear promiscous mode.\n"); |
4213 | } else { | 4246 | } else { |
4214 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | 4247 | clear_bit(QL_PROMISCUOUS, &qdev->flags); |
4215 | } | 4248 | } |
@@ -4225,8 +4258,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4225 | if (!test_bit(QL_ALLMULTI, &qdev->flags)) { | 4258 | if (!test_bit(QL_ALLMULTI, &qdev->flags)) { |
4226 | if (ql_set_routing_reg | 4259 | if (ql_set_routing_reg |
4227 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { | 4260 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { |
4228 | QPRINTK(qdev, HW, ERR, | 4261 | netif_err(qdev, hw, qdev->ndev, |
4229 | "Failed to set all-multi mode.\n"); | 4262 | "Failed to set all-multi mode.\n"); |
4230 | } else { | 4263 | } else { |
4231 | set_bit(QL_ALLMULTI, &qdev->flags); | 4264 | set_bit(QL_ALLMULTI, &qdev->flags); |
4232 | } | 4265 | } |
@@ -4235,8 +4268,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4235 | if (test_bit(QL_ALLMULTI, &qdev->flags)) { | 4268 | if (test_bit(QL_ALLMULTI, &qdev->flags)) { |
4236 | if (ql_set_routing_reg | 4269 | if (ql_set_routing_reg |
4237 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { | 4270 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { |
4238 | QPRINTK(qdev, HW, ERR, | 4271 | netif_err(qdev, hw, qdev->ndev, |
4239 | "Failed to clear all-multi mode.\n"); | 4272 | "Failed to clear all-multi mode.\n"); |
4240 | } else { | 4273 | } else { |
4241 | clear_bit(QL_ALLMULTI, &qdev->flags); | 4274 | clear_bit(QL_ALLMULTI, &qdev->flags); |
4242 | } | 4275 | } |
@@ -4251,16 +4284,16 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4251 | i++, mc_ptr = mc_ptr->next) | 4284 | i++, mc_ptr = mc_ptr->next) |
4252 | if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, | 4285 | if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, |
4253 | MAC_ADDR_TYPE_MULTI_MAC, i)) { | 4286 | MAC_ADDR_TYPE_MULTI_MAC, i)) { |
4254 | QPRINTK(qdev, HW, ERR, | 4287 | netif_err(qdev, hw, qdev->ndev, |
4255 | "Failed to loadmulticast address.\n"); | 4288 | "Failed to loadmulticast address.\n"); |
4256 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 4289 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
4257 | goto exit; | 4290 | goto exit; |
4258 | } | 4291 | } |
4259 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 4292 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
4260 | if (ql_set_routing_reg | 4293 | if (ql_set_routing_reg |
4261 | (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { | 4294 | (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { |
4262 | QPRINTK(qdev, HW, ERR, | 4295 | netif_err(qdev, hw, qdev->ndev, |
4263 | "Failed to set multicast match mode.\n"); | 4296 | "Failed to set multicast match mode.\n"); |
4264 | } else { | 4297 | } else { |
4265 | set_bit(QL_ALLMULTI, &qdev->flags); | 4298 | set_bit(QL_ALLMULTI, &qdev->flags); |
4266 | } | 4299 | } |
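
The lowercase category token (drv, ifup, hw, ...) is what selects the NETIF_MSG_* bit tested against msg_enable, so a message is only emitted when its category is enabled. A minimal userspace sketch of that gating idea follows; the fake_* names and the bit value are illustrative, not the kernel's definitions:

    #include <stdio.h>

    #define NETIF_MSG_HW 0x2000 /* illustrative bit value */

    struct fake_priv {
            unsigned int msg_enable;
    };

    #define fake_netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)

    /* The category token is pasted into a fake_netif_msg_<category>() test,
     * mirroring how netif_err(qdev, hw, ...) resolves to a NETIF_MSG_HW check.
     */
    #define fake_netif_err(priv, category, fmt, ...)                        \
            do {                                                            \
                    if (fake_netif_msg_##category(priv))                    \
                            fprintf(stderr, fmt, ##__VA_ARGS__);            \
            } while (0)

    int main(void)
    {
            struct fake_priv priv = { .msg_enable = NETIF_MSG_HW };

            /* Printed: the hw category bit is set in msg_enable. */
            fake_netif_err(&priv, hw, "Failed to set multicast match mode.\n");

            /* Suppressed once the hw bit is cleared. */
            priv.msg_enable = 0;
            fake_netif_err(&priv, hw, "Failed to clear all-multi mode.\n");
            return 0;
    }
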
@@ -4285,7 +4318,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) | |||
4285 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, | 4318 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, |
4286 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); | 4319 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); |
4287 | if (status) | 4320 | if (status) |
4288 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); | 4321 | netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); |
4289 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 4322 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
4290 | return status; | 4323 | return status; |
4291 | } | 4324 | } |
@@ -4318,8 +4351,8 @@ static void ql_asic_reset_work(struct work_struct *work) | |||
4318 | rtnl_unlock(); | 4351 | rtnl_unlock(); |
4319 | return; | 4352 | return; |
4320 | error: | 4353 | error: |
4321 | QPRINTK(qdev, IFUP, ALERT, | 4354 | netif_alert(qdev, ifup, qdev->ndev, |
4322 | "Driver up/down cycle failed, closing device\n"); | 4355 | "Driver up/down cycle failed, closing device\n"); |
4323 | 4356 | ||
4324 | set_bit(QL_ADAPTER_UP, &qdev->flags); | 4357 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
4325 | dev_close(qdev->ndev); | 4358 | dev_close(qdev->ndev); |
@@ -4578,7 +4611,7 @@ static void ql_timer(unsigned long data) | |||
4578 | 4611 | ||
4579 | var = ql_read32(qdev, STS); | 4612 | var = ql_read32(qdev, STS); |
4580 | if (pci_channel_offline(qdev->pdev)) { | 4613 | if (pci_channel_offline(qdev->pdev)) { |
4581 | QPRINTK(qdev, IFUP, ERR, "EEH STS = 0x%.08x.\n", var); | 4614 | netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); |
4582 | return; | 4615 | return; |
4583 | } | 4616 | } |
4584 | 4617 | ||
@@ -4747,14 +4780,14 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) | |||
4747 | 4780 | ||
4748 | pci_restore_state(pdev); | 4781 | pci_restore_state(pdev); |
4749 | if (pci_enable_device(pdev)) { | 4782 | if (pci_enable_device(pdev)) { |
4750 | QPRINTK(qdev, IFUP, ERR, | 4783 | netif_err(qdev, ifup, qdev->ndev, |
4751 | "Cannot re-enable PCI device after reset.\n"); | 4784 | "Cannot re-enable PCI device after reset.\n"); |
4752 | return PCI_ERS_RESULT_DISCONNECT; | 4785 | return PCI_ERS_RESULT_DISCONNECT; |
4753 | } | 4786 | } |
4754 | pci_set_master(pdev); | 4787 | pci_set_master(pdev); |
4755 | 4788 | ||
4756 | if (ql_adapter_reset(qdev)) { | 4789 | if (ql_adapter_reset(qdev)) { |
4757 | QPRINTK(qdev, DRV, ERR, "reset FAILED!\n"); | 4790 | netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); |
4758 | set_bit(QL_EEH_FATAL, &qdev->flags); | 4791 | set_bit(QL_EEH_FATAL, &qdev->flags); |
4759 | return PCI_ERS_RESULT_DISCONNECT; | 4792 | return PCI_ERS_RESULT_DISCONNECT; |
4760 | } | 4793 | } |
@@ -4771,13 +4804,13 @@ static void qlge_io_resume(struct pci_dev *pdev) | |||
4771 | if (netif_running(ndev)) { | 4804 | if (netif_running(ndev)) { |
4772 | err = qlge_open(ndev); | 4805 | err = qlge_open(ndev); |
4773 | if (err) { | 4806 | if (err) { |
4774 | QPRINTK(qdev, IFUP, ERR, | 4807 | netif_err(qdev, ifup, qdev->ndev, |
4775 | "Device initialization failed after reset.\n"); | 4808 | "Device initialization failed after reset.\n"); |
4776 | return; | 4809 | return; |
4777 | } | 4810 | } |
4778 | } else { | 4811 | } else { |
4779 | QPRINTK(qdev, IFUP, ERR, | 4812 | netif_err(qdev, ifup, qdev->ndev, |
4780 | "Device was not running prior to EEH.\n"); | 4813 | "Device was not running prior to EEH.\n"); |
4781 | } | 4814 | } |
4782 | qdev->timer.expires = jiffies + (5*HZ); | 4815 | qdev->timer.expires = jiffies + (5*HZ); |
4783 | add_timer(&qdev->timer); | 4816 | add_timer(&qdev->timer); |
@@ -4828,7 +4861,7 @@ static int qlge_resume(struct pci_dev *pdev) | |||
4828 | pci_restore_state(pdev); | 4861 | pci_restore_state(pdev); |
4829 | err = pci_enable_device(pdev); | 4862 | err = pci_enable_device(pdev); |
4830 | if (err) { | 4863 | if (err) { |
4831 | QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n"); | 4864 | netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); |
4832 | return err; | 4865 | return err; |
4833 | } | 4866 | } |
4834 | pci_set_master(pdev); | 4867 | pci_set_master(pdev); |
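
All of the messages converted in qlge_main.c remain controllable at run time through qdev->msg_enable. Netdev drivers conventionally expose that field via the ethtool get_msglevel/set_msglevel operations; the sketch below shows the common pattern with placeholder names and is not claimed to be the qlge_ethtool.c implementation:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    struct example_priv {
            u32 msg_enable;
            /* ... */
    };

    static u32 example_get_msglevel(struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);

            return priv->msg_enable;
    }

    static void example_set_msglevel(struct net_device *ndev, u32 value)
    {
            struct example_priv *priv = netdev_priv(ndev);

            priv->msg_enable = value;
    }

    static const struct ethtool_ops example_ethtool_ops = {
            .get_msglevel   = example_get_msglevel,
            .set_msglevel   = example_set_msglevel,
    };
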
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index e2c846f17fc7..3c00462a5d22 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c | |||
@@ -135,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
135 | ql_read_mpi_reg(qdev, qdev->mailbox_out + i, | 135 | ql_read_mpi_reg(qdev, qdev->mailbox_out + i, |
136 | &mbcp->mbox_out[i]); | 136 | &mbcp->mbox_out[i]); |
137 | if (status) { | 137 | if (status) { |
138 | QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n"); | 138 | netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); |
139 | break; | 139 | break; |
140 | } | 140 | } |
141 | } | 141 | } |
@@ -208,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev) | |||
208 | int status; | 208 | int status; |
209 | struct mbox_params *mbcp = &qdev->idc_mbc; | 209 | struct mbox_params *mbcp = &qdev->idc_mbc; |
210 | 210 | ||
211 | QPRINTK(qdev, DRV, ERR, "Enter!\n"); | 211 | netif_err(qdev, drv, qdev->ndev, "Enter!\n"); |
212 | /* Get the status data and start up a thread to | 212 | /* Get the status data and start up a thread to |
213 | * handle the request. | 213 | * handle the request. |
214 | */ | 214 | */ |
@@ -216,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev) | |||
216 | mbcp->out_count = 4; | 216 | mbcp->out_count = 4; |
217 | status = ql_get_mb_sts(qdev, mbcp); | 217 | status = ql_get_mb_sts(qdev, mbcp); |
218 | if (status) { | 218 | if (status) { |
219 | QPRINTK(qdev, DRV, ERR, | 219 | netif_err(qdev, drv, qdev->ndev, |
220 | "Could not read MPI, resetting ASIC!\n"); | 220 | "Could not read MPI, resetting ASIC!\n"); |
221 | ql_queue_asic_error(qdev); | 221 | ql_queue_asic_error(qdev); |
222 | } else { | 222 | } else { |
223 | /* Begin polled mode early so | 223 | /* Begin polled mode early so |
@@ -240,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev) | |||
240 | mbcp->out_count = 4; | 240 | mbcp->out_count = 4; |
241 | status = ql_get_mb_sts(qdev, mbcp); | 241 | status = ql_get_mb_sts(qdev, mbcp); |
242 | if (status) { | 242 | if (status) { |
243 | QPRINTK(qdev, DRV, ERR, | 243 | netif_err(qdev, drv, qdev->ndev, |
244 | "Could not read MPI, resetting RISC!\n"); | 244 | "Could not read MPI, resetting RISC!\n"); |
245 | ql_queue_fw_error(qdev); | 245 | ql_queue_fw_error(qdev); |
246 | } else | 246 | } else |
247 | /* Wake up the sleeping mpi_idc_work thread that is | 247 | /* Wake up the sleeping mpi_idc_work thread that is |
@@ -259,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
259 | 259 | ||
260 | status = ql_get_mb_sts(qdev, mbcp); | 260 | status = ql_get_mb_sts(qdev, mbcp); |
261 | if (status) { | 261 | if (status) { |
262 | QPRINTK(qdev, DRV, ERR, | 262 | netif_err(qdev, drv, qdev->ndev, |
263 | "%s: Could not get mailbox status.\n", __func__); | 263 | "%s: Could not get mailbox status.\n", __func__); |
264 | return; | 264 | return; |
265 | } | 265 | } |
266 | 266 | ||
267 | qdev->link_status = mbcp->mbox_out[1]; | 267 | qdev->link_status = mbcp->mbox_out[1]; |
268 | QPRINTK(qdev, DRV, ERR, "Link Up.\n"); | 268 | netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); |
269 | 269 | ||
270 | /* If we're coming back from an IDC event | 270 | /* If we're coming back from an IDC event |
271 | * then set up the CAM and frame routing. | 271 | * then set up the CAM and frame routing. |
@@ -273,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
273 | if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { | 273 | if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { |
274 | status = ql_cam_route_initialize(qdev); | 274 | status = ql_cam_route_initialize(qdev); |
275 | if (status) { | 275 | if (status) { |
276 | QPRINTK(qdev, IFUP, ERR, | 276 | netif_err(qdev, ifup, qdev->ndev, |
277 | "Failed to init CAM/Routing tables.\n"); | 277 | "Failed to init CAM/Routing tables.\n"); |
278 | return; | 278 | return; |
279 | } else | 279 | } else |
280 | clear_bit(QL_CAM_RT_SET, &qdev->flags); | 280 | clear_bit(QL_CAM_RT_SET, &qdev->flags); |
@@ -285,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
285 | * to our liking. | 285 | * to our liking. |
286 | */ | 286 | */ |
287 | if (!test_bit(QL_PORT_CFG, &qdev->flags)) { | 287 | if (!test_bit(QL_PORT_CFG, &qdev->flags)) { |
288 | QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n"); | 288 | netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); |
289 | set_bit(QL_PORT_CFG, &qdev->flags); | 289 | set_bit(QL_PORT_CFG, &qdev->flags); |
290 | /* Begin polled mode early so | 290 | /* Begin polled mode early so |
291 | * we don't get another interrupt | 291 | * we don't get another interrupt |
@@ -307,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
307 | 307 | ||
308 | status = ql_get_mb_sts(qdev, mbcp); | 308 | status = ql_get_mb_sts(qdev, mbcp); |
309 | if (status) | 309 | if (status) |
310 | QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n"); | 310 | netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); |
311 | 311 | ||
312 | ql_link_off(qdev); | 312 | ql_link_off(qdev); |
313 | } | 313 | } |
@@ -320,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
320 | 320 | ||
321 | status = ql_get_mb_sts(qdev, mbcp); | 321 | status = ql_get_mb_sts(qdev, mbcp); |
322 | if (status) | 322 | if (status) |
323 | QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n"); | 323 | netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); |
324 | else | 324 | else |
325 | QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n"); | 325 | netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); |
326 | 326 | ||
327 | return status; | 327 | return status; |
328 | } | 328 | } |
@@ -335,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
335 | 335 | ||
336 | status = ql_get_mb_sts(qdev, mbcp); | 336 | status = ql_get_mb_sts(qdev, mbcp); |
337 | if (status) | 337 | if (status) |
338 | QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n"); | 338 | netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); |
339 | else | 339 | else |
340 | QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n"); | 340 | netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); |
341 | 341 | ||
342 | return status; | 342 | return status; |
343 | } | 343 | } |
@@ -350,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
350 | 350 | ||
351 | status = ql_get_mb_sts(qdev, mbcp); | 351 | status = ql_get_mb_sts(qdev, mbcp); |
352 | if (status) | 352 | if (status) |
353 | QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n"); | 353 | netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); |
354 | else { | 354 | else { |
355 | int i; | 355 | int i; |
356 | QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n"); | 356 | netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); |
357 | for (i = 0; i < mbcp->out_count; i++) | 357 | for (i = 0; i < mbcp->out_count; i++) |
358 | QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n", | 358 | netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", |
359 | i, mbcp->mbox_out[i]); | 359 | i, mbcp->mbox_out[i]); |
360 | 360 | ||
361 | } | 361 | } |
362 | 362 | ||
@@ -371,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
371 | 371 | ||
372 | status = ql_get_mb_sts(qdev, mbcp); | 372 | status = ql_get_mb_sts(qdev, mbcp); |
373 | if (status) { | 373 | if (status) { |
374 | QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); | 374 | netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); |
375 | } else { | 375 | } else { |
376 | QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", | 376 | netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", |
377 | mbcp->mbox_out[1]); | 377 | mbcp->mbox_out[1]); |
378 | qdev->fw_rev_id = mbcp->mbox_out[1]; | 378 | qdev->fw_rev_id = mbcp->mbox_out[1]; |
379 | status = ql_cam_route_initialize(qdev); | 379 | status = ql_cam_route_initialize(qdev); |
380 | if (status) | 380 | if (status) |
381 | QPRINTK(qdev, IFUP, ERR, | 381 | netif_err(qdev, ifup, qdev->ndev, |
382 | "Failed to init CAM/Routing tables.\n"); | 382 | "Failed to init CAM/Routing tables.\n"); |
383 | } | 383 | } |
384 | } | 384 | } |
385 | 385 | ||
@@ -398,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
398 | mbcp->out_count = 1; | 398 | mbcp->out_count = 1; |
399 | status = ql_get_mb_sts(qdev, mbcp); | 399 | status = ql_get_mb_sts(qdev, mbcp); |
400 | if (status) { | 400 | if (status) { |
401 | QPRINTK(qdev, DRV, ERR, | 401 | netif_err(qdev, drv, qdev->ndev, |
402 | "Could not read MPI, resetting ASIC!\n"); | 402 | "Could not read MPI, resetting ASIC!\n"); |
403 | ql_queue_asic_error(qdev); | 403 | ql_queue_asic_error(qdev); |
404 | goto end; | 404 | goto end; |
405 | } | 405 | } |
@@ -488,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
488 | mbcp->mbox_out[0] = MB_CMD_STS_ERR; | 488 | mbcp->mbox_out[0] = MB_CMD_STS_ERR; |
489 | return status; | 489 | return status; |
490 | } | 490 | } |
491 | QPRINTK(qdev, DRV, ERR, | 491 | netif_err(qdev, drv, qdev->ndev, |
492 | "Firmware initialization failed.\n"); | 492 | "Firmware initialization failed.\n"); |
493 | status = -EIO; | 493 | status = -EIO; |
494 | ql_queue_fw_error(qdev); | 494 | ql_queue_fw_error(qdev); |
495 | break; | 495 | break; |
496 | 496 | ||
497 | case AEN_SYS_ERR: | 497 | case AEN_SYS_ERR: |
498 | QPRINTK(qdev, DRV, ERR, | 498 | netif_err(qdev, drv, qdev->ndev, "System Error.\n"); |
499 | "System Error.\n"); | ||
500 | ql_queue_fw_error(qdev); | 499 | ql_queue_fw_error(qdev); |
501 | status = -EIO; | 500 | status = -EIO; |
502 | break; | 501 | break; |
@@ -509,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
509 | /* Need to support AEN 8110 */ | 508 | /* Need to support AEN 8110 */ |
510 | break; | 509 | break; |
511 | default: | 510 | default: |
512 | QPRINTK(qdev, DRV, ERR, | 511 | netif_err(qdev, drv, qdev->ndev, |
513 | "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); | 512 | "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); |
514 | /* Clear the MPI firmware status. */ | 513 | /* Clear the MPI firmware status. */ |
515 | } | 514 | } |
516 | end: | 515 | end: |
@@ -583,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
583 | goto done; | 582 | goto done; |
584 | } while (time_before(jiffies, count)); | 583 | } while (time_before(jiffies, count)); |
585 | 584 | ||
586 | QPRINTK(qdev, DRV, ERR, | 585 | netif_err(qdev, drv, qdev->ndev, |
587 | "Timed out waiting for mailbox complete.\n"); | 586 | "Timed out waiting for mailbox complete.\n"); |
588 | status = -ETIMEDOUT; | 587 | status = -ETIMEDOUT; |
589 | goto end; | 588 | goto end; |
590 | 589 | ||
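
The mailbox wait above is the usual jiffies-based polling idiom: a deadline is computed once before the loop, and time_before(), which is safe across jiffies wraparound, decides when to give up. A stripped-down sketch, with hypothetical names for the pieces that sit outside this hunk:

    unsigned long deadline = jiffies + 5 * HZ;      /* illustrative budget */

    do {
            if (example_mailbox_done(qdev))         /* hypothetical test */
                    goto done;
            udelay(10);                             /* brief poll interval */
    } while (time_before(jiffies, deadline));

    netif_err(qdev, drv, qdev->ndev,
              "Timed out waiting for mailbox complete.\n");
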
@@ -646,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev) | |||
646 | return status; | 645 | return status; |
647 | 646 | ||
648 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 647 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
649 | QPRINTK(qdev, DRV, ERR, | 648 | netif_err(qdev, drv, qdev->ndev, |
650 | "Failed about firmware command\n"); | 649 | "Failed about firmware command\n"); |
651 | status = -EIO; | 650 | status = -EIO; |
652 | } | 651 | } |
653 | 652 | ||
@@ -678,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev) | |||
678 | return status; | 677 | return status; |
679 | 678 | ||
680 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 679 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
681 | QPRINTK(qdev, DRV, ERR, | 680 | netif_err(qdev, drv, qdev->ndev, |
682 | "Failed Get Firmware State.\n"); | 681 | "Failed Get Firmware State.\n"); |
683 | status = -EIO; | 682 | status = -EIO; |
684 | } | 683 | } |
685 | 684 | ||
@@ -688,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev) | |||
688 | * happen. | 687 | * happen. |
689 | */ | 688 | */ |
690 | if (mbcp->mbox_out[1] & 1) { | 689 | if (mbcp->mbox_out[1] & 1) { |
691 | QPRINTK(qdev, DRV, ERR, | 690 | netif_err(qdev, drv, qdev->ndev, |
692 | "Firmware waiting for initialization.\n"); | 691 | "Firmware waiting for initialization.\n"); |
693 | status = -EIO; | 692 | status = -EIO; |
694 | } | 693 | } |
695 | 694 | ||
@@ -721,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev) | |||
721 | return status; | 720 | return status; |
722 | 721 | ||
723 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 722 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
724 | QPRINTK(qdev, DRV, ERR, | 723 | netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); |
725 | "Failed IDC ACK send.\n"); | ||
726 | status = -EIO; | 724 | status = -EIO; |
727 | } | 725 | } |
728 | return status; | 726 | return status; |
@@ -753,11 +751,11 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev) | |||
753 | return status; | 751 | return status; |
754 | 752 | ||
755 | if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { | 753 | if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { |
756 | QPRINTK(qdev, DRV, ERR, | 754 | netif_err(qdev, drv, qdev->ndev, |
757 | "Port Config sent, wait for IDC.\n"); | 755 | "Port Config sent, wait for IDC.\n"); |
758 | } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 756 | } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
759 | QPRINTK(qdev, DRV, ERR, | 757 | netif_err(qdev, drv, qdev->ndev, |
760 | "Failed Set Port Configuration.\n"); | 758 | "Failed Set Port Configuration.\n"); |
761 | status = -EIO; | 759 | status = -EIO; |
762 | } | 760 | } |
763 | return status; | 761 | return status; |
@@ -791,8 +789,7 @@ int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, | |||
791 | return status; | 789 | return status; |
792 | 790 | ||
793 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 791 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
794 | QPRINTK(qdev, DRV, ERR, | 792 | netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); |
795 | "Failed to dump risc RAM.\n"); | ||
796 | status = -EIO; | 793 | status = -EIO; |
797 | } | 794 | } |
798 | return status; | 795 | return status; |
@@ -842,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev) | |||
842 | return status; | 839 | return status; |
843 | 840 | ||
844 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 841 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
845 | QPRINTK(qdev, DRV, ERR, | 842 | netif_err(qdev, drv, qdev->ndev, |
846 | "Failed Get Port Configuration.\n"); | 843 | "Failed Get Port Configuration.\n"); |
847 | status = -EIO; | 844 | status = -EIO; |
848 | } else { | 845 | } else { |
849 | QPRINTK(qdev, DRV, DEBUG, | 846 | netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, |
850 | "Passed Get Port Configuration.\n"); | 847 | "Passed Get Port Configuration.\n"); |
851 | qdev->link_config = mbcp->mbox_out[1]; | 848 | qdev->link_config = mbcp->mbox_out[1]; |
852 | qdev->max_frame_size = mbcp->mbox_out[2]; | 849 | qdev->max_frame_size = mbcp->mbox_out[2]; |
853 | } | 850 | } |
@@ -874,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) | |||
874 | return status; | 871 | return status; |
875 | 872 | ||
876 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 873 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
877 | QPRINTK(qdev, DRV, ERR, | 874 | netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); |
878 | "Failed to set WOL mode.\n"); | ||
879 | status = -EIO; | 875 | status = -EIO; |
880 | } | 876 | } |
881 | return status; | 877 | return status; |
@@ -917,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) | |||
917 | return status; | 913 | return status; |
918 | 914 | ||
919 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 915 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
920 | QPRINTK(qdev, DRV, ERR, | 916 | netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); |
921 | "Failed to set WOL mode.\n"); | ||
922 | status = -EIO; | 917 | status = -EIO; |
923 | } | 918 | } |
924 | return status; | 919 | return status; |
@@ -944,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev) | |||
944 | wait_for_completion_timeout(&qdev->ide_completion, | 939 | wait_for_completion_timeout(&qdev->ide_completion, |
945 | wait_time); | 940 | wait_time); |
946 | if (!wait_time) { | 941 | if (!wait_time) { |
947 | QPRINTK(qdev, DRV, ERR, | 942 | netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); |
948 | "IDC Timeout.\n"); | ||
949 | break; | 943 | break; |
950 | } | 944 | } |
951 | /* Now examine the response from the IDC process. | 945 | /* Now examine the response from the IDC process. |
@@ -953,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev) | |||
953 | * more wait time. | 947 | * more wait time. |
954 | */ | 948 | */ |
955 | if (mbcp->mbox_out[0] == AEN_IDC_EXT) { | 949 | if (mbcp->mbox_out[0] == AEN_IDC_EXT) { |
956 | QPRINTK(qdev, DRV, ERR, | 950 | netif_err(qdev, drv, qdev->ndev, |
957 | "IDC Time Extension from function.\n"); | 951 | "IDC Time Extension from function.\n"); |
958 | wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; | 952 | wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; |
959 | } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { | 953 | } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { |
960 | QPRINTK(qdev, DRV, ERR, | 954 | netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); |
961 | "IDC Success.\n"); | ||
962 | status = 0; | 955 | status = 0; |
963 | break; | 956 | break; |
964 | } else { | 957 | } else { |
965 | QPRINTK(qdev, DRV, ERR, | 958 | netif_err(qdev, drv, qdev->ndev, |
966 | "IDC: Invalid State 0x%.04x.\n", | 959 | "IDC: Invalid State 0x%.04x.\n", |
967 | mbcp->mbox_out[0]); | 960 | mbcp->mbox_out[0]); |
968 | status = -EIO; | 961 | status = -EIO; |
969 | break; | 962 | break; |
970 | } | 963 | } |
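
The !wait_time test in ql_idc_wait() works because wait_for_completion_timeout() returns 0 when the timeout expires and the number of jiffies still remaining otherwise (the assignment to wait_time sits just outside this hunk). A minimal illustration with a hypothetical completion object:

    static DECLARE_COMPLETION(example_completion);

    unsigned long left;

    left = wait_for_completion_timeout(&example_completion,
                                       msecs_to_jiffies(1000));
    if (!left)
            netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
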
@@ -993,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) | |||
993 | return status; | 986 | return status; |
994 | 987 | ||
995 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 988 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
996 | QPRINTK(qdev, DRV, ERR, | 989 | netif_err(qdev, drv, qdev->ndev, |
997 | "Failed to set LED Configuration.\n"); | 990 | "Failed to set LED Configuration.\n"); |
998 | status = -EIO; | 991 | status = -EIO; |
999 | } | 992 | } |
1000 | 993 | ||
@@ -1019,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev) | |||
1019 | return status; | 1012 | return status; |
1020 | 1013 | ||
1021 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { | 1014 | if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { |
1022 | QPRINTK(qdev, DRV, ERR, | 1015 | netif_err(qdev, drv, qdev->ndev, |
1023 | "Failed to get LED Configuration.\n"); | 1016 | "Failed to get LED Configuration.\n"); |
1024 | status = -EIO; | 1017 | status = -EIO; |
1025 | } else | 1018 | } else |
1026 | qdev->led_config = mbcp->mbox_out[1]; | 1019 | qdev->led_config = mbcp->mbox_out[1]; |
@@ -1050,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) | |||
1050 | return status; | 1043 | return status; |
1051 | 1044 | ||
1052 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { | 1045 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { |
1053 | QPRINTK(qdev, DRV, ERR, | 1046 | netif_err(qdev, drv, qdev->ndev, |
1054 | "Command not supported by firmware.\n"); | 1047 | "Command not supported by firmware.\n"); |
1055 | status = -EINVAL; | 1048 | status = -EINVAL; |
1056 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { | 1049 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { |
1057 | /* This indicates that the firmware is | 1050 | /* This indicates that the firmware is |
1058 | * already in the state we are trying to | 1051 | * already in the state we are trying to |
1059 | * change it to. | 1052 | * change it to. |
1060 | */ | 1053 | */ |
1061 | QPRINTK(qdev, DRV, ERR, | 1054 | netif_err(qdev, drv, qdev->ndev, |
1062 | "Command parameters make no change.\n"); | 1055 | "Command parameters make no change.\n"); |
1063 | } | 1056 | } |
1064 | return status; | 1057 | return status; |
1065 | } | 1058 | } |
@@ -1089,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) | |||
1089 | } | 1082 | } |
1090 | 1083 | ||
1091 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { | 1084 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { |
1092 | QPRINTK(qdev, DRV, ERR, | 1085 | netif_err(qdev, drv, qdev->ndev, |
1093 | "Command not supported by firmware.\n"); | 1086 | "Command not supported by firmware.\n"); |
1094 | status = -EINVAL; | 1087 | status = -EINVAL; |
1095 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { | 1088 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { |
1096 | QPRINTK(qdev, DRV, ERR, | 1089 | netif_err(qdev, drv, qdev->ndev, |
1097 | "Failed to get MPI traffic control.\n"); | 1090 | "Failed to get MPI traffic control.\n"); |
1098 | status = -EIO; | 1091 | status = -EIO; |
1099 | } | 1092 | } |
1100 | return status; | 1093 | return status; |
@@ -1150,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work) | |||
1150 | status = ql_mb_get_port_cfg(qdev); | 1143 | status = ql_mb_get_port_cfg(qdev); |
1151 | rtnl_unlock(); | 1144 | rtnl_unlock(); |
1152 | if (status) { | 1145 | if (status) { |
1153 | QPRINTK(qdev, DRV, ERR, | 1146 | netif_err(qdev, drv, qdev->ndev, |
1154 | "Bug: Failed to get port config data.\n"); | 1147 | "Bug: Failed to get port config data.\n"); |
1155 | goto err; | 1148 | goto err; |
1156 | } | 1149 | } |
1157 | 1150 | ||
@@ -1164,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work) | |||
1164 | qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; | 1157 | qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; |
1165 | status = ql_set_port_cfg(qdev); | 1158 | status = ql_set_port_cfg(qdev); |
1166 | if (status) { | 1159 | if (status) { |
1167 | QPRINTK(qdev, DRV, ERR, | 1160 | netif_err(qdev, drv, qdev->ndev, |
1168 | "Bug: Failed to set port config data.\n"); | 1161 | "Bug: Failed to set port config data.\n"); |
1169 | goto err; | 1162 | goto err; |
1170 | } | 1163 | } |
1171 | end: | 1164 | end: |
@@ -1197,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work) | |||
1197 | 1190 | ||
1198 | switch (aen) { | 1191 | switch (aen) { |
1199 | default: | 1192 | default: |
1200 | QPRINTK(qdev, DRV, ERR, | 1193 | netif_err(qdev, drv, qdev->ndev, |
1201 | "Bug: Unhandled IDC action.\n"); | 1194 | "Bug: Unhandled IDC action.\n"); |
1202 | break; | 1195 | break; |
1203 | case MB_CMD_PORT_RESET: | 1196 | case MB_CMD_PORT_RESET: |
1204 | case MB_CMD_STOP_FW: | 1197 | case MB_CMD_STOP_FW: |
@@ -1213,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work) | |||
1213 | if (timeout) { | 1206 | if (timeout) { |
1214 | status = ql_mb_idc_ack(qdev); | 1207 | status = ql_mb_idc_ack(qdev); |
1215 | if (status) | 1208 | if (status) |
1216 | QPRINTK(qdev, DRV, ERR, | 1209 | netif_err(qdev, drv, qdev->ndev, |
1217 | "Bug: No pending IDC!\n"); | 1210 | "Bug: No pending IDC!\n"); |
1218 | } else { | 1211 | } else { |
1219 | QPRINTK(qdev, DRV, DEBUG, | 1212 | netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, |
1220 | "IDC ACK not required\n"); | 1213 | "IDC ACK not required\n"); |
1221 | status = 0; /* success */ | 1214 | status = 0; /* success */ |
1222 | } | 1215 | } |
1223 | break; | 1216 | break; |
@@ -1246,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work) | |||
1246 | if (timeout) { | 1239 | if (timeout) { |
1247 | status = ql_mb_idc_ack(qdev); | 1240 | status = ql_mb_idc_ack(qdev); |
1248 | if (status) | 1241 | if (status) |
1249 | QPRINTK(qdev, DRV, ERR, | 1242 | netif_err(qdev, drv, qdev->ndev, |
1250 | "Bug: No pending IDC!\n"); | 1243 | "Bug: No pending IDC!\n"); |
1251 | } else { | 1244 | } else { |
1252 | QPRINTK(qdev, DRV, DEBUG, | 1245 | netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, |
1253 | "IDC ACK not required\n"); | 1246 | "IDC ACK not required\n"); |
1254 | status = 0; /* success */ | 1247 | status = 0; /* success */ |
1255 | } | 1248 | } |
1256 | break; | 1249 | break; |
@@ -1298,12 +1291,12 @@ void ql_mpi_reset_work(struct work_struct *work) | |||
1298 | * then there is nothing to do. | 1291 | * then there is nothing to do. |
1299 | */ | 1292 | */ |
1300 | if (!ql_own_firmware(qdev)) { | 1293 | if (!ql_own_firmware(qdev)) { |
1301 | QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n"); | 1294 | netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); |
1302 | return; | 1295 | return; |
1303 | } | 1296 | } |
1304 | 1297 | ||
1305 | if (!ql_core_dump(qdev, qdev->mpi_coredump)) { | 1298 | if (!ql_core_dump(qdev, qdev->mpi_coredump)) { |
1306 | QPRINTK(qdev, DRV, ERR, "Core is dumped!\n"); | 1299 | netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); |
1307 | qdev->core_is_dumped = 1; | 1300 | qdev->core_is_dumped = 1; |
1308 | queue_delayed_work(qdev->workqueue, | 1301 | queue_delayed_work(qdev->workqueue, |
1309 | &qdev->mpi_core_to_log, 5 * HZ); | 1302 | &qdev->mpi_core_to_log, 5 * HZ); |
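
The reset handler finishes by deferring the core-dump-to-log step with queue_delayed_work(), so the dump is pushed out roughly five seconds later on the driver's own workqueue. A generic sketch of that delayed-work pattern follows; the handler and work item names are placeholders, and only the 5 * HZ delay mirrors the hunk above:

    #include <linux/workqueue.h>

    static void example_core_to_log(struct work_struct *work)
    {
            /* ... copy the captured core dump out to the kernel log ... */
    }

    static DECLARE_DELAYED_WORK(example_work, example_core_to_log);

    static void example_schedule(struct workqueue_struct *wq)
    {
            /* Run example_core_to_log() on wq about five seconds from now. */
            queue_delayed_work(wq, &example_work, 5 * HZ);
    }
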