 drivers/net/bnx2x/bnx2x.h      |   3
 drivers/net/bnx2x/bnx2x_cmn.c  | 206
 drivers/net/bnx2x/bnx2x_cmn.h  |  75
 drivers/net/bnx2x/bnx2x_main.c |  78
 4 files changed, 228 insertions(+), 134 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9b78a0487791..d80809f5ffc9 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -308,6 +308,7 @@ union host_hc_status_block {
 
 struct bnx2x_fastpath {
 
+#define BNX2X_NAPI_WEIGHT	128
 	struct napi_struct	napi;
 	union host_hc_status_block	status_blk;
 	/* chip independed shortcuts into sb structure */
@@ -920,8 +921,10 @@ struct bnx2x {
 #define USING_DAC_FLAG		0x10
 #define USING_MSIX_FLAG		0x20
 #define USING_MSI_FLAG		0x40
+
 #define TPA_ENABLE_FLAG		0x80
 #define NO_MCP_FLAG		0x100
+#define DISABLE_MSI_FLAG	0x200
 #define BP_NOMCP(bp)		(bp->flags & NO_MCP_FLAG)
 #define HW_VLAN_TX_FLAG		0x400
 #define HW_VLAN_RX_FLAG		0x800
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index da46309a855c..2998969beacc 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -29,7 +29,6 @@
 
 #include "bnx2x_init.h"
 
-static int bnx2x_poll(struct napi_struct *napi, int budget);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -989,55 +988,49 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 	}
 }
 
-void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
+void bnx2x_free_irq(struct bnx2x *bp)
 {
-	if (bp->flags & USING_MSIX_FLAG) {
-		if (!disable_only)
-			bnx2x_free_msix_irqs(bp);
-		pci_disable_msix(bp->pdev);
-		bp->flags &= ~USING_MSIX_FLAG;
-
-	} else if (bp->flags & USING_MSI_FLAG) {
-		if (!disable_only)
-			free_irq(bp->pdev->irq, bp->dev);
-		pci_disable_msi(bp->pdev);
-		bp->flags &= ~USING_MSI_FLAG;
-
-	} else if (!disable_only)
+	if (bp->flags & USING_MSIX_FLAG)
+		bnx2x_free_msix_irqs(bp);
+	else if (bp->flags & USING_MSI_FLAG)
+		free_irq(bp->pdev->irq, bp->dev);
+	else
 		free_irq(bp->pdev->irq, bp->dev);
 }
 
-static int bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
 {
-	int i, rc, offset = 1;
-	int igu_vec = 0;
+	int msix_vec = 0, i, rc, req_cnt;
 
-	bp->msix_table[0].entry = igu_vec;
-	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
+	bp->msix_table[msix_vec].entry = msix_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+	   bp->msix_table[0].entry);
+	msix_vec++;
 
 #ifdef BCM_CNIC
-	igu_vec = BP_L_ID(bp) + offset;
-	bp->msix_table[1].entry = igu_vec;
-	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
-	offset++;
+	bp->msix_table[msix_vec].entry = msix_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+	msix_vec++;
 #endif
 	for_each_queue(bp, i) {
-		igu_vec = BP_L_ID(bp) + offset + i;
-		bp->msix_table[i + offset].entry = igu_vec;
+		bp->msix_table[msix_vec].entry = msix_vec;
 		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
-		   "(fastpath #%u)\n", i + offset, igu_vec, i);
+		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
+		msix_vec++;
 	}
 
-	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
-			     BNX2X_NUM_QUEUES(bp) + offset);
+	req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+
+	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 
 	/*
 	 * reconfigure number of tx/rx queues according to available
 	 * MSI-X vectors
 	 */
 	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
-		/* vectors available for FP */
-		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+		/* how less vectors we will have? */
+		int diff = req_cnt - rc;
 
 		DP(NETIF_MSG_IFUP,
 		   "Trying to use less MSI-X vectors: %d\n", rc);
@@ -1049,12 +1042,17 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 			   "MSI-X is not attainable rc %d\n", rc);
 			return rc;
 		}
-
-		bp->num_queues = min(bp->num_queues, fp_vec);
+		/*
+		 * decrease number of queues by number of unallocated entries
+		 */
+		bp->num_queues -= diff;
 
 		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
 		   bp->num_queues);
 	} else if (rc) {
+		/* fall to INTx if not enough memory */
+		if (rc == -ENOMEM)
+			bp->flags |= DISABLE_MSI_FLAG;
 		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
 		return rc;
 	}
@@ -1083,7 +1081,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
 			 bp->dev->name, i);
 
-		rc = request_irq(bp->msix_table[i + offset].vector,
+		rc = request_irq(bp->msix_table[offset].vector,
 				 bnx2x_msix_fp_int, 0, fp->name, fp);
 		if (rc) {
 			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
@@ -1091,10 +1089,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 			return -EBUSY;
 		}
 
+		offset++;
 		fp->state = BNX2X_FP_STATE_IRQ;
 	}
 
 	i = BNX2X_NUM_QUEUES(bp);
+	offset = 1 + CNIC_CONTEXT_USE;
 	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
 		    " ... fp[%d] %d\n",
 		    bp->msix_table[0].vector,
@@ -1104,7 +1104,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 	return 0;
 }
 
-static int bnx2x_enable_msi(struct bnx2x *bp)
+int bnx2x_enable_msi(struct bnx2x *bp)
 {
 	int rc;
 
@@ -1175,44 +1175,20 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 	bnx2x_napi_disable(bp);
 	netif_tx_disable(bp->dev);
 }
-static int bnx2x_set_num_queues(struct bnx2x *bp)
-{
-	int rc = 0;
 
-	switch (bp->int_mode) {
-	case INT_MODE_MSI:
-		bnx2x_enable_msi(bp);
-		/* falling through... */
-	case INT_MODE_INTx:
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
 		bp->num_queues = 1;
-		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+		break;
+	case ETH_RSS_MODE_REGULAR:
+		bp->num_queues = bnx2x_calc_num_queues(bp);
 		break;
 	default:
-		/* Set number of queues according to bp->multi_mode value */
-		bnx2x_set_num_queues_msix(bp);
-
-		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
-		   bp->num_queues);
-
-		/* if we can't use MSI-X we only need one fp,
-		 * so try to enable MSI-X with the requested number of fp's
-		 * and fallback to MSI or legacy INTx with one fp
-		 */
-		rc = bnx2x_enable_msix(bp);
-		if (rc) {
-			/* failed to enable MSI-X */
-			bp->num_queues = 1;
-
-			/* Fall to INTx if failed to enable MSI-X due to lack of
-			 * memory (in bnx2x_set_num_queues()) */
-			if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
-				bnx2x_enable_msi(bp);
-		}
-
+		bp->num_queues = 1;
 		break;
 	}
-	netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
-	return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
 }
 
 static void bnx2x_release_firmware(struct bnx2x *bp)
@@ -1243,49 +1219,25 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
-	rc = bnx2x_set_num_queues(bp);
-	if (rc)
-		return rc;
-
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
-	if (bnx2x_alloc_mem(bp)) {
-		bnx2x_free_irq(bp, true);
+	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
+
+	netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
+	rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+	if (rc) {
+		BNX2X_ERR("Unable to update real_num_rx_queues\n");
+		goto load_error0;
 	}
 
 	for_each_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 				((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-	for_each_queue(bp, i)
-		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, 128);
-
 	bnx2x_napi_enable(bp);
 
-	if (bp->flags & USING_MSIX_FLAG) {
-		rc = bnx2x_req_msix_irqs(bp);
-		if (rc) {
-			bnx2x_free_irq(bp, true);
-			goto load_error1;
-		}
-	} else {
-		bnx2x_ack_int(bp);
-		rc = bnx2x_req_irq(bp);
-		if (rc) {
-			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
-			bnx2x_free_irq(bp, true);
-			goto load_error1;
-		}
-		if (bp->flags & USING_MSI_FLAG) {
-			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI IRQ %d\n",
-				    bp->pdev->irq);
-		}
-	}
-
 	/* Send LOAD_REQUEST command to MCP
 	   Returns the type of LOAD command:
 	   if it is the first port to be initialized
@@ -1296,11 +1248,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_error2;
+			goto load_error1;
 		}
 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 			rc = -EBUSY; /* other port in diagnostic mode */
-			goto load_error2;
+			goto load_error1;
 		}
 
 	} else {
@@ -1341,6 +1293,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		goto load_error2;
 	}
 
+	/* Connect to IRQs */
+	rc = bnx2x_setup_irqs(bp);
 	if (rc) {
 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 		goto load_error2;
@@ -1481,22 +1435,24 @@ load_error4:
 #endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
-	if (!BP_NOMCP(bp)) {
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
-	}
-	bp->port.pmf = 0;
+
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-load_error2:
+
 	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
+	bnx2x_free_irq(bp);
+load_error2:
+	if (!BP_NOMCP(bp)) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+	}
+
+	bp->port.pmf = 0;
 load_error1:
 	bnx2x_napi_disable(bp);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
+load_error0:
 	bnx2x_free_mem(bp);
 
 	bnx2x_release_firmware(bp);
@@ -1544,7 +1500,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		bnx2x_netif_stop(bp, 1);
 
 		/* Release IRQs */
-		bnx2x_free_irq(bp, false);
+		bnx2x_free_irq(bp);
 	}
 
 	bp->port.pmf = 0;
@@ -1553,8 +1509,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
+
 	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
@@ -1624,7 +1579,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
  * net_device service functions
  */
 
-static int bnx2x_poll(struct napi_struct *napi, int budget)
+int bnx2x_poll(struct napi_struct *napi, int budget)
 {
 	int work_done = 0;
 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
@@ -2261,6 +2216,31 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }
 
+
+int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+	int rc = 0;
+	if (bp->flags & USING_MSIX_FLAG) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc)
+			return rc;
+	} else {
+		bnx2x_ack_int(bp);
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+			return rc;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI IRQ %d\n",
+				    bp->pdev->irq);
+		}
+	}
+
+	return 0;
+}
+
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
 	kfree(bp->fp);
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index f08a42ad6b47..1d9686ea6b66 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -23,6 +23,7 @@
 
 #include "bnx2x.h"
 
+extern int num_queues;
 
 /*********************** Interfaces ****************************
  *  Functions that need to be implemented by each driver version
@@ -193,12 +194,12 @@ int bnx2x_stop_fw_client(struct bnx2x *bp,
 			 struct bnx2x_client_ramrod_params *p);
 
 /**
- * Set number of quueus according to mode
+ * Set number of queues according to mode
  *
  * @param bp
  *
  */
-void bnx2x_set_num_queues_msix(struct bnx2x *bp);
+void bnx2x_set_num_queues(struct bnx2x *bp);
 
 /**
  * Cleanup chip internals:
@@ -325,6 +326,42 @@ int bnx2x_func_stop(struct bnx2x *bp);
  */
 void bnx2x_ilt_set_info(struct bnx2x *bp);
 
+/**
+ * Fill msix_table, request vectors, update num_queues according
+ * to number of available vectors
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * Request msi mode from OS, updated internals accordingly
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * Request IRQ vectors from OS.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_setup_irqs(struct bnx2x *bp);
+/**
+ * NAPI callback
+ *
+ * @param napi
+ * @param budget
+ *
+ * @return int
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget);
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	barrier(); /* status block is written to by the chip */
@@ -605,9 +642,41 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	sge->addr_lo = 0;
 }
 
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+	int i;
 
+	/* Add NAPI objects */
+	for_each_queue(bp, i)
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
 
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
 
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+	if (bp->flags & USING_MSIX_FLAG) {
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~USING_MSIX_FLAG;
+	} else if (bp->flags & USING_MSI_FLAG) {
+		pci_disable_msi(bp->pdev);
+		bp->flags &= ~USING_MSI_FLAG;
+	}
+}
+
+static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+	return  num_queues ?
+		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
+		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+}
 
 static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 {
@@ -877,7 +946,7 @@ void bnx2x_tx_timeout(struct net_device *dev);
 void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
 void bnx2x_netif_start(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
-void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
+void bnx2x_free_irq(struct bnx2x *bp);
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
 int bnx2x_resume(struct pci_dev *pdev);
 void bnx2x_free_skbs(struct bnx2x *bp);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 0ac416a14202..2572eb40c0ed 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -90,7 +90,7 @@ module_param(multi_mode, int, 0);
 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");
 
-static int num_queues;
+int num_queues;
 module_param(num_queues, int, 0);
 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");
@@ -6409,28 +6409,57 @@ int bnx2x_setup_fw_client(struct bnx2x *bp,
 	return rc;
 }
 
-void bnx2x_set_num_queues_msix(struct bnx2x *bp)
+/**
+ * Configure interrupt mode according to current configuration.
+ * In case of MSI-X it will also try to enable MSI-X.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
 {
+	int rc = 0;
 
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
+	switch (bp->int_mode) {
+	case INT_MODE_MSI:
+		bnx2x_enable_msi(bp);
+		/* falling through... */
+	case INT_MODE_INTx:
 		bp->num_queues = 1;
+		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
 		break;
+	default:
+		/* Set number of queues according to bp->multi_mode value */
+		bnx2x_set_num_queues(bp);
 
-	case ETH_RSS_MODE_REGULAR:
-		if (num_queues)
-			bp->num_queues = min_t(u32, num_queues,
-					       BNX2X_MAX_QUEUES(bp));
-		else
-			bp->num_queues = min_t(u32, num_online_cpus(),
-					       BNX2X_MAX_QUEUES(bp));
-		break;
+		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+		   bp->num_queues);
 
+		/* if we can't use MSI-X we only need one fp,
+		 * so try to enable MSI-X with the requested number of fp's
+		 * and fallback to MSI or legacy INTx with one fp
+		 */
+		rc = bnx2x_enable_msix(bp);
+		if (rc) {
+			/* failed to enable MSI-X */
+			if (bp->multi_mode)
+				DP(NETIF_MSG_IFUP,
+				   "Multi requested but failed to "
+				   "enable MSI-X (%d), "
+				   "set number of queues to %d\n",
+				   bp->num_queues,
+				   1);
+			bp->num_queues = 1;
+
+			if (!(bp->flags & DISABLE_MSI_FLAG))
+				bnx2x_enable_msi(bp);
+		}
 
-	default:
-		bp->num_queues = 1;
 		break;
 	}
+
+	return rc;
 }
 
 void bnx2x_ilt_set_info(struct bnx2x *bp)
@@ -6881,7 +6910,7 @@ unload_error:
 	bnx2x_netif_stop(bp, 1);
 
 	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
+	bnx2x_free_irq(bp);
 
 	/* Reset the chip */
 	bnx2x_reset_chip(bp, reset_code);
@@ -9024,7 +9053,16 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 		goto init_one_exit;
 	}
 
+	/* Configure interupt mode: try to enable MSI-X/MSI if
+	 * needed, set bp->num_queues appropriately.
+	 */
+	bnx2x_set_int_mode(bp);
+
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
+
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
 	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
@@ -9068,6 +9106,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
+
+	/* Disable MSI/MSI-X */
+	bnx2x_disable_msi(bp);
 	/* Make sure RESET task is not scheduled before continuing */
 	cancel_delayed_work_sync(&bp->reset_task);
 
@@ -9104,15 +9147,14 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 
 	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
+	bnx2x_free_irq(bp);
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 
 	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
+
 	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
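
For orientation, the outline below condenses the interrupt/NAPI lifecycle the hunks above establish: interrupt-mode selection and NAPI registration move to probe/remove time, while the load/unload paths only attach and release the IRQ handlers. It is an illustrative call-order sketch distilled from the patch, not standalone compilable code.

	/* bnx2x_init_one() (probe): pick the interrupt mode once, then add NAPI */
	bnx2x_set_int_mode(bp);    /* tries MSI-X, may fall back to MSI/INTx */
	bnx2x_add_all_napi(bp);    /* netif_napi_add() per queue, BNX2X_NAPI_WEIGHT */

	/* bnx2x_nic_load() (ndo_open): attach handlers to the vectors chosen above */
	rc = bnx2x_setup_irqs(bp); /* bnx2x_req_msix_irqs() or request_irq() */

	/* bnx2x_nic_unload() (ndo_stop): release handlers, keep MSI-X/MSI enabled */
	bnx2x_free_irq(bp);

	/* bnx2x_remove_one() (remove): undo what probe set up */
	bnx2x_del_all_napi(bp);    /* netif_napi_del() per queue */
	bnx2x_disable_msi(bp);     /* pci_disable_msix()/pci_disable_msi() */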