Diffstat (limited to 'drivers/net/bnx2x.c')
-rw-r--r--  drivers/net/bnx2x.c  2663
1 file changed, 1809 insertions, 854 deletions
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
index afc7f34b1dcf..8af142ccf373 100644
--- a/drivers/net/bnx2x.c
+++ b/drivers/net/bnx2x.c
@@ -1,6 +1,6 @@
1/* bnx2x.c: Broadcom Everest network driver. 1/* bnx2x.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -10,13 +10,13 @@
10 * Based on code from Michael Chan's bnx2 driver 10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman 11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov 12 * Slowpath rework by Vladislav Zolotarov
13 * Statistics and Link managment by Yitchak Gertner 13 * Statistics and Link management by Yitchak Gertner
14 * 14 *
15 */ 15 */
16 16
17/* define this to make the driver freeze on error 17/* define this to make the driver freeze on error
18 * to allow getting debug info 18 * to allow getting debug info
19 * (you will need to reboot afterwords) 19 * (you will need to reboot afterwards)
20 */ 20 */
21/*#define BNX2X_STOP_ON_ERROR*/ 21/*#define BNX2X_STOP_ON_ERROR*/
22 22
@@ -63,22 +63,21 @@
63#include "bnx2x.h" 63#include "bnx2x.h"
64#include "bnx2x_init.h" 64#include "bnx2x_init.h"
65 65
66#define DRV_MODULE_VERSION "0.40.15" 66#define DRV_MODULE_VERSION "1.40.22"
67#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $" 67#define DRV_MODULE_RELDATE "2007/11/27"
68#define BNX2X_BC_VER 0x040009 68#define BNX2X_BC_VER 0x040200
69 69
70/* Time in jiffies before concluding the transmitter is hung. */ 70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ) 71#define TX_TIMEOUT (5*HZ)
72 72
73static char version[] __devinitdata = 73static char version[] __devinitdata =
74 "Broadcom NetXtreme II 577xx 10Gigabit Ethernet Driver " 74 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); 77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION); 80MODULE_VERSION(DRV_MODULE_VERSION);
81MODULE_INFO(cvs_version, "$Revision: #356 $");
82 81
83static int use_inta; 82static int use_inta;
84static int poll; 83static int poll;
@@ -94,8 +93,8 @@ module_param(debug, int, 0);
94MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); 93MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95MODULE_PARM_DESC(poll, "use polling (for debug)"); 94MODULE_PARM_DESC(poll, "use polling (for debug)");
96MODULE_PARM_DESC(onefunc, "enable only first function"); 95MODULE_PARM_DESC(onefunc, "enable only first function");
97MODULE_PARM_DESC(nomcp, "ignore managment CPU (Implies onefunc)"); 96MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98MODULE_PARM_DESC(debug, "defualt debug msglevel"); 97MODULE_PARM_DESC(debug, "default debug msglevel");
99 98
100#ifdef BNX2X_MULTI 99#ifdef BNX2X_MULTI
101module_param(use_multi, int, 0); 100module_param(use_multi, int, 0);
@@ -298,8 +297,7 @@ static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
298 297
299static int bnx2x_mc_assert(struct bnx2x *bp) 298static int bnx2x_mc_assert(struct bnx2x *bp)
300{ 299{
301 int i, j; 300 int i, j, rc = 0;
302 int rc = 0;
303 char last_idx; 301 char last_idx;
304 const char storm[] = {"XTCU"}; 302 const char storm[] = {"XTCU"};
305 const u32 intmem_base[] = { 303 const u32 intmem_base[] = {
@@ -313,8 +311,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
313 for (i = 0; i < 4; i++) { 311 for (i = 0; i < 4; i++) {
314 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + 312 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
315 intmem_base[i]); 313 intmem_base[i]);
316 BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", 314 if (last_idx)
317 storm[i], last_idx); 315 BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
316 storm[i], last_idx);
318 317
319 /* print the asserts */ 318 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { 319 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
@@ -330,7 +329,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
330 intmem_base[i]); 329 intmem_base[i]);
331 330
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 331 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
333 BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x =" 332 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n", 333 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0); 334 storm[i], j, row3, row2, row1, row0);
336 rc++; 335 rc++;
@@ -341,6 +340,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
341 } 340 }
342 return rc; 341 return rc;
343} 342}
343
344static void bnx2x_fw_dump(struct bnx2x *bp) 344static void bnx2x_fw_dump(struct bnx2x *bp)
345{ 345{
346 u32 mark, offset; 346 u32 mark, offset;
@@ -348,21 +348,22 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
348 int word; 348 int word;
349 349
350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); 350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
351 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark); 351 mark = ((mark + 0x3) & ~0x3);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
352 353
353 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { 354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
354 for (word = 0; word < 8; word++) 355 for (word = 0; word < 8; word++)
355 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
356 offset + 4*word)); 357 offset + 4*word));
357 data[8] = 0x0; 358 data[8] = 0x0;
358 printk(KERN_ERR PFX "%s", (char *)data); 359 printk(KERN_CONT "%s", (char *)data);
359 } 360 }
360 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { 361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
361 for (word = 0; word < 8; word++) 362 for (word = 0; word < 8; word++)
362 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
363 offset + 4*word)); 364 offset + 4*word));
364 data[8] = 0x0; 365 data[8] = 0x0;
365 printk(KERN_ERR PFX "%s", (char *)data); 366 printk(KERN_CONT "%s", (char *)data);
366 } 367 }
367 printk("\n" KERN_ERR PFX "end of fw dump\n"); 368 printk("\n" KERN_ERR PFX "end of fw dump\n");
368} 369}
@@ -427,10 +428,10 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
427 } 428 }
428 } 429 }
429 430
430 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)" 431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
431 " def_x_idx(%u) def_att_idx(%u) attn_state(%u)" 432 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
432 " spq_prod_idx(%u)\n", 433 " spq_prod_idx(%u)\n",
433 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx, 434 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
434 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
435 436
436 437
@@ -441,7 +442,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
441 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n"); 442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
442} 443}
443 444
444static void bnx2x_enable_int(struct bnx2x *bp) 445static void bnx2x_int_enable(struct bnx2x *bp)
445{ 446{
446 int port = bp->port; 447 int port = bp->port;
447 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -454,18 +455,26 @@ static void bnx2x_enable_int(struct bnx2x *bp)
454 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
455 } else { 456 } else {
456 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
457 HC_CONFIG_0_REG_INT_LINE_EN_0 | 459 HC_CONFIG_0_REG_INT_LINE_EN_0 |
458 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 460 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
461
462 /* Errata A0.158 workaround */
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
464 val, port, addr, msix);
465
466 REG_WR(bp, addr, val);
467
459 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 468 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
460 } 469 }
461 470
462 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n", 471 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
463 val, port, addr, msix); 472 val, port, addr, msix);
464 473
465 REG_WR(bp, addr, val); 474 REG_WR(bp, addr, val);
466} 475}
467 476
468static void bnx2x_disable_int(struct bnx2x *bp) 477static void bnx2x_int_disable(struct bnx2x *bp)
469{ 478{
470 int port = bp->port; 479 int port = bp->port;
471 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 480 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -484,15 +493,15 @@ static void bnx2x_disable_int(struct bnx2x *bp)
484 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 493 BNX2X_ERR("BUG! proper val not read from IGU!\n");
485} 494}
486 495
487static void bnx2x_disable_int_sync(struct bnx2x *bp) 496static void bnx2x_int_disable_sync(struct bnx2x *bp)
488{ 497{
489 498
490 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 499 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
491 int i; 500 int i;
492 501
493 atomic_inc(&bp->intr_sem); 502 atomic_inc(&bp->intr_sem);
494 /* prevent the HW from sending interrupts*/ 503 /* prevent the HW from sending interrupts */
495 bnx2x_disable_int(bp); 504 bnx2x_int_disable(bp);
496 505
497 /* make sure all ISRs are done */ 506 /* make sure all ISRs are done */
498 if (msix) { 507 if (msix) {
@@ -775,6 +784,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
775 mb(); /* force bnx2x_wait_ramrod to see the change */ 784 mb(); /* force bnx2x_wait_ramrod to see the change */
776 return; 785 return;
777 } 786 }
787
778 switch (command | bp->state) { 788 switch (command | bp->state) {
779 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT): 789 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
780 DP(NETIF_MSG_IFUP, "got setup ramrod\n"); 790 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
@@ -787,20 +797,20 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
787 fp->state = BNX2X_FP_STATE_HALTED; 797 fp->state = BNX2X_FP_STATE_HALTED;
788 break; 798 break;
789 799
790 case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
791 DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
792 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
793 break;
794
795 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): 800 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
796 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); 801 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
797 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED; 802 cid);
803 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
798 break; 804 break;
799 805
800 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 806 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
801 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 807 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
802 break; 808 break;
803 809
810 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
811 DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
812 break;
813
804 default: 814 default:
805 BNX2X_ERR("unexpected ramrod (%d) state is %x\n", 815 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
806 command, bp->state); 816 command, bp->state);
@@ -1179,12 +1189,175 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1179 return val; 1189 return val;
1180} 1190}
1181 1191
1192static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1193{
1194 u32 cnt;
1195 u32 lock_status;
1196 u32 resource_bit = (1 << resource);
1197 u8 func = bp->port;
1198
1199 /* Validating that the resource is within range */
1200 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1201 DP(NETIF_MSG_HW,
1202 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1203 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1204 return -EINVAL;
1205 }
1206
1207 /* Validating that the resource is not already taken */
1208 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1209 if (lock_status & resource_bit) {
1210 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1211 lock_status, resource_bit);
1212 return -EEXIST;
1213 }
1214
1215 /* Try for 1 second every 5ms */
1216 for (cnt = 0; cnt < 200; cnt++) {
1217 /* Try to acquire the lock */
1218 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1219 resource_bit);
1220 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1221 if (lock_status & resource_bit)
1222 return 0;
1223
1224 msleep(5);
1225 }
1226 DP(NETIF_MSG_HW, "Timeout\n");
1227 return -EAGAIN;
1228}
1229
1230static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1231{
1232 u32 lock_status;
1233 u32 resource_bit = (1 << resource);
1234 u8 func = bp->port;
1235
1236 /* Validating that the resource is within range */
1237 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1238 DP(NETIF_MSG_HW,
1239 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1240 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1241 return -EINVAL;
1242 }
1243
1244 /* Validating that the resource is currently taken */
1245 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1246 if (!(lock_status & resource_bit)) {
1247 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1248 lock_status, resource_bit);
1249 return -EFAULT;
1250 }
1251
1252 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1253 return 0;
1254}
1255
1256static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1257{
1258 /* The GPIO should be swapped if swap register is set and active */
1259 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1260 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1261 int gpio_shift = gpio_num +
1262 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1263 u32 gpio_mask = (1 << gpio_shift);
1264 u32 gpio_reg;
1265
1266 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1267 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1268 return -EINVAL;
1269 }
1270
1271 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1272 /* read GPIO and mask except the float bits */
1273 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1274
1275 switch (mode) {
1276 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1277 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1278 gpio_num, gpio_shift);
1279 /* clear FLOAT and set CLR */
1280 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1281 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1282 break;
1283
1284 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1285 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1286 gpio_num, gpio_shift);
1287 /* clear FLOAT and set SET */
1288 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1289 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1290 break;
1291
1292 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1293 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1294 gpio_num, gpio_shift);
1295 /* set FLOAT */
1296 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1297 break;
1298
1299 default:
1300 break;
1301 }
1302
1303 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1304 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1305
1306 return 0;
1307}
1308
1309static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1310{
1311 u32 spio_mask = (1 << spio_num);
1312 u32 spio_reg;
1313
1314 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1315 (spio_num > MISC_REGISTERS_SPIO_7)) {
1316 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1317 return -EINVAL;
1318 }
1319
1320 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1321 /* read SPIO and mask except the float bits */
1322 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1323
1324 switch (mode) {
1325 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1326 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1327 /* clear FLOAT and set CLR */
1328 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1329 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1330 break;
1331
1332 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1333 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1334 /* clear FLOAT and set SET */
1335 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1336 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1337 break;
1338
1339 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1340 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1341 /* set FLOAT */
1342 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1343 break;
1344
1345 default:
1346 break;
1347 }
1348
1349 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1350 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1351
1352 return 0;
1353}
1354
1182static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val) 1355static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1183{ 1356{
1184 int rc;
1185 u32 tmp, i;
1186 int port = bp->port; 1357 int port = bp->port;
1187 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 1358 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1359 u32 tmp;
1360 int i, rc;
1188 1361
1189/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n", 1362/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1190 bp->phy_addr, reg, val); */ 1363 bp->phy_addr, reg, val); */
@@ -1236,8 +1409,8 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1236{ 1409{
1237 int port = bp->port; 1410 int port = bp->port;
1238 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 1411 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1239 u32 val, i; 1412 u32 val;
1240 int rc; 1413 int i, rc;
1241 1414
1242 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1415 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1243 1416
@@ -1286,58 +1459,54 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1286 return rc; 1459 return rc;
1287} 1460}
1288 1461
1289static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val) 1462static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1463 u32 phy_addr, u32 reg, u32 addr, u32 val)
1290{ 1464{
1291 int rc = 0; 1465 u32 tmp;
1292 u32 tmp, i; 1466 int i, rc = 0;
1293 int port = bp->port;
1294 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1295
1296 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1297
1298 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1299 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1300 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1301 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1302 udelay(40);
1303 }
1304 1467
1305 /* set clause 45 mode */ 1468 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1306 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1469 * (a value of 49==0x31) and make sure that the AUTO poll is off
1307 tmp |= EMAC_MDIO_MODE_CLAUSE_45; 1470 */
1308 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1471 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1472 tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1473 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1474 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1475 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1476 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1477 udelay(40);
1309 1478
1310 /* address */ 1479 /* address */
1311 tmp = ((bp->phy_addr << 21) | (reg << 16) | addr | 1480 tmp = ((phy_addr << 21) | (reg << 16) | addr |
1312 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1481 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1313 EMAC_MDIO_COMM_START_BUSY); 1482 EMAC_MDIO_COMM_START_BUSY);
1314 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); 1483 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1315 1484
1316 for (i = 0; i < 50; i++) { 1485 for (i = 0; i < 50; i++) {
1317 udelay(10); 1486 udelay(10);
1318 1487
1319 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1488 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1320 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1489 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1321 udelay(5); 1490 udelay(5);
1322 break; 1491 break;
1323 } 1492 }
1324 } 1493 }
1325
1326 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1494 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1327 BNX2X_ERR("write phy register failed\n"); 1495 BNX2X_ERR("write phy register failed\n");
1328 1496
1329 rc = -EBUSY; 1497 rc = -EBUSY;
1498
1330 } else { 1499 } else {
1331 /* data */ 1500 /* data */
1332 tmp = ((bp->phy_addr << 21) | (reg << 16) | val | 1501 tmp = ((phy_addr << 21) | (reg << 16) | val |
1333 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 1502 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1334 EMAC_MDIO_COMM_START_BUSY); 1503 EMAC_MDIO_COMM_START_BUSY);
1335 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); 1504 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1336 1505
1337 for (i = 0; i < 50; i++) { 1506 for (i = 0; i < 50; i++) {
1338 udelay(10); 1507 udelay(10);
1339 1508
1340 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1509 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1341 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1510 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1342 udelay(5); 1511 udelay(5);
1343 break; 1512 break;
@@ -1351,75 +1520,78 @@ static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1351 } 1520 }
1352 } 1521 }
1353 1522
1354 /* unset clause 45 mode */ 1523 /* unset clause 45 mode, set the MDIO clock to a faster value
1355 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1524 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1356 tmp &= ~EMAC_MDIO_MODE_CLAUSE_45; 1525 */
1357 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1526 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1358 1527 tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1359 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1528 tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1360 1529 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1361 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1362 tmp |= EMAC_MDIO_MODE_AUTO_POLL; 1530 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1363 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1531 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1364 }
1365 1532
1366 return rc; 1533 return rc;
1367} 1534}
1368 1535
1369static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr, 1536static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1370 u32 *ret_val) 1537 u32 addr, u32 val)
1371{ 1538{
1372 int port = bp->port; 1539 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1373 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1374 u32 val, i;
1375 int rc = 0;
1376 1540
1377 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1541 return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1542 reg, addr, val);
1543}
1378 1544
1379 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1545static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1380 val &= ~EMAC_MDIO_MODE_AUTO_POLL; 1546 u32 phy_addr, u32 reg, u32 addr,
1381 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1547 u32 *ret_val)
1382 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1548{
1383 udelay(40); 1549 u32 val;
1384 } 1550 int i, rc = 0;
1385 1551
1386 /* set clause 45 mode */ 1552 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1387 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1553 * (a value of 49==0x31) and make sure that the AUTO poll is off
1388 val |= EMAC_MDIO_MODE_CLAUSE_45; 1554 */
1389 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1555 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1556 val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1557 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1558 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1559 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1560 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1561 udelay(40);
1390 1562
1391 /* address */ 1563 /* address */
1392 val = ((bp->phy_addr << 21) | (reg << 16) | addr | 1564 val = ((phy_addr << 21) | (reg << 16) | addr |
1393 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1565 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1394 EMAC_MDIO_COMM_START_BUSY); 1566 EMAC_MDIO_COMM_START_BUSY);
1395 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); 1567 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1396 1568
1397 for (i = 0; i < 50; i++) { 1569 for (i = 0; i < 50; i++) {
1398 udelay(10); 1570 udelay(10);
1399 1571
1400 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1572 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1401 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1573 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1402 udelay(5); 1574 udelay(5);
1403 break; 1575 break;
1404 } 1576 }
1405 } 1577 }
1406
1407 if (val & EMAC_MDIO_COMM_START_BUSY) { 1578 if (val & EMAC_MDIO_COMM_START_BUSY) {
1408 BNX2X_ERR("read phy register failed\n"); 1579 BNX2X_ERR("read phy register failed\n");
1409 1580
1410 *ret_val = 0; 1581 *ret_val = 0;
1411 rc = -EBUSY; 1582 rc = -EBUSY;
1583
1412 } else { 1584 } else {
1413 /* data */ 1585 /* data */
1414 val = ((bp->phy_addr << 21) | (reg << 16) | 1586 val = ((phy_addr << 21) | (reg << 16) |
1415 EMAC_MDIO_COMM_COMMAND_READ_45 | 1587 EMAC_MDIO_COMM_COMMAND_READ_45 |
1416 EMAC_MDIO_COMM_START_BUSY); 1588 EMAC_MDIO_COMM_START_BUSY);
1417 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); 1589 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1418 1590
1419 for (i = 0; i < 50; i++) { 1591 for (i = 0; i < 50; i++) {
1420 udelay(10); 1592 udelay(10);
1421 1593
1422 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1594 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1423 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1595 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1424 val &= EMAC_MDIO_COMM_DATA; 1596 val &= EMAC_MDIO_COMM_DATA;
1425 break; 1597 break;
@@ -1436,31 +1608,39 @@ static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
1436 *ret_val = val; 1608 *ret_val = val;
1437 } 1609 }
1438 1610
1439 /* unset clause 45 mode */ 1611 /* unset clause 45 mode, set the MDIO clock to a faster value
1440 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1612 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1441 val &= ~EMAC_MDIO_MODE_CLAUSE_45; 1613 */
1442 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1614 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1443 1615 val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1444 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1616 val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1445 1617 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1446 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1447 val |= EMAC_MDIO_MODE_AUTO_POLL; 1618 val |= EMAC_MDIO_MODE_AUTO_POLL;
1448 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1619 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1449 }
1450 1620
1451 return rc; 1621 return rc;
1452} 1622}
1453 1623
1454static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val) 1624static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1625 u32 addr, u32 *ret_val)
1626{
1627 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1628
1629 return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1630 reg, addr, ret_val);
1631}
1632
1633static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1634 u32 addr, u32 val)
1455{ 1635{
1456 int i; 1636 int i;
1457 u32 rd_val; 1637 u32 rd_val;
1458 1638
1459 might_sleep(); 1639 might_sleep();
1460 for (i = 0; i < 10; i++) { 1640 for (i = 0; i < 10; i++) {
1461 bnx2x_mdio45_write(bp, reg, addr, val); 1641 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
1462 msleep(5); 1642 msleep(5);
1463 bnx2x_mdio45_read(bp, reg, addr, &rd_val); 1643 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
1464 /* if the read value is not the same as the value we wrote, 1644 /* if the read value is not the same as the value we wrote,
1465 we should write it again */ 1645 we should write it again */
1466 if (rd_val == val) 1646 if (rd_val == val)
@@ -1471,18 +1651,81 @@ static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1471} 1651}
1472 1652
1473/* 1653/*
1474 * link managment 1654 * link management
1475 */ 1655 */
1476 1656
1657static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1658{
1659 switch (pause_result) { /* ASYM P ASYM P */
1660 case 0xb: /* 1 0 1 1 */
1661 bp->flow_ctrl = FLOW_CTRL_TX;
1662 break;
1663
1664 case 0xe: /* 1 1 1 0 */
1665 bp->flow_ctrl = FLOW_CTRL_RX;
1666 break;
1667
1668 case 0x5: /* 0 1 0 1 */
1669 case 0x7: /* 0 1 1 1 */
1670 case 0xd: /* 1 1 0 1 */
1671 case 0xf: /* 1 1 1 1 */
1672 bp->flow_ctrl = FLOW_CTRL_BOTH;
1673 break;
1674
1675 default:
1676 break;
1677 }
1678}
1679
1680static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
1681{
1682 u32 ext_phy_addr;
1683 u32 ld_pause; /* local */
1684 u32 lp_pause; /* link partner */
1685 u32 an_complete; /* AN complete */
1686 u32 pause_result;
1687 u8 ret = 0;
1688
1689 ext_phy_addr = ((bp->ext_phy_config &
1690 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1691 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1692
1693 /* read twice */
1694 bnx2x_mdio45_read(bp, ext_phy_addr,
1695 EXT_PHY_KR_AUTO_NEG_DEVAD,
1696 EXT_PHY_KR_STATUS, &an_complete);
1697 bnx2x_mdio45_read(bp, ext_phy_addr,
1698 EXT_PHY_KR_AUTO_NEG_DEVAD,
1699 EXT_PHY_KR_STATUS, &an_complete);
1700
1701 if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
1702 ret = 1;
1703 bnx2x_mdio45_read(bp, ext_phy_addr,
1704 EXT_PHY_KR_AUTO_NEG_DEVAD,
1705 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
1706 bnx2x_mdio45_read(bp, ext_phy_addr,
1707 EXT_PHY_KR_AUTO_NEG_DEVAD,
1708 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
1709 pause_result = (ld_pause &
1710 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
1711 pause_result |= (lp_pause &
1712 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
1713 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1714 pause_result);
1715 bnx2x_pause_resolve(bp, pause_result);
1716 }
1717 return ret;
1718}
1719
1477static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status) 1720static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1478{ 1721{
1479 u32 ld_pause; /* local driver */ 1722 u32 ld_pause; /* local driver */
1480 u32 lp_pause; /* link partner */ 1723 u32 lp_pause; /* link partner */
1481 u32 pause_result; 1724 u32 pause_result;
1482 1725
1483 bp->flow_ctrl = 0; 1726 bp->flow_ctrl = 0;
1484 1727
1485 /* reolve from gp_status in case of AN complete and not sgmii */ 1728 /* resolve from gp_status in case of AN complete and not sgmii */
1486 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) && 1729 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1487 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 1730 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1488 (!(bp->phy_flags & PHY_SGMII_FLAG)) && 1731 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
@@ -1499,45 +1742,57 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1499 pause_result |= (lp_pause & 1742 pause_result |= (lp_pause &
1500 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1743 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1501 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1744 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1745 bnx2x_pause_resolve(bp, pause_result);
1746 } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
1747 !(bnx2x_ext_phy_resove_fc(bp))) {
1748 /* forced speed */
1749 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
1750 switch (bp->req_flow_ctrl) {
1751 case FLOW_CTRL_AUTO:
1752 if (bp->dev->mtu <= 4500)
1753 bp->flow_ctrl = FLOW_CTRL_BOTH;
1754 else
1755 bp->flow_ctrl = FLOW_CTRL_TX;
1756 break;
1502 1757
1503 switch (pause_result) { /* ASYM P ASYM P */ 1758 case FLOW_CTRL_TX:
1504 case 0xb: /* 1 0 1 1 */ 1759 bp->flow_ctrl = FLOW_CTRL_TX;
1505 bp->flow_ctrl = FLOW_CTRL_TX; 1760 break;
1506 break;
1507
1508 case 0xe: /* 1 1 1 0 */
1509 bp->flow_ctrl = FLOW_CTRL_RX;
1510 break;
1511 1761
1512 case 0x5: /* 0 1 0 1 */ 1762 case FLOW_CTRL_RX:
1513 case 0x7: /* 0 1 1 1 */ 1763 if (bp->dev->mtu <= 4500)
1514 case 0xd: /* 1 1 0 1 */ 1764 bp->flow_ctrl = FLOW_CTRL_RX;
1515 case 0xf: /* 1 1 1 1 */ 1765 break;
1516 bp->flow_ctrl = FLOW_CTRL_BOTH;
1517 break;
1518 1766
1519 default: 1767 case FLOW_CTRL_BOTH:
1520 break; 1768 if (bp->dev->mtu <= 4500)
1521 } 1769 bp->flow_ctrl = FLOW_CTRL_BOTH;
1770 else
1771 bp->flow_ctrl = FLOW_CTRL_TX;
1772 break;
1522 1773
1523 } else { /* forced mode */ 1774 case FLOW_CTRL_NONE:
1524 switch (bp->req_flow_ctrl) { 1775 default:
1525 case FLOW_CTRL_AUTO: 1776 break;
1526 if (bp->dev->mtu <= 4500) 1777 }
1527 bp->flow_ctrl = FLOW_CTRL_BOTH; 1778 } else { /* forced mode */
1528 else 1779 switch (bp->req_flow_ctrl) {
1529 bp->flow_ctrl = FLOW_CTRL_TX; 1780 case FLOW_CTRL_AUTO:
1530 break; 1781 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
1782 " req_autoneg 0x%x\n",
1783 bp->req_flow_ctrl, bp->req_autoneg);
1784 break;
1531 1785
1532 case FLOW_CTRL_TX: 1786 case FLOW_CTRL_TX:
1533 case FLOW_CTRL_RX: 1787 case FLOW_CTRL_RX:
1534 case FLOW_CTRL_BOTH: 1788 case FLOW_CTRL_BOTH:
1535 bp->flow_ctrl = bp->req_flow_ctrl; 1789 bp->flow_ctrl = bp->req_flow_ctrl;
1536 break; 1790 break;
1537 1791
1538 case FLOW_CTRL_NONE: 1792 case FLOW_CTRL_NONE:
1539 default: 1793 default:
1540 break; 1794 break;
1795 }
1541 } 1796 }
1542 } 1797 }
1543 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl); 1798 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
@@ -1548,9 +1803,9 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1548 bp->link_status = 0; 1803 bp->link_status = 0;
1549 1804
1550 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 1805 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1551 DP(NETIF_MSG_LINK, "link up\n"); 1806 DP(NETIF_MSG_LINK, "phy link up\n");
1552 1807
1553 bp->link_up = 1; 1808 bp->phy_link_up = 1;
1554 bp->link_status |= LINK_STATUS_LINK_UP; 1809 bp->link_status |= LINK_STATUS_LINK_UP;
1555 1810
1556 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) 1811 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
@@ -1659,20 +1914,20 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1659 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1914 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1660 1915
1661 } else { /* link_down */ 1916 } else { /* link_down */
1662 DP(NETIF_MSG_LINK, "link down\n"); 1917 DP(NETIF_MSG_LINK, "phy link down\n");
1663 1918
1664 bp->link_up = 0; 1919 bp->phy_link_up = 0;
1665 1920
1666 bp->line_speed = 0; 1921 bp->line_speed = 0;
1667 bp->duplex = DUPLEX_FULL; 1922 bp->duplex = DUPLEX_FULL;
1668 bp->flow_ctrl = 0; 1923 bp->flow_ctrl = 0;
1669 } 1924 }
1670 1925
1671 DP(NETIF_MSG_LINK, "gp_status 0x%x link_up %d\n" 1926 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
1672 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x" 1927 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1673 " link_status 0x%x\n", 1928 " link_status 0x%x\n",
1674 gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl, 1929 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1675 bp->link_status); 1930 bp->flow_ctrl, bp->link_status);
1676} 1931}
1677 1932
1678static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g) 1933static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
@@ -1680,40 +1935,40 @@ static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1680 int port = bp->port; 1935 int port = bp->port;
1681 1936
1682 /* first reset all status 1937 /* first reset all status
1683 * we asume only one line will be change at a time */ 1938 * we assume only one line will be change at a time */
1684 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1939 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1685 (NIG_XGXS0_LINK_STATUS | 1940 (NIG_STATUS_XGXS0_LINK10G |
1686 NIG_SERDES0_LINK_STATUS | 1941 NIG_STATUS_XGXS0_LINK_STATUS |
1687 NIG_STATUS_INTERRUPT_XGXS0_LINK10G)); 1942 NIG_STATUS_SERDES0_LINK_STATUS));
1688 if (bp->link_up) { 1943 if (bp->phy_link_up) {
1689 if (is_10g) { 1944 if (is_10g) {
1690 /* Disable the 10G link interrupt 1945 /* Disable the 10G link interrupt
1691 * by writing 1 to the status register 1946 * by writing 1 to the status register
1692 */ 1947 */
1693 DP(NETIF_MSG_LINK, "10G XGXS link up\n"); 1948 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
1694 bnx2x_bits_en(bp, 1949 bnx2x_bits_en(bp,
1695 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1950 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1696 NIG_STATUS_INTERRUPT_XGXS0_LINK10G); 1951 NIG_STATUS_XGXS0_LINK10G);
1697 1952
1698 } else if (bp->phy_flags & PHY_XGXS_FLAG) { 1953 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1699 /* Disable the link interrupt 1954 /* Disable the link interrupt
1700 * by writing 1 to the relevant lane 1955 * by writing 1 to the relevant lane
1701 * in the status register 1956 * in the status register
1702 */ 1957 */
1703 DP(NETIF_MSG_LINK, "1G XGXS link up\n"); 1958 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
1704 bnx2x_bits_en(bp, 1959 bnx2x_bits_en(bp,
1705 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1960 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1706 ((1 << bp->ser_lane) << 1961 ((1 << bp->ser_lane) <<
1707 NIG_XGXS0_LINK_STATUS_SIZE)); 1962 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
1708 1963
1709 } else { /* SerDes */ 1964 } else { /* SerDes */
1710 DP(NETIF_MSG_LINK, "SerDes link up\n"); 1965 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
1711 /* Disable the link interrupt 1966 /* Disable the link interrupt
1712 * by writing 1 to the status register 1967 * by writing 1 to the status register
1713 */ 1968 */
1714 bnx2x_bits_en(bp, 1969 bnx2x_bits_en(bp,
1715 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1716 NIG_SERDES0_LINK_STATUS); 1971 NIG_STATUS_SERDES0_LINK_STATUS);
1717 } 1972 }
1718 1973
1719 } else { /* link_down */ 1974 } else { /* link_down */
@@ -1724,91 +1979,182 @@ static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1724{ 1979{
1725 u32 ext_phy_type; 1980 u32 ext_phy_type;
1726 u32 ext_phy_addr; 1981 u32 ext_phy_addr;
1727 u32 local_phy; 1982 u32 val1 = 0, val2;
1728 u32 val = 0;
1729 u32 rx_sd, pcs_status; 1983 u32 rx_sd, pcs_status;
1730 1984
1731 if (bp->phy_flags & PHY_XGXS_FLAG) { 1985 if (bp->phy_flags & PHY_XGXS_FLAG) {
1732 local_phy = bp->phy_addr;
1733 ext_phy_addr = ((bp->ext_phy_config & 1986 ext_phy_addr = ((bp->ext_phy_config &
1734 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 1987 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1735 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 1988 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1736 bp->phy_addr = (u8)ext_phy_addr;
1737 1989
1738 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 1990 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1739 switch (ext_phy_type) { 1991 switch (ext_phy_type) {
1740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 1992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1741 DP(NETIF_MSG_LINK, "XGXS Direct\n"); 1993 DP(NETIF_MSG_LINK, "XGXS Direct\n");
1742 val = 1; 1994 val1 = 1;
1743 break; 1995 break;
1744 1996
1745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 1997 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1746 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 1998 DP(NETIF_MSG_LINK, "XGXS 8705\n");
1747 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, 1999 bnx2x_mdio45_read(bp, ext_phy_addr,
1748 EXT_PHY_OPT_LASI_STATUS, &val); 2000 EXT_PHY_OPT_WIS_DEVAD,
1749 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); 2001 EXT_PHY_OPT_LASI_STATUS, &val1);
1750 2002 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
1751 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, 2003
1752 EXT_PHY_OPT_LASI_STATUS, &val); 2004 bnx2x_mdio45_read(bp, ext_phy_addr,
1753 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); 2005 EXT_PHY_OPT_WIS_DEVAD,
1754 2006 EXT_PHY_OPT_LASI_STATUS, &val1);
1755 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2007 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2008
2009 bnx2x_mdio45_read(bp, ext_phy_addr,
2010 EXT_PHY_OPT_PMA_PMD_DEVAD,
1756 EXT_PHY_OPT_PMD_RX_SD, &rx_sd); 2011 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1757 val = (rx_sd & 0x1); 2012 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2013 val1 = (rx_sd & 0x1);
1758 break; 2014 break;
1759 2015
1760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1761 DP(NETIF_MSG_LINK, "XGXS 8706\n"); 2017 DP(NETIF_MSG_LINK, "XGXS 8706\n");
1762 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2018 bnx2x_mdio45_read(bp, ext_phy_addr,
1763 EXT_PHY_OPT_LASI_STATUS, &val); 2019 EXT_PHY_OPT_PMA_PMD_DEVAD,
1764 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); 2020 EXT_PHY_OPT_LASI_STATUS, &val1);
1765 2021 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
1766 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2022
1767 EXT_PHY_OPT_LASI_STATUS, &val); 2023 bnx2x_mdio45_read(bp, ext_phy_addr,
1768 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); 2024 EXT_PHY_OPT_PMA_PMD_DEVAD,
1769 2025 EXT_PHY_OPT_LASI_STATUS, &val1);
1770 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2026 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2027
2028 bnx2x_mdio45_read(bp, ext_phy_addr,
2029 EXT_PHY_OPT_PMA_PMD_DEVAD,
1771 EXT_PHY_OPT_PMD_RX_SD, &rx_sd); 2030 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1772 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD, 2031 bnx2x_mdio45_read(bp, ext_phy_addr,
1773 EXT_PHY_OPT_PCS_STATUS, &pcs_status); 2032 EXT_PHY_OPT_PCS_DEVAD,
2033 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2034 bnx2x_mdio45_read(bp, ext_phy_addr,
2035 EXT_PHY_AUTO_NEG_DEVAD,
2036 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2037
1774 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x" 2038 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
1775 " pcs_status 0x%x\n", rx_sd, pcs_status); 2039 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
1776 /* link is up if both bit 0 of pmd_rx and 2040 rx_sd, pcs_status, val2, (val2 & (1<<1)));
1777 * bit 0 of pcs_status are set 2041 /* link is up if both bit 0 of pmd_rx_sd and
2042 * bit 0 of pcs_status are set, or if the autoneg bit
2043 1 is set
1778 */ 2044 */
1779 val = (rx_sd & pcs_status); 2045 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2046 break;
2047
2048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2049 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2050
2051 /* clear the interrupt LASI status register */
2052 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2053 ext_phy_addr,
2054 EXT_PHY_KR_PCS_DEVAD,
2055 EXT_PHY_KR_LASI_STATUS, &val2);
2056 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2057 ext_phy_addr,
2058 EXT_PHY_KR_PCS_DEVAD,
2059 EXT_PHY_KR_LASI_STATUS, &val1);
2060 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2061 val2, val1);
2062 /* Check the LASI */
2063 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2064 ext_phy_addr,
2065 EXT_PHY_KR_PMA_PMD_DEVAD,
2066 0x9003, &val2);
2067 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2068 ext_phy_addr,
2069 EXT_PHY_KR_PMA_PMD_DEVAD,
2070 0x9003, &val1);
2071 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2072 val2, val1);
2073 /* Check the link status */
2074 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2075 ext_phy_addr,
2076 EXT_PHY_KR_PCS_DEVAD,
2077 EXT_PHY_KR_PCS_STATUS, &val2);
2078 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2079 /* Check the link status on 1.1.2 */
2080 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2081 ext_phy_addr,
2082 EXT_PHY_OPT_PMA_PMD_DEVAD,
2083 EXT_PHY_KR_STATUS, &val2);
2084 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2085 ext_phy_addr,
2086 EXT_PHY_OPT_PMA_PMD_DEVAD,
2087 EXT_PHY_KR_STATUS, &val1);
2088 DP(NETIF_MSG_LINK,
2089 "KR PMA status 0x%x->0x%x\n", val2, val1);
2090 val1 = ((val1 & 4) == 4);
2091 /* If 1G was requested assume the link is up */
2092 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2093 (bp->req_line_speed == SPEED_1000))
2094 val1 = 1;
2095 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2096 break;
2097
2098 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2099 bnx2x_mdio45_read(bp, ext_phy_addr,
2100 EXT_PHY_OPT_PMA_PMD_DEVAD,
2101 EXT_PHY_OPT_LASI_STATUS, &val2);
2102 bnx2x_mdio45_read(bp, ext_phy_addr,
2103 EXT_PHY_OPT_PMA_PMD_DEVAD,
2104 EXT_PHY_OPT_LASI_STATUS, &val1);
2105 DP(NETIF_MSG_LINK,
2106 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2107 bnx2x_mdio45_read(bp, ext_phy_addr,
2108 EXT_PHY_OPT_PMA_PMD_DEVAD,
2109 EXT_PHY_KR_STATUS, &val2);
2110 bnx2x_mdio45_read(bp, ext_phy_addr,
2111 EXT_PHY_OPT_PMA_PMD_DEVAD,
2112 EXT_PHY_KR_STATUS, &val1);
2113 DP(NETIF_MSG_LINK,
2114 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2115 val1 = ((val1 & 4) == 4);
2116 /* if link is up
2117 * print the AN outcome of the SFX7101 PHY
2118 */
2119 if (val1) {
2120 bnx2x_mdio45_read(bp, ext_phy_addr,
2121 EXT_PHY_KR_AUTO_NEG_DEVAD,
2122 0x21, &val2);
2123 DP(NETIF_MSG_LINK,
2124 "SFX7101 AN status 0x%x->%s\n", val2,
2125 (val2 & (1<<14)) ? "Master" : "Slave");
2126 }
1780 break; 2127 break;
1781 2128
1782 default: 2129 default:
1783 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 2130 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
1784 bp->ext_phy_config); 2131 bp->ext_phy_config);
1785 val = 0; 2132 val1 = 0;
1786 break; 2133 break;
1787 } 2134 }
1788 bp->phy_addr = local_phy;
1789 2135
1790 } else { /* SerDes */ 2136 } else { /* SerDes */
1791 ext_phy_type = SERDES_EXT_PHY_TYPE(bp); 2137 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
1792 switch (ext_phy_type) { 2138 switch (ext_phy_type) {
1793 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2139 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
1794 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 2140 DP(NETIF_MSG_LINK, "SerDes Direct\n");
1795 val = 1; 2141 val1 = 1;
1796 break; 2142 break;
1797 2143
1798 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 2144 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1799 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 2145 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1800 val = 1; 2146 val1 = 1;
1801 break; 2147 break;
1802 2148
1803 default: 2149 default:
1804 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", 2150 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
1805 bp->ext_phy_config); 2151 bp->ext_phy_config);
1806 val = 0; 2152 val1 = 0;
1807 break; 2153 break;
1808 } 2154 }
1809 } 2155 }
1810 2156
1811 return val; 2157 return val1;
1812} 2158}
1813 2159
1814static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) 2160static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
@@ -1819,7 +2165,7 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1819 u32 wb_write[2]; 2165 u32 wb_write[2];
1820 u32 val; 2166 u32 val;
1821 2167
1822 DP(NETIF_MSG_LINK, "enableing BigMAC\n"); 2168 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
1823 /* reset and unreset the BigMac */ 2169 /* reset and unreset the BigMac */
1824 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1825 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2171 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -1933,6 +2279,35 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1933 bp->stats_state = STATS_STATE_ENABLE; 2279 bp->stats_state = STATS_STATE_ENABLE;
1934} 2280}
1935 2281
2282static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2283{
2284 int port = bp->port;
2285 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2286 NIG_REG_INGRESS_BMAC0_MEM;
2287 u32 wb_write[2];
2288
2289 /* Only if the bmac is out of reset */
2290 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2291 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2292 /* Clear Rx Enable bit in BMAC_CONTROL register */
2293#ifdef BNX2X_DMAE_RD
2294 bnx2x_read_dmae(bp, bmac_addr +
2295 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2296 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2297 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2298#else
2299 wb_write[0] = REG_RD(bp,
2300 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2301 wb_write[1] = REG_RD(bp,
2302 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2303#endif
2304 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2305 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2306 wb_write, 2);
2307 msleep(1);
2308 }
2309}
2310
1936static void bnx2x_emac_enable(struct bnx2x *bp) 2311static void bnx2x_emac_enable(struct bnx2x *bp)
1937{ 2312{
1938 int port = bp->port; 2313 int port = bp->port;
@@ -1940,7 +2315,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
1940 u32 val; 2315 u32 val;
1941 int timeout; 2316 int timeout;
1942 2317
1943 DP(NETIF_MSG_LINK, "enableing EMAC\n"); 2318 DP(NETIF_MSG_LINK, "enabling EMAC\n");
1944 /* reset and unreset the emac core */ 2319 /* reset and unreset the emac core */
1945 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2320 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1946 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 2321 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
@@ -2033,7 +2408,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
2033 EMAC_TX_MODE_EXT_PAUSE_EN); 2408 EMAC_TX_MODE_EXT_PAUSE_EN);
2034 } 2409 }
2035 2410
2036 /* KEEP_VLAN_TAG, promiscous */ 2411 /* KEEP_VLAN_TAG, promiscuous */
2037 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 2412 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2038 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 2413 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2039 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); 2414 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
@@ -2161,7 +2536,6 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
2161 u32 count = 1000; 2536 u32 count = 1000;
2162 u32 pause = 0; 2537 u32 pause = 0;
2163 2538
2164
2165 /* disable port */ 2539 /* disable port */
2166 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 2540 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2167 2541
@@ -2232,7 +2606,7 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
2232static void bnx2x_update_mng(struct bnx2x *bp) 2606static void bnx2x_update_mng(struct bnx2x *bp)
2233{ 2607{
2234 if (!nomcp) 2608 if (!nomcp)
2235 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status, 2609 SHMEM_WR(bp, port_mb[bp->port].link_status,
2236 bp->link_status); 2610 bp->link_status);
2237} 2611}
2238 2612
@@ -2294,19 +2668,19 @@ static void bnx2x_link_down(struct bnx2x *bp)
2294 DP(BNX2X_MSG_STATS, "stats_state - STOP\n"); 2668 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2295 } 2669 }
2296 2670
2297 /* indicate link down */ 2671 /* indicate no mac active */
2298 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG); 2672 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2299 2673
2300 /* reset BigMac */ 2674 /* update shared memory */
2301 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2675 bnx2x_update_mng(bp);
2302 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2303 2676
2304 /* ignore drain flag interrupt */
2305 /* activate nig drain */ 2677 /* activate nig drain */
2306 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 2678 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2307 2679
2308 /* update shared memory */ 2680 /* reset BigMac */
2309 bnx2x_update_mng(bp); 2681 bnx2x_bmac_rx_disable(bp);
2682 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2683 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2310 2684
2311 /* indicate link down */ 2685 /* indicate link down */
2312 bnx2x_link_report(bp); 2686 bnx2x_link_report(bp);
@@ -2317,14 +2691,15 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp);
2317/* This function is called upon link interrupt */ 2691/* This function is called upon link interrupt */
2318static void bnx2x_link_update(struct bnx2x *bp) 2692static void bnx2x_link_update(struct bnx2x *bp)
2319{ 2693{
2320 u32 gp_status;
2321 int port = bp->port; 2694 int port = bp->port;
2322 int i; 2695 int i;
2696 u32 gp_status;
2323 int link_10g; 2697 int link_10g;
2324 2698
2325 DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x," 2699 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2326 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x," 2700 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2327 " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG), 2701 " 10G %x, XGXS_LINK %x\n", port,
2702 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2328 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4), 2703 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2329 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask, 2704 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2330 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), 2705 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
@@ -2336,7 +2711,7 @@ static void bnx2x_link_update(struct bnx2x *bp)
2336 might_sleep(); 2711 might_sleep();
2337 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS); 2712 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2338 /* avoid fast toggling */ 2713 /* avoid fast toggling */
2339 for (i = 0 ; i < 10 ; i++) { 2714 for (i = 0; i < 10; i++) {
2340 msleep(10); 2715 msleep(10);
2341 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1, 2716 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2342 &gp_status); 2717 &gp_status);
@@ -2351,7 +2726,8 @@ static void bnx2x_link_update(struct bnx2x *bp)
2351 bnx2x_link_int_ack(bp, link_10g); 2726 bnx2x_link_int_ack(bp, link_10g);
2352 2727
2353 /* link is up only if both local phy and external phy are up */ 2728 /* link is up only if both local phy and external phy are up */
2354 if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) { 2729 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2730 if (bp->link_up) {
2355 if (link_10g) { 2731 if (link_10g) {
2356 bnx2x_bmac_enable(bp, 0); 2732 bnx2x_bmac_enable(bp, 0);
2357 bnx2x_leds_set(bp, SPEED_10000); 2733 bnx2x_leds_set(bp, SPEED_10000);
@@ -2427,7 +2803,9 @@ static void bnx2x_reset_unicore(struct bnx2x *bp)
2427 } 2803 }
2428 } 2804 }
2429 2805
2430 BNX2X_ERR("BUG! unicore is still in reset!\n"); 2806 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2807 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2808 bp->phy_addr);
2431} 2809}
2432 2810
2433static void bnx2x_set_swap_lanes(struct bnx2x *bp) 2811static void bnx2x_set_swap_lanes(struct bnx2x *bp)
@@ -2475,12 +2853,12 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2475 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT); 2853 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2476 2854
2477 bnx2x_mdio22_write(bp, 2855 bnx2x_mdio22_write(bp,
2478 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 2856 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2479 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2480 2858
2481 bnx2x_mdio22_read(bp, 2859 bnx2x_mdio22_read(bp,
2482 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 2860 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2483 &control2); 2861 &control2);
2484 2862
2485 if (bp->autoneg & AUTONEG_PARALLEL) { 2863 if (bp->autoneg & AUTONEG_PARALLEL) {
2486 control2 |= 2864 control2 |=
@@ -2490,8 +2868,14 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2490 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 2868 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2491 } 2869 }
2492 bnx2x_mdio22_write(bp, 2870 bnx2x_mdio22_write(bp,
2493 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 2871 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2494 control2); 2872 control2);
2873
2874 /* Disable parallel detection of HiG */
2875 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2876 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2877 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2495 } 2879 }
2496} 2880}
2497 2881
@@ -2625,7 +3009,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2625 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G); 3009 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2626 3010
2627 /* set extended capabilities */ 3011 /* set extended capabilities */
2628 if (bp->advertising & ADVERTISED_2500baseT_Full) 3012 if (bp->advertising & ADVERTISED_2500baseX_Full)
2629 val |= MDIO_OVER_1G_UP1_2_5G; 3013 val |= MDIO_OVER_1G_UP1_2_5G;
2630 if (bp->advertising & ADVERTISED_10000baseT_Full) 3014 if (bp->advertising & ADVERTISED_10000baseT_Full)
2631 val |= MDIO_OVER_1G_UP1_10G; 3015 val |= MDIO_OVER_1G_UP1_10G;
@@ -2641,20 +3025,91 @@ static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2641 /* for AN, we are always publishing full duplex */ 3025 /* for AN, we are always publishing full duplex */
2642 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3026 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2643 3027
2644 /* set pause */ 3028 /* resolve pause mode and advertisement
2645 switch (bp->pause_mode) { 3029 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
2646 case PAUSE_SYMMETRIC: 3030 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
2647 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; 3031 switch (bp->req_flow_ctrl) {
2648 break; 3032 case FLOW_CTRL_AUTO:
2649 case PAUSE_ASYMMETRIC: 3033 if (bp->dev->mtu <= 4500) {
2650 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3034 an_adv |=
2651 break; 3035 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2652 case PAUSE_BOTH: 3036 bp->advertising |= (ADVERTISED_Pause |
2653 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3037 ADVERTISED_Asym_Pause);
2654 break; 3038 } else {
2655 case PAUSE_NONE: 3039 an_adv |=
2656 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2657 break; 3041 bp->advertising |= ADVERTISED_Asym_Pause;
3042 }
3043 break;
3044
3045 case FLOW_CTRL_TX:
3046 an_adv |=
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3048 bp->advertising |= ADVERTISED_Asym_Pause;
3049 break;
3050
3051 case FLOW_CTRL_RX:
3052 if (bp->dev->mtu <= 4500) {
3053 an_adv |=
3054 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3055 bp->advertising |= (ADVERTISED_Pause |
3056 ADVERTISED_Asym_Pause);
3057 } else {
3058 an_adv |=
3059 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3060 bp->advertising &= ~(ADVERTISED_Pause |
3061 ADVERTISED_Asym_Pause);
3062 }
3063 break;
3064
3065 case FLOW_CTRL_BOTH:
3066 if (bp->dev->mtu <= 4500) {
3067 an_adv |=
3068 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3069 bp->advertising |= (ADVERTISED_Pause |
3070 ADVERTISED_Asym_Pause);
3071 } else {
3072 an_adv |=
3073 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3074 bp->advertising |= ADVERTISED_Asym_Pause;
3075 }
3076 break;
3077
3078 case FLOW_CTRL_NONE:
3079 default:
3080 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3081 bp->advertising &= ~(ADVERTISED_Pause |
3082 ADVERTISED_Asym_Pause);
3083 break;
3084 }
3085 } else { /* forced mode */
3086 switch (bp->req_flow_ctrl) {
3087 case FLOW_CTRL_AUTO:
3088 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3089 " req_autoneg 0x%x\n",
3090 bp->req_flow_ctrl, bp->req_autoneg);
3091 break;
3092
3093 case FLOW_CTRL_TX:
3094 an_adv |=
3095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3096 bp->advertising |= ADVERTISED_Asym_Pause;
3097 break;
3098
3099 case FLOW_CTRL_RX:
3100 case FLOW_CTRL_BOTH:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3102 bp->advertising |= (ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3104 break;
3105
3106 case FLOW_CTRL_NONE:
3107 default:
3108 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3109 bp->advertising &= ~(ADVERTISED_Pause |
3110 ADVERTISED_Asym_Pause);
3111 break;
3112 }
2658 } 3113 }
2659 3114
2660 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0); 3115 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
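The pause advertisement resolved in the autoneg branch above follows the 802.3 Table 28B-3 combinations, with one driver-specific twist: symmetric pause is only advertised while the MTU is at most 4500 bytes. A minimal stand-alone sketch of that mapping, using simplified enum values in place of the driver's MDIO_COMBO_IEEE0_* bit definitions:

#include <stdio.h>

/* Simplified stand-ins for the CL37 pause advertisement choices; the
 * real values live in the driver's MDIO_COMBO_IEEE0_* defines. */
enum pause_adv { PAUSE_ADV_NONE, PAUSE_ADV_ASYMMETRIC, PAUSE_ADV_BOTH };
enum flow_ctrl { FC_AUTO, FC_TX, FC_RX, FC_BOTH, FC_NONE };

/* Mirror of the autoneg branch above: RX (alone or combined with TX)
 * wants symmetric pause, but only while the MTU fits the 4500-byte
 * limit; TX-only always degrades to asymmetric pause. */
static enum pause_adv resolve_pause_adv(enum flow_ctrl req, int mtu)
{
	int can_symmetric = (mtu <= 4500);

	switch (req) {
	case FC_AUTO:
	case FC_RX:
	case FC_BOTH:
		if (can_symmetric)
			return PAUSE_ADV_BOTH;
		/* jumbo frames: fall back per the driver's policy */
		return (req == FC_RX) ? PAUSE_ADV_NONE : PAUSE_ADV_ASYMMETRIC;
	case FC_TX:
		return PAUSE_ADV_ASYMMETRIC;
	default:
		return PAUSE_ADV_NONE;
	}
}

int main(void)
{
	printf("both/1500 -> %d\n", resolve_pause_adv(FC_BOTH, 1500));
	printf("both/9000 -> %d\n", resolve_pause_adv(FC_BOTH, 9000));
	printf("rx/9000   -> %d\n", resolve_pause_adv(FC_RX, 9000));
	return 0;
}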
@@ -2752,47 +3207,162 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
2752static void bnx2x_link_int_enable(struct bnx2x *bp) 3207static void bnx2x_link_int_enable(struct bnx2x *bp)
2753{ 3208{
2754 int port = bp->port; 3209 int port = bp->port;
3210 u32 ext_phy_type;
3211 u32 mask;
2755 3212
2756 /* setting the status to report on link up 3213 /* setting the status to report on link up
2757 for either XGXS or SerDes */ 3214 for either XGXS or SerDes */
2758 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 3215 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2759 (NIG_XGXS0_LINK_STATUS | 3216 (NIG_STATUS_XGXS0_LINK10G |
2760 NIG_STATUS_INTERRUPT_XGXS0_LINK10G | 3217 NIG_STATUS_XGXS0_LINK_STATUS |
2761 NIG_SERDES0_LINK_STATUS)); 3218 NIG_STATUS_SERDES0_LINK_STATUS));
2762 3219
2763 if (bp->phy_flags & PHY_XGXS_FLAG) { 3220 if (bp->phy_flags & PHY_XGXS_FLAG) {
2764 /* TBD - 3221 mask = (NIG_MASK_XGXS0_LINK10G |
2765 * in force mode (not AN) we can enable just the relevant 3222 NIG_MASK_XGXS0_LINK_STATUS);
2766 * interrupt 3223 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
2767 * Even in AN we might enable only one according to the AN 3224 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2768 * speed mask 3225 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
2769 */ 3226 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
2770 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3227 (ext_phy_type !=
2771 (NIG_MASK_XGXS0_LINK_STATUS | 3228 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
2772 NIG_MASK_XGXS0_LINK10G)); 3229 mask |= NIG_MASK_MI_INT;
2773 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n"); 3230 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3231 }
2774 3232
2775 } else { /* SerDes */ 3233 } else { /* SerDes */
2776 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3234 mask = NIG_MASK_SERDES0_LINK_STATUS;
2777 NIG_MASK_SERDES0_LINK_STATUS); 3235 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
2778 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n"); 3236 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3237 if ((ext_phy_type !=
3238 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3239 (ext_phy_type !=
3240 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3241 mask |= NIG_MASK_MI_INT;
3242 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3243 }
2779 } 3244 }
3245 bnx2x_bits_en(bp,
3246 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3247 mask);
3248 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3249 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3250 " 10G %x, XGXS_LINK %x\n", port,
3251 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3252 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3253 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3255 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3256 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3258 );
3259}
3260
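A rough sketch of the mask selection performed by bnx2x_link_int_enable() above, with placeholder bit values standing in for the real NIG_MASK_* register definitions: the XGXS path watches both the 10G and combined link-status bits, the SerDes path only its own link-status bit, and either path adds the MI interrupt when a genuine external PHY is attached.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Placeholder bit positions; the real NIG_MASK_* values come from the
 * register header and are not reproduced here. */
#define MASK_XGXS0_LINK10G		(1u << 0)
#define MASK_XGXS0_LINK_STATUS		(1u << 1)
#define MASK_SERDES0_LINK_STATUS	(1u << 2)
#define MASK_MI_INT			(1u << 3)

static uint32_t link_int_mask(bool is_xgxs, bool has_ext_phy)
{
	uint32_t mask;

	if (is_xgxs)
		mask = MASK_XGXS0_LINK10G | MASK_XGXS0_LINK_STATUS;
	else
		mask = MASK_SERDES0_LINK_STATUS;

	/* direct, not-connected and failed PHYs have no MI interrupt source */
	if (has_ext_phy)
		mask |= MASK_MI_INT;

	return mask;
}

int main(void)
{
	printf("XGXS + ext PHY    : 0x%x\n", (unsigned)link_int_mask(true, true));
	printf("SerDes, no ext PHY: 0x%x\n", (unsigned)link_int_mask(false, false));
	return 0;
}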
3261static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3262{
3263 u32 ext_phy_addr = ((bp->ext_phy_config &
3264 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3266 u32 fw_ver1, fw_ver2;
3267
3268 /* Need to wait 200ms after reset */
3269 msleep(200);
3270 /* Boot port from external ROM
3271 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3272 */
3273 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3274 EXT_PHY_KR_PMA_PMD_DEVAD,
3275 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3276
3277 /* Reset internal microprocessor */
3278 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3279 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3280 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3281 /* set micro reset = 0 */
3282 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3283 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3284 EXT_PHY_KR_ROM_MICRO_RESET);
3285 /* Reset internal microprocessor */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3288 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3289 /* wait for 100ms for code download via SPI port */
3290 msleep(100);
3291
3292 /* Clear ser_boot_ctl bit */
3293 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3296 /* Wait 100ms */
3297 msleep(100);
3298
3299 /* Print the PHY FW version */
3300 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3301 EXT_PHY_KR_PMA_PMD_DEVAD,
3302 0xca19, &fw_ver1);
3303 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3304 EXT_PHY_KR_PMA_PMD_DEVAD,
3305 0xca1a, &fw_ver2);
3306 DP(NETIF_MSG_LINK,
3307 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3308}
3309
3310static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3311{
3312 u32 ext_phy_addr = ((bp->ext_phy_config &
3313 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3315
3316 /* Force KR or KX */
3317 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3318 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3319 0x2040);
3320 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3321 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3322 0x000b);
3323 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3324 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3325 0x0000);
3326 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3327 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3328 0x0000);
2780} 3329}
2781 3330
2782static void bnx2x_ext_phy_init(struct bnx2x *bp) 3331static void bnx2x_ext_phy_init(struct bnx2x *bp)
2783{ 3332{
2784 int port = bp->port;
2785 u32 ext_phy_type; 3333 u32 ext_phy_type;
2786 u32 ext_phy_addr; 3334 u32 ext_phy_addr;
2787 u32 local_phy; 3335 u32 cnt;
3336 u32 ctrl;
3337 u32 val = 0;
2788 3338
2789 if (bp->phy_flags & PHY_XGXS_FLAG) { 3339 if (bp->phy_flags & PHY_XGXS_FLAG) {
2790 local_phy = bp->phy_addr;
2791 ext_phy_addr = ((bp->ext_phy_config & 3340 ext_phy_addr = ((bp->ext_phy_config &
2792 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 3341 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2793 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2794 3343
2795 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 3344 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3345 /* Make sure that the soft reset is off (except for the 8072:
3346 * due to the lock, it will be done inside the specific
3347 * handling)
3348 */
3349 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3350 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3353 /* Wait for soft reset to get cleared up to 1 sec */
3354 for (cnt = 0; cnt < 1000; cnt++) {
3355 bnx2x_mdio45_read(bp, ext_phy_addr,
3356 EXT_PHY_OPT_PMA_PMD_DEVAD,
3357 EXT_PHY_OPT_CNTL, &ctrl);
3358 if (!(ctrl & (1<<15)))
3359 break;
3360 msleep(1);
3361 }
3362 DP(NETIF_MSG_LINK,
3363 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3364 }
3365
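The soft-reset wait just above (and the 8072-specific copy further down) follows a common pattern: poll bit 15 of the PMA/PMD control register roughly once per millisecond and give up after about a second. A stand-alone sketch of that pattern, with a hypothetical read_reg() callback standing in for the driver's MDIO clause 45 accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register read callback standing in for
 * bnx2x_mdio45_read()/bnx2x_mdio45_ctrl_read(). */
typedef uint32_t (*read_reg_fn)(void *ctx);

/* Poll until the soft-reset bit (bit 15 of the control register)
 * clears, or until ~1000 tries (~1s at one per millisecond) elapse.
 * Returns the number of polls used, or -1 on timeout. */
static int wait_soft_reset_clear(read_reg_fn read_reg, void *ctx)
{
	for (int cnt = 0; cnt < 1000; cnt++) {
		uint32_t ctrl = read_reg(ctx);

		if (!(ctrl & (1u << 15)))
			return cnt;
		/* the driver sleeps 1ms here (msleep(1)) */
	}
	return -1;
}

/* Toy backend: reports the reset bit as set for the first few reads. */
static uint32_t fake_read(void *ctx)
{
	int *remaining = ctx;

	return (*remaining)-- > 0 ? (1u << 15) : 0;
}

int main(void)
{
	int pending = 3;

	printf("cleared after %d polls\n",
	       wait_soft_reset_clear(fake_read, &pending));
	return 0;
}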
2796 switch (ext_phy_type) { 3366 switch (ext_phy_type) {
2797 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 3367 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2798 DP(NETIF_MSG_LINK, "XGXS Direct\n"); 3368 DP(NETIF_MSG_LINK, "XGXS Direct\n");
@@ -2800,49 +3370,235 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2800 3370
2801 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 3371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2802 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 3372 DP(NETIF_MSG_LINK, "XGXS 8705\n");
2803 bnx2x_bits_en(bp,
2804 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2805 NIG_MASK_MI_INT);
2806 DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
2807 3373
2808 bp->phy_addr = ext_phy_type; 3374 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
2809 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3375 EXT_PHY_OPT_PMA_PMD_DEVAD,
2810 EXT_PHY_OPT_PMD_MISC_CNTL, 3376 EXT_PHY_OPT_PMD_MISC_CNTL,
2811 0x8288); 3377 0x8288);
2812 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3378 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379 EXT_PHY_OPT_PMA_PMD_DEVAD,
2813 EXT_PHY_OPT_PHY_IDENTIFIER, 3380 EXT_PHY_OPT_PHY_IDENTIFIER,
2814 0x7fbf); 3381 0x7fbf);
2815 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3382 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3383 EXT_PHY_OPT_PMA_PMD_DEVAD,
2816 EXT_PHY_OPT_CMU_PLL_BYPASS, 3384 EXT_PHY_OPT_CMU_PLL_BYPASS,
2817 0x0100); 3385 0x0100);
2818 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD, 3386 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3387 EXT_PHY_OPT_WIS_DEVAD,
2819 EXT_PHY_OPT_LASI_CNTL, 0x1); 3388 EXT_PHY_OPT_LASI_CNTL, 0x1);
2820 break; 3389 break;
2821 3390
2822 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 3391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2823 DP(NETIF_MSG_LINK, "XGXS 8706\n"); 3392 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2824 bnx2x_bits_en(bp, 3393
2825 NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3394 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2826 NIG_MASK_MI_INT); 3395 /* Force speed */
2827 DP(NETIF_MSG_LINK, "enabled extenal phy int\n"); 3396 if (bp->req_line_speed == SPEED_10000) {
2828 3397 DP(NETIF_MSG_LINK,
2829 bp->phy_addr = ext_phy_type; 3398 "XGXS 8706 force 10Gbps\n");
2830 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3399 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
2831 EXT_PHY_OPT_PMD_DIGITAL_CNT, 3400 EXT_PHY_OPT_PMA_PMD_DEVAD,
2832 0x400); 3401 EXT_PHY_OPT_PMD_DIGITAL_CNT,
2833 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3402 0x400);
3403 } else {
3404 /* Force 1Gbps */
3405 DP(NETIF_MSG_LINK,
3406 "XGXS 8706 force 1Gbps\n");
3407
3408 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3409 EXT_PHY_OPT_PMA_PMD_DEVAD,
3410 EXT_PHY_OPT_CNTL,
3411 0x0040);
3412
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_CNTL2,
3416 0x000D);
3417 }
3418
3419 /* Enable LASI */
3420 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421 EXT_PHY_OPT_PMA_PMD_DEVAD,
3422 EXT_PHY_OPT_LASI_CNTL,
3423 0x1);
3424 } else {
3425 /* AUTONEG */
3426 /* Allow CL37 through CL73 */
3427 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3428 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3429 EXT_PHY_AUTO_NEG_DEVAD,
3430 EXT_PHY_OPT_AN_CL37_CL73,
3431 0x040c);
3432
3433 /* Enable Full-Duplex advertisement on CL37 */
3434 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3435 EXT_PHY_AUTO_NEG_DEVAD,
3436 EXT_PHY_OPT_AN_CL37_FD,
3437 0x0020);
3438 /* Enable CL37 AN */
3439 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3440 EXT_PHY_AUTO_NEG_DEVAD,
3441 EXT_PHY_OPT_AN_CL37_AN,
3442 0x1000);
3443 /* Advertise 10G/1G support */
3444 if (bp->advertising &
3445 ADVERTISED_1000baseT_Full)
3446 val = (1<<5);
3447 if (bp->advertising &
3448 ADVERTISED_10000baseT_Full)
3449 val |= (1<<7);
3450
3451 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3452 EXT_PHY_AUTO_NEG_DEVAD,
3453 EXT_PHY_OPT_AN_ADV, val);
3454 /* Enable LASI */
3455 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3456 EXT_PHY_OPT_PMA_PMD_DEVAD,
3457 EXT_PHY_OPT_LASI_CNTL,
3458 0x1);
3459
3460 /* Enable clause 73 AN */
3461 bnx2x_mdio45_write(bp, ext_phy_addr,
3462 EXT_PHY_AUTO_NEG_DEVAD,
3463 EXT_PHY_OPT_CNTL,
3464 0x1200);
3465 }
3466 break;
3467
3468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3469 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3470 /* Wait for soft reset to get cleared up to 1 sec */
3471 for (cnt = 0; cnt < 1000; cnt++) {
3472 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3473 ext_phy_addr,
3474 EXT_PHY_OPT_PMA_PMD_DEVAD,
3475 EXT_PHY_OPT_CNTL, &ctrl);
3476 if (!(ctrl & (1<<15)))
3477 break;
3478 msleep(1);
3479 }
3480 DP(NETIF_MSG_LINK,
3481 "8072 control reg 0x%x (after %d ms)\n",
3482 ctrl, cnt);
3483
3484 bnx2x_bcm8072_external_rom_boot(bp);
3485 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3486
3487 /* enable LASI */
3488 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3489 ext_phy_addr,
3490 EXT_PHY_KR_PMA_PMD_DEVAD,
3491 0x9000, 0x0400);
3492 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3493 ext_phy_addr,
3494 EXT_PHY_KR_PMA_PMD_DEVAD,
3495 EXT_PHY_KR_LASI_CNTL, 0x0004);
3496
3497 /* If this is forced speed, set to KR or KX
3498 * (all other are not supported)
3499 */
3500 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3501 if (bp->req_line_speed == SPEED_10000) {
3502 bnx2x_bcm8072_force_10G(bp);
3503 DP(NETIF_MSG_LINK,
3504 "Forced speed 10G on 8072\n");
3505 /* unlock */
3506 bnx2x_hw_unlock(bp,
3507 HW_LOCK_RESOURCE_8072_MDIO);
3508 break;
3509 } else
3510 val = (1<<5);
3511 } else {
3512
3513 /* Advertise 10G/1G support */
3514 if (bp->advertising &
3515 ADVERTISED_1000baseT_Full)
3516 val = (1<<5);
3517 if (bp->advertising &
3518 ADVERTISED_10000baseT_Full)
3519 val |= (1<<7);
3520 }
3521 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3522 ext_phy_addr,
3523 EXT_PHY_KR_AUTO_NEG_DEVAD,
3524 0x11, val);
3525 /* Add support for CL37 ( passive mode ) I */
3526 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3527 ext_phy_addr,
3528 EXT_PHY_KR_AUTO_NEG_DEVAD,
3529 0x8370, 0x040c);
3530 /* Add support for CL37 ( passive mode ) II */
3531 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3532 ext_phy_addr,
3533 EXT_PHY_KR_AUTO_NEG_DEVAD,
3534 0xffe4, 0x20);
3535 /* Add support for CL37 ( passive mode ) III */
3536 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3537 ext_phy_addr,
3538 EXT_PHY_KR_AUTO_NEG_DEVAD,
3539 0xffe0, 0x1000);
3540 /* Restart autoneg */
3541 msleep(500);
3542 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3543 ext_phy_addr,
3544 EXT_PHY_KR_AUTO_NEG_DEVAD,
3545 EXT_PHY_KR_CTRL, 0x1200);
3546 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3547 "1G %ssupported 10G %ssupported\n",
3548 (val & (1<<5)) ? "" : "not ",
3549 (val & (1<<7)) ? "" : "not ");
3550
3551 /* unlock */
3552 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3553 break;
3554
3555 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3556 DP(NETIF_MSG_LINK,
3557 "Setting the SFX7101 LASI indication\n");
3558 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3559 EXT_PHY_OPT_PMA_PMD_DEVAD,
2834 EXT_PHY_OPT_LASI_CNTL, 0x1); 3560 EXT_PHY_OPT_LASI_CNTL, 0x1);
3561 DP(NETIF_MSG_LINK,
3562 "Setting the SFX7101 LED to blink on traffic\n");
3563 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3564 EXT_PHY_OPT_PMA_PMD_DEVAD,
3565 0xC007, (1<<3));
3566
3567 /* read modify write pause advertising */
3568 bnx2x_mdio45_read(bp, ext_phy_addr,
3569 EXT_PHY_KR_AUTO_NEG_DEVAD,
3570 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3571 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3572 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3573 if (bp->advertising & ADVERTISED_Pause)
3574 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3575
3576 if (bp->advertising & ADVERTISED_Asym_Pause) {
3577 val |=
3578 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3579 }
3580 DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3581 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3582 EXT_PHY_KR_AUTO_NEG_DEVAD,
3583 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3584 /* Restart autoneg */
3585 bnx2x_mdio45_read(bp, ext_phy_addr,
3586 EXT_PHY_KR_AUTO_NEG_DEVAD,
3587 EXT_PHY_KR_CTRL, &val);
3588 val |= 0x200;
3589 bnx2x_mdio45_write(bp, ext_phy_addr,
3590 EXT_PHY_KR_AUTO_NEG_DEVAD,
3591 EXT_PHY_KR_CTRL, val);
2835 break; 3592 break;
2836 3593
2837 default: 3594 default:
2838 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 3595 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
2839 bp->ext_phy_config); 3596 bp->ext_phy_config);
2840 break; 3597 break;
2841 } 3598 }
2842 bp->phy_addr = local_phy;
2843 3599
2844 } else { /* SerDes */ 3600 } else { /* SerDes */
2845/* ext_phy_addr = ((bp->ext_phy_config & 3601/* ext_phy_addr = ((bp->ext_phy_config &
2846 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> 3602 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2847 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); 3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2848*/ 3604*/
@@ -2854,10 +3610,6 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2854 3610
2855 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 3611 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2856 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 3612 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2857 bnx2x_bits_en(bp,
2858 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2859 NIG_MASK_MI_INT);
2860 DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
2861 break; 3613 break;
2862 3614
2863 default: 3615 default:
@@ -2871,8 +3623,22 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2871static void bnx2x_ext_phy_reset(struct bnx2x *bp) 3623static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2872{ 3624{
2873 u32 ext_phy_type; 3625 u32 ext_phy_type;
2874 u32 ext_phy_addr; 3626 u32 ext_phy_addr = ((bp->ext_phy_config &
2875 u32 local_phy; 3627 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3629 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3630
3631 /* The PHY reset is controlled by GPIO 1
3632 * Give it 1ms of reset pulse
3633 */
3634 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3635 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3636 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3637 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3638 msleep(1);
3639 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3640 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3641 }
2876 3642
2877 if (bp->phy_flags & PHY_XGXS_FLAG) { 3643 if (bp->phy_flags & PHY_XGXS_FLAG) {
2878 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 3644 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
@@ -2883,15 +3649,24 @@ static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2883 3649
2884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 3650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2886 DP(NETIF_MSG_LINK, "XGXS 8705/6\n"); 3652 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2887 local_phy = bp->phy_addr; 3653 bnx2x_mdio45_write(bp, ext_phy_addr,
2888 ext_phy_addr = ((bp->ext_phy_config & 3654 EXT_PHY_OPT_PMA_PMD_DEVAD,
2889 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2890 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2891 bp->phy_addr = (u8)ext_phy_addr;
2892 bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2893 EXT_PHY_OPT_CNTL, 0xa040); 3655 EXT_PHY_OPT_CNTL, 0xa040);
2894 bp->phy_addr = local_phy; 3656 break;
3657
3658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3659 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3660 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3661 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3662 ext_phy_addr,
3663 EXT_PHY_KR_PMA_PMD_DEVAD,
3664 0, 1<<15);
3665 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3666 break;
3667
3668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3669 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
2895 break; 3670 break;
2896 3671
2897 default: 3672 default:
@@ -2930,6 +3705,7 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
2930 NIG_MASK_SERDES0_LINK_STATUS | 3705 NIG_MASK_SERDES0_LINK_STATUS |
2931 NIG_MASK_MI_INT)); 3706 NIG_MASK_MI_INT));
2932 3707
3708 /* Activate the external PHY */
2933 bnx2x_ext_phy_reset(bp); 3709 bnx2x_ext_phy_reset(bp);
2934 3710
2935 bnx2x_set_aer_mmd(bp); 3711 bnx2x_set_aer_mmd(bp);
@@ -2994,13 +3770,13 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
2994 /* AN enabled */ 3770 /* AN enabled */
2995 bnx2x_set_brcm_cl37_advertisment(bp); 3771 bnx2x_set_brcm_cl37_advertisment(bp);
2996 3772
2997 /* program duplex & pause advertisment (for aneg) */ 3773 /* program duplex & pause advertisement (for aneg) */
2998 bnx2x_set_ieee_aneg_advertisment(bp); 3774 bnx2x_set_ieee_aneg_advertisment(bp);
2999 3775
3000 /* enable autoneg */ 3776 /* enable autoneg */
3001 bnx2x_set_autoneg(bp); 3777 bnx2x_set_autoneg(bp);
3002 3778
3003 /* enalbe and restart AN */ 3779 /* enable and restart AN */
3004 bnx2x_restart_autoneg(bp); 3780 bnx2x_restart_autoneg(bp);
3005 } 3781 }
3006 3782
@@ -3010,11 +3786,11 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
3010 bnx2x_initialize_sgmii_process(bp); 3786 bnx2x_initialize_sgmii_process(bp);
3011 } 3787 }
3012 3788
3013 /* enable the interrupt */
3014 bnx2x_link_int_enable(bp);
3015
3016 /* init ext phy and enable link state int */ 3789 /* init ext phy and enable link state int */
3017 bnx2x_ext_phy_init(bp); 3790 bnx2x_ext_phy_init(bp);
3791
3792 /* enable the interrupt */
3793 bnx2x_link_int_enable(bp);
3018} 3794}
3019 3795
3020static void bnx2x_phy_deassert(struct bnx2x *bp) 3796static void bnx2x_phy_deassert(struct bnx2x *bp)
@@ -3073,6 +3849,11 @@ static int bnx2x_phy_init(struct bnx2x *bp)
3073static void bnx2x_link_reset(struct bnx2x *bp) 3849static void bnx2x_link_reset(struct bnx2x *bp)
3074{ 3850{
3075 int port = bp->port; 3851 int port = bp->port;
3852 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3853
3854 /* update shared memory */
3855 bp->link_status = 0;
3856 bnx2x_update_mng(bp);
3076 3857
3077 /* disable attentions */ 3858 /* disable attentions */
3078 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3859 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
@@ -3081,21 +3862,45 @@ static void bnx2x_link_reset(struct bnx2x *bp)
3081 NIG_MASK_SERDES0_LINK_STATUS | 3862 NIG_MASK_SERDES0_LINK_STATUS |
3082 NIG_MASK_MI_INT)); 3863 NIG_MASK_MI_INT));
3083 3864
3084 bnx2x_ext_phy_reset(bp); 3865 /* activate nig drain */
3866 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3867
3868 /* disable nig egress interface */
3869 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3870 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3871
3872 /* Stop BigMac rx */
3873 bnx2x_bmac_rx_disable(bp);
3874
3875 /* disable emac */
3876 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3877
3878 msleep(10);
3879
3880 /* The PHY reset is controlled by GPIO 1
3881 * Hold it as output low
3882 */
3883 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3884 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3885 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3886 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3887 DP(NETIF_MSG_LINK, "reset external PHY\n");
3888 }
3085 3889
3086 /* reset the SerDes/XGXS */ 3890 /* reset the SerDes/XGXS */
3087 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, 3891 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3088 (0x1ff << (port*16))); 3892 (0x1ff << (port*16)));
3089 3893
3090 /* reset EMAC / BMAC and disable NIG interfaces */ 3894 /* reset BigMac */
3091 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0); 3895 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3092 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0); 3896 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3093 3897
3094 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0); 3898 /* disable nig ingress interface */
3899 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3095 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0); 3900 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3096 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3097 3901
3098 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 3902 /* set link down */
3903 bp->link_up = 0;
3099} 3904}
3100 3905
3101#ifdef BNX2X_XGXS_LB 3906#ifdef BNX2X_XGXS_LB
@@ -3158,7 +3963,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3158 int port = bp->port; 3963 int port = bp->port;
3159 3964
3160 DP(NETIF_MSG_TIMER, 3965 DP(NETIF_MSG_TIMER,
3161 "spe (%x:%x) command %x hw_cid %x data (%x:%x) left %x\n", 3966 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3162 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + 3967 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3163 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 3968 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3164 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); 3969 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
@@ -3176,6 +3981,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3176 bnx2x_panic(); 3981 bnx2x_panic();
3177 return -EBUSY; 3982 return -EBUSY;
3178 } 3983 }
3984
3179 /* CID needs port number to be encoded in it */ 3985 /* CID needs port number to be encoded in it */
3180 bp->spq_prod_bd->hdr.conn_and_cmd_data = 3986 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3181 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | 3987 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
@@ -3282,8 +4088,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3282 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; 4088 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
3283 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4089 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3284 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4090 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3285 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 4091 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3286 NIG_REG_MASK_INTERRUPT_PORT0; 4092 NIG_REG_MASK_INTERRUPT_PORT0;
3287 4093
3288 if (~bp->aeu_mask & (asserted & 0xff)) 4094 if (~bp->aeu_mask & (asserted & 0xff))
3289 BNX2X_ERR("IGU ERROR\n"); 4095 BNX2X_ERR("IGU ERROR\n");
@@ -3301,15 +4107,11 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3301 4107
3302 if (asserted & ATTN_HARD_WIRED_MASK) { 4108 if (asserted & ATTN_HARD_WIRED_MASK) {
3303 if (asserted & ATTN_NIG_FOR_FUNC) { 4109 if (asserted & ATTN_NIG_FOR_FUNC) {
3304 u32 nig_status_port;
3305 u32 nig_int_addr = port ?
3306 NIG_REG_STATUS_INTERRUPT_PORT1 :
3307 NIG_REG_STATUS_INTERRUPT_PORT0;
3308 4110
3309 bp->nig_mask = REG_RD(bp, nig_mask_addr); 4111 /* save nig interrupt mask */
3310 REG_WR(bp, nig_mask_addr, 0); 4112 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
4113 REG_WR(bp, nig_int_mask_addr, 0);
3311 4114
3312 nig_status_port = REG_RD(bp, nig_int_addr);
3313 bnx2x_link_update(bp); 4115 bnx2x_link_update(bp);
3314 4116
3315 /* handle unicore attn? */ 4117 /* handle unicore attn? */
@@ -3362,15 +4164,132 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3362 4164
3363 /* now set back the mask */ 4165 /* now set back the mask */
3364 if (asserted & ATTN_NIG_FOR_FUNC) 4166 if (asserted & ATTN_NIG_FOR_FUNC)
3365 REG_WR(bp, nig_mask_addr, bp->nig_mask); 4167 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
3366} 4168}
3367 4169
3368static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4170static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3369{ 4171{
3370 int port = bp->port; 4172 int port = bp->port;
3371 int index; 4173 int reg_offset;
4174 u32 val;
4175
4176 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4177
4178 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4179 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4180
4181 val = REG_RD(bp, reg_offset);
4182 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4183 REG_WR(bp, reg_offset, val);
4184
4185 BNX2X_ERR("SPIO5 hw attention\n");
4186
4187 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4188 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4189 /* Fan failure attention */
4190
4191 /* The PHY reset is controlled by GPIO 1 */
4192 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4193 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4194 /* Low power mode is controlled by GPIO 2 */
4195 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4196 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4197 /* mark the failure */
4198 bp->ext_phy_config &=
4199 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4200 bp->ext_phy_config |=
4201 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4202 SHMEM_WR(bp,
4203 dev_info.port_hw_config[port].
4204 external_phy_config,
4205 bp->ext_phy_config);
4206 /* log the failure */
4207 printk(KERN_ERR PFX "Fan Failure on Network"
4208 " Controller %s has caused the driver to"
4209 " shutdown the card to prevent permanent"
4210 " damage. Please contact Dell Support for"
4211 " assistance\n", bp->dev->name);
4212 break;
4213
4214 default:
4215 break;
4216 }
4217 }
4218}
4219
4220static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4221{
4222 u32 val;
4223
4224 if (attn & BNX2X_DOORQ_ASSERT) {
4225
4226 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4227 BNX2X_ERR("DB hw attention 0x%x\n", val);
4228 /* DORQ discard attention */
4229 if (val & 0x2)
4230 BNX2X_ERR("FATAL error from DORQ\n");
4231 }
4232}
4233
4234static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235{
4236 u32 val;
4237
4238 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4239
4240 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4241 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4242 /* CFC error attention */
4243 if (val & 0x2)
4244 BNX2X_ERR("FATAL error from CFC\n");
4245 }
4246
4247 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4248
4249 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4250 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4251 /* RQ_USDMDP_FIFO_OVERFLOW */
4252 if (val & 0x18000)
4253 BNX2X_ERR("FATAL error from PXP\n");
4254 }
4255}
4256
4257static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4258{
4259 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4260
4261 if (attn & BNX2X_MC_ASSERT_BITS) {
4262
4263 BNX2X_ERR("MC assert!\n");
4264 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4266 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4267 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4268 bnx2x_panic();
4269
4270 } else if (attn & BNX2X_MCP_ASSERT) {
4271
4272 BNX2X_ERR("MCP assert!\n");
4273 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4274 bnx2x_mc_assert(bp);
4275
4276 } else
4277 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4278 }
4279
4280 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4281
4282 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4283 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
4284 }
4285}
4286
4287static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4288{
3372 struct attn_route attn; 4289 struct attn_route attn;
3373 struct attn_route group_mask; 4290 struct attn_route group_mask;
4291 int port = bp->port;
4292 int index;
3374 u32 reg_addr; 4293 u32 reg_addr;
3375 u32 val; 4294 u32 val;
3376 4295
@@ -3391,64 +4310,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3391 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, 4310 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3392 (unsigned long long)group_mask.sig[0]); 4311 (unsigned long long)group_mask.sig[0]);
3393 4312
3394 if (attn.sig[3] & group_mask.sig[3] & 4313 bnx2x_attn_int_deasserted3(bp,
3395 EVEREST_GEN_ATTN_IN_USE_MASK) { 4314 attn.sig[3] & group_mask.sig[3]);
3396 4315 bnx2x_attn_int_deasserted1(bp,
3397 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) { 4316 attn.sig[1] & group_mask.sig[1]);
3398 4317 bnx2x_attn_int_deasserted2(bp,
3399 BNX2X_ERR("MC assert!\n"); 4318 attn.sig[2] & group_mask.sig[2]);
3400 bnx2x_panic(); 4319 bnx2x_attn_int_deasserted0(bp,
3401 4320 attn.sig[0] & group_mask.sig[0]);
3402 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3403
3404 BNX2X_ERR("MCP assert!\n");
3405 REG_WR(bp,
3406 MISC_REG_AEU_GENERAL_ATTN_11, 0);
3407 bnx2x_mc_assert(bp);
3408
3409 } else {
3410 BNX2X_ERR("UNKOWEN HW ASSERT!\n");
3411 }
3412 }
3413
3414 if (attn.sig[1] & group_mask.sig[1] &
3415 BNX2X_DOORQ_ASSERT) {
3416
3417 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3418 BNX2X_ERR("DB hw attention 0x%x\n", val);
3419 /* DORQ discard attention */
3420 if (val & 0x2)
3421 BNX2X_ERR("FATAL error from DORQ\n");
3422 }
3423
3424 if (attn.sig[2] & group_mask.sig[2] &
3425 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3426
3427 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3428 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3429 /* CFC error attention */
3430 if (val & 0x2)
3431 BNX2X_ERR("FATAL error from CFC\n");
3432 }
3433
3434 if (attn.sig[2] & group_mask.sig[2] &
3435 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3436
3437 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3438 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3439 /* RQ_USDMDP_FIFO_OVERFLOW */
3440 if (val & 0x18000)
3441 BNX2X_ERR("FATAL error from PXP\n");
3442 }
3443
3444 if (attn.sig[3] & group_mask.sig[3] &
3445 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3446
3447 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3448 0x7ff);
3449 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3450 attn.sig[3]);
3451 }
3452 4321
3453 if ((attn.sig[0] & group_mask.sig[0] & 4322 if ((attn.sig[0] & group_mask.sig[0] &
3454 HW_INTERRUT_ASSERT_SET_0) || 4323 HW_INTERRUT_ASSERT_SET_0) ||
@@ -3456,7 +4325,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456 HW_INTERRUT_ASSERT_SET_1) || 4325 HW_INTERRUT_ASSERT_SET_1) ||
3457 (attn.sig[2] & group_mask.sig[2] & 4326 (attn.sig[2] & group_mask.sig[2] &
3458 HW_INTERRUT_ASSERT_SET_2)) 4327 HW_INTERRUT_ASSERT_SET_2))
3459 BNX2X_ERR("FATAL HW block attention\n"); 4328 BNX2X_ERR("FATAL HW block attention"
4329 " set0 0x%x set1 0x%x"
4330 " set2 0x%x\n",
4331 (attn.sig[0] & group_mask.sig[0] &
4332 HW_INTERRUT_ASSERT_SET_0),
4333 (attn.sig[1] & group_mask.sig[1] &
4334 HW_INTERRUT_ASSERT_SET_1),
4335 (attn.sig[2] & group_mask.sig[2] &
4336 HW_INTERRUT_ASSERT_SET_2));
3460 4337
3461 if ((attn.sig[0] & group_mask.sig[0] & 4338 if ((attn.sig[0] & group_mask.sig[0] &
3462 HW_PRTY_ASSERT_SET_0) || 4339 HW_PRTY_ASSERT_SET_0) ||
@@ -3464,7 +4341,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3464 HW_PRTY_ASSERT_SET_1) || 4341 HW_PRTY_ASSERT_SET_1) ||
3465 (attn.sig[2] & group_mask.sig[2] & 4342 (attn.sig[2] & group_mask.sig[2] &
3466 HW_PRTY_ASSERT_SET_2)) 4343 HW_PRTY_ASSERT_SET_2))
3467 BNX2X_ERR("FATAL HW block parity atention\n"); 4344 BNX2X_ERR("FATAL HW block parity attention\n");
3468 } 4345 }
3469 } 4346 }
3470 4347
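A condensed sketch of the dispatch now used by bnx2x_attn_int_deasserted(): for every asserted group, each per-register helper only ever sees the bits that are both raised in the routed attention word and enabled in that group's mask. The array size and handler here are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define ATTN_REGS 4

struct attn_route {
	uint32_t sig[ATTN_REGS];
};

/* Illustrative handler; the driver has one per attention register
 * (bnx2x_attn_int_deasserted0..3). */
static void handle_reg(int reg, uint32_t bits)
{
	if (bits)
		printf("reg %d: handling bits 0x%x\n", reg, (unsigned)bits);
}

static void dispatch_deasserted(const struct attn_route *attn,
				const struct attn_route *group_mask)
{
	/* mask each register's attention bits with the group's enable
	 * mask before handing them to the per-register handler */
	for (int reg = 0; reg < ATTN_REGS; reg++)
		handle_reg(reg, attn->sig[reg] & group_mask->sig[reg]);
}

int main(void)
{
	struct attn_route attn = { { 0x5, 0x2, 0x0, 0x8 } };
	struct attn_route mask = { { 0x4, 0x3, 0xf, 0x0 } };

	dispatch_deasserted(&attn, &mask);
	return 0;
}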
@@ -3529,7 +4406,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3529 4406
3530 /* Return here if interrupt is disabled */ 4407 /* Return here if interrupt is disabled */
3531 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 4408 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3532 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 4409 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
3533 return; 4410 return;
3534 } 4411 }
3535 4412
@@ -3539,12 +4416,11 @@ static void bnx2x_sp_task(struct work_struct *work)
3539 4416
3540 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); 4417 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3541 4418
3542 if (status & 0x1) { 4419 /* HW attentions */
3543 /* HW attentions */ 4420 if (status & 0x1)
3544 bnx2x_attn_int(bp); 4421 bnx2x_attn_int(bp);
3545 }
3546 4422
3547 /* CStorm events: query_stats, cfc delete ramrods */ 4423 /* CStorm events: query_stats, port delete ramrod */
3548 if (status & 0x2) 4424 if (status & 0x2)
3549 bp->stat_pending = 0; 4425 bp->stat_pending = 0;
3550 4426
@@ -3558,6 +4434,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3558 IGU_INT_NOP, 1); 4434 IGU_INT_NOP, 1);
3559 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), 4435 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3560 IGU_INT_ENABLE, 1); 4436 IGU_INT_ENABLE, 1);
4437
3561} 4438}
3562 4439
3563static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 4440static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3567,11 +4444,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3567 4444
3568 /* Return here if interrupt is disabled */ 4445 /* Return here if interrupt is disabled */
3569 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 4446 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3570 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 4447 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
3571 return IRQ_HANDLED; 4448 return IRQ_HANDLED;
3572 } 4449 }
3573 4450
3574 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0); 4451 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3575 4452
3576#ifdef BNX2X_STOP_ON_ERROR 4453#ifdef BNX2X_STOP_ON_ERROR
3577 if (unlikely(bp->panic)) 4454 if (unlikely(bp->panic))
@@ -3906,7 +4783,7 @@ static void bnx2x_stop_stats(struct bnx2x *bp)
3906 4783
3907 while (bp->stats_state != STATS_STATE_DISABLE) { 4784 while (bp->stats_state != STATS_STATE_DISABLE) {
3908 if (!timeout) { 4785 if (!timeout) {
3909 BNX2X_ERR("timeout wating for stats stop\n"); 4786 BNX2X_ERR("timeout waiting for stats stop\n");
3910 break; 4787 break;
3911 } 4788 }
3912 timeout--; 4789 timeout--;
@@ -4173,39 +5050,37 @@ static void bnx2x_update_net_stats(struct bnx2x *bp)
4173 5050
4174 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); 5051 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4175 5052
4176 nstats->tx_bytes = 5053 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4177 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4178 5054
4179 nstats->rx_dropped = estats->checksum_discard + 5055 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
4180 estats->mac_discard;
4181 nstats->tx_dropped = 0; 5056 nstats->tx_dropped = 0;
4182 5057
4183 nstats->multicast = 5058 nstats->multicast =
4184 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi); 5059 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4185 5060
4186 nstats->collisions = 5061 nstats->collisions = estats->single_collision_transmit_frames +
4187 estats->single_collision_transmit_frames + 5062 estats->multiple_collision_transmit_frames +
4188 estats->multiple_collision_transmit_frames + 5063 estats->late_collision_frames +
4189 estats->late_collision_frames + 5064 estats->excessive_collision_frames;
4190 estats->excessive_collision_frames;
4191 5065
4192 nstats->rx_length_errors = estats->runt_packets_received + 5066 nstats->rx_length_errors = estats->runt_packets_received +
4193 estats->jabber_packets_received; 5067 estats->jabber_packets_received;
4194 nstats->rx_over_errors = estats->no_buff_discard; 5068 nstats->rx_over_errors = estats->brb_discard +
5069 estats->brb_truncate_discard;
4195 nstats->rx_crc_errors = estats->crc_receive_errors; 5070 nstats->rx_crc_errors = estats->crc_receive_errors;
4196 nstats->rx_frame_errors = estats->alignment_errors; 5071 nstats->rx_frame_errors = estats->alignment_errors;
4197 nstats->rx_fifo_errors = estats->brb_discard + 5072 nstats->rx_fifo_errors = estats->no_buff_discard;
4198 estats->brb_truncate_discard;
4199 nstats->rx_missed_errors = estats->xxoverflow_discard; 5073 nstats->rx_missed_errors = estats->xxoverflow_discard;
4200 5074
4201 nstats->rx_errors = nstats->rx_length_errors + 5075 nstats->rx_errors = nstats->rx_length_errors +
4202 nstats->rx_over_errors + 5076 nstats->rx_over_errors +
4203 nstats->rx_crc_errors + 5077 nstats->rx_crc_errors +
4204 nstats->rx_frame_errors + 5078 nstats->rx_frame_errors +
4205 nstats->rx_fifo_errors; 5079 nstats->rx_fifo_errors +
5080 nstats->rx_missed_errors;
4206 5081
4207 nstats->tx_aborted_errors = estats->late_collision_frames + 5082 nstats->tx_aborted_errors = estats->late_collision_frames +
4208 estats->excessive_collision_frames; 5083 estats->excessive_collision_frames;
4209 nstats->tx_carrier_errors = estats->false_carrier_detections; 5084 nstats->tx_carrier_errors = estats->false_carrier_detections;
4210 nstats->tx_fifo_errors = 0; 5085 nstats->tx_fifo_errors = 0;
4211 nstats->tx_heartbeat_errors = 0; 5086 nstats->tx_heartbeat_errors = 0;
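The counters copied in this hunk are kept by the chip as split 32-bit high/low words; bnx2x_hilo() recombines each pair into a 64-bit value before the per-category sums are formed. A minimal sketch of that recombination, assuming the same hi-then-lo layout:

#include <stdint.h>
#include <stdio.h>

/* A hi/lo pair as the stats block stores it: the high 32 bits
 * immediately followed by the low 32 bits. */
struct hilo {
	uint32_t hi;
	uint32_t lo;
};

/* Rebuild the 64-bit counter from its two halves. */
static uint64_t hilo(const struct hilo *h)
{
	return ((uint64_t)h->hi << 32) | h->lo;
}

int main(void)
{
	struct hilo bytes_received = { .hi = 0x1, .lo = 0x2345 };

	/* 0x100002345 bytes once the halves are recombined */
	printf("rx_bytes = %llu\n",
	       (unsigned long long)hilo(&bytes_received));
	return 0;
}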
@@ -4334,7 +5209,7 @@ static void bnx2x_timer(unsigned long data)
4334 return; 5209 return;
4335 5210
4336 if (atomic_read(&bp->intr_sem) != 0) 5211 if (atomic_read(&bp->intr_sem) != 0)
4337 goto bnx2x_restart_timer; 5212 goto timer_restart;
4338 5213
4339 if (poll) { 5214 if (poll) {
4340 struct bnx2x_fastpath *fp = &bp->fp[0]; 5215 struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -4344,7 +5219,7 @@ static void bnx2x_timer(unsigned long data)
4344 rc = bnx2x_rx_int(fp, 1000); 5219 rc = bnx2x_rx_int(fp, 1000);
4345 } 5220 }
4346 5221
4347 if (!nomcp && (bp->bc_ver >= 0x040003)) { 5222 if (!nomcp) {
4348 int port = bp->port; 5223 int port = bp->port;
4349 u32 drv_pulse; 5224 u32 drv_pulse;
4350 u32 mcp_pulse; 5225 u32 mcp_pulse;
@@ -4353,9 +5228,9 @@ static void bnx2x_timer(unsigned long data)
4353 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5228 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4354 /* TBD - add SYSTEM_TIME */ 5229 /* TBD - add SYSTEM_TIME */
4355 drv_pulse = bp->fw_drv_pulse_wr_seq; 5230 drv_pulse = bp->fw_drv_pulse_wr_seq;
4356 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse); 5231 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
4357 5232
4358 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) & 5233 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
4359 MCP_PULSE_SEQ_MASK); 5234 MCP_PULSE_SEQ_MASK);
4360 /* The delta between driver pulse and mcp response 5235 /* The delta between driver pulse and mcp response
4361 * should be 1 (before mcp response) or 0 (after mcp response) 5236 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -4369,11 +5244,11 @@ static void bnx2x_timer(unsigned long data)
4369 } 5244 }
4370 5245
4371 if (bp->stats_state == STATS_STATE_DISABLE) 5246 if (bp->stats_state == STATS_STATE_DISABLE)
4372 goto bnx2x_restart_timer; 5247 goto timer_restart;
4373 5248
4374 bnx2x_update_stats(bp); 5249 bnx2x_update_stats(bp);
4375 5250
4376bnx2x_restart_timer: 5251timer_restart:
4377 mod_timer(&bp->timer, jiffies + bp->current_interval); 5252 mod_timer(&bp->timer, jiffies + bp->current_interval);
4378} 5253}
4379 5254
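The heartbeat check in bnx2x_timer() above tolerates the management CPU lagging the driver by exactly one step: after masking both counters to the pulse-sequence width, the delta must be 0 or 1, otherwise the pulses have drifted. A small sketch of that comparison, assuming a 16-bit sequence mask (the real DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK come from the shared-memory interface):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed width for illustration only. */
#define PULSE_SEQ_MASK 0xffffu

/* The delta between driver pulse and MCP response should be
 * 1 (before the MCP answered) or 0 (after it answered). */
static bool pulses_in_sync(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	uint32_t delta = (drv_pulse - mcp_pulse) & PULSE_SEQ_MASK;

	return delta <= 1;
}

int main(void)
{
	printf("%d\n", pulses_in_sync(0x0005, 0x0004));	/* 1: ok    */
	printf("%d\n", pulses_in_sync(0x0000, 0xffff));	/* 1: wrap  */
	printf("%d\n", pulses_in_sync(0x0009, 0x0002));	/* 0: drift */
	return 0;
}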
@@ -4438,6 +5313,9 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4438 atten_status_block); 5313 atten_status_block);
4439 def_sb->atten_status_block.status_block_id = id; 5314 def_sb->atten_status_block.status_block_id = id;
4440 5315
5316 bp->def_att_idx = 0;
5317 bp->attn_state = 0;
5318
4441 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5319 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4442 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5320 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4443 5321
@@ -4472,6 +5350,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4472 u_def_status_block); 5350 u_def_status_block);
4473 def_sb->u_def_status_block.status_block_id = id; 5351 def_sb->u_def_status_block.status_block_id = id;
4474 5352
5353 bp->def_u_idx = 0;
5354
4475 REG_WR(bp, BAR_USTRORM_INTMEM + 5355 REG_WR(bp, BAR_USTRORM_INTMEM +
4476 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5356 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4477 REG_WR(bp, BAR_USTRORM_INTMEM + 5357 REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -4489,6 +5369,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4489 c_def_status_block); 5369 c_def_status_block);
4490 def_sb->c_def_status_block.status_block_id = id; 5370 def_sb->c_def_status_block.status_block_id = id;
4491 5371
5372 bp->def_c_idx = 0;
5373
4492 REG_WR(bp, BAR_CSTRORM_INTMEM + 5374 REG_WR(bp, BAR_CSTRORM_INTMEM +
4493 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5375 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4494 REG_WR(bp, BAR_CSTRORM_INTMEM + 5376 REG_WR(bp, BAR_CSTRORM_INTMEM +
@@ -4506,6 +5388,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4506 t_def_status_block); 5388 t_def_status_block);
4507 def_sb->t_def_status_block.status_block_id = id; 5389 def_sb->t_def_status_block.status_block_id = id;
4508 5390
5391 bp->def_t_idx = 0;
5392
4509 REG_WR(bp, BAR_TSTRORM_INTMEM + 5393 REG_WR(bp, BAR_TSTRORM_INTMEM +
4510 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5394 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4511 REG_WR(bp, BAR_TSTRORM_INTMEM + 5395 REG_WR(bp, BAR_TSTRORM_INTMEM +
@@ -4523,6 +5407,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4523 x_def_status_block); 5407 x_def_status_block);
4524 def_sb->x_def_status_block.status_block_id = id; 5408 def_sb->x_def_status_block.status_block_id = id;
4525 5409
5410 bp->def_x_idx = 0;
5411
4526 REG_WR(bp, BAR_XSTRORM_INTMEM + 5412 REG_WR(bp, BAR_XSTRORM_INTMEM +
4527 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5413 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4528 REG_WR(bp, BAR_XSTRORM_INTMEM + 5414 REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -4535,6 +5421,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4535 REG_WR16(bp, BAR_XSTRORM_INTMEM + 5421 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4536 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); 5422 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4537 5423
5424 bp->stat_pending = 0;
5425
4538 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 5426 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4539} 5427}
4540 5428
@@ -4626,7 +5514,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4626 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod; 5514 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
4627 fp->rx_pkt = fp->rx_calls = 0; 5515 fp->rx_pkt = fp->rx_calls = 0;
4628 5516
4629 /* Warning! this will genrate an interrupt (to the TSTORM) */ 5517 /* Warning! this will generate an interrupt (to the TSTORM) */
4630 /* must only be done when chip is initialized */ 5518 /* must only be done when chip is initialized */
4631 REG_WR(bp, BAR_TSTRORM_INTMEM + 5519 REG_WR(bp, BAR_TSTRORM_INTMEM +
4632 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); 5520 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
@@ -4678,7 +5566,6 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp)
4678 5566
4679 bp->spq_left = MAX_SPQ_PENDING; 5567 bp->spq_left = MAX_SPQ_PENDING;
4680 bp->spq_prod_idx = 0; 5568 bp->spq_prod_idx = 0;
4681 bp->dsb_sp_prod_idx = 0;
4682 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 5569 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4683 bp->spq_prod_bd = bp->spq; 5570 bp->spq_prod_bd = bp->spq;
4684 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 5571 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
@@ -4755,6 +5642,42 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4755 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5642 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4756} 5643}
4757 5644
5645static void bnx2x_set_client_config(struct bnx2x *bp)
5646{
5647#ifdef BCM_VLAN
5648 int mode = bp->rx_mode;
5649#endif
5650 int i, port = bp->port;
5651 struct tstorm_eth_client_config tstorm_client = {0};
5652
5653 tstorm_client.mtu = bp->dev->mtu;
5654 tstorm_client.statistics_counter_id = 0;
5655 tstorm_client.config_flags =
5656 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5657#ifdef BCM_VLAN
5658 if (mode && bp->vlgrp) {
5659 tstorm_client.config_flags |=
5660 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5661 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5662 }
5663#endif
5664 if (mode != BNX2X_RX_MODE_PROMISC)
5665 tstorm_client.drop_flags =
5666 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5667
5668 for_each_queue(bp, i) {
5669 REG_WR(bp, BAR_TSTRORM_INTMEM +
5670 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5671 ((u32 *)&tstorm_client)[0]);
5672 REG_WR(bp, BAR_TSTRORM_INTMEM +
5673 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5674 ((u32 *)&tstorm_client)[1]);
5675 }
5676
5677/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5678 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5679}
5680
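bnx2x_set_client_config() above pushes the whole tstorm_eth_client_config structure into storm memory as two consecutive 32-bit writes per queue. A toy illustration of that struct-to-words copy; the structure layout here is a stand-in, not the firmware interface definition:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-in client config layout: 8 bytes total so it maps onto exactly
 * two 32-bit register writes. */
struct client_config {
	uint16_t mtu;
	uint8_t  stats_counter_id;
	uint8_t  config_flags;
	uint16_t drop_flags;
	uint16_t reserved;
};

/* Emit the structure as the two words the driver writes with
 * REG_WR(..., offset) and REG_WR(..., offset + 4). */
static void write_client_config(const struct client_config *cfg,
				uint32_t words[2])
{
	memcpy(words, cfg, sizeof(*cfg));
}

int main(void)
{
	struct client_config cfg = { .mtu = 1500, .config_flags = 0x1 };
	uint32_t words[2];

	write_client_config(&cfg, words);
	printf("word0=0x%08x word1=0x%08x\n",
	       (unsigned)words[0], (unsigned)words[1]);
	return 0;
}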
4758static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 5681static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4759{ 5682{
4760 int mode = bp->rx_mode; 5683 int mode = bp->rx_mode;
@@ -4794,41 +5717,9 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4794/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, 5717/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4795 ((u32 *)&tstorm_mac_filter)[i]); */ 5718 ((u32 *)&tstorm_mac_filter)[i]); */
4796 } 5719 }
4797}
4798 5720
4799static void bnx2x_set_client_config(struct bnx2x *bp, int client_id) 5721 if (mode != BNX2X_RX_MODE_NONE)
4800{ 5722 bnx2x_set_client_config(bp);
4801#ifdef BCM_VLAN
4802 int mode = bp->rx_mode;
4803#endif
4804 int port = bp->port;
4805 struct tstorm_eth_client_config tstorm_client = {0};
4806
4807 tstorm_client.mtu = bp->dev->mtu;
4808 tstorm_client.statistics_counter_id = 0;
4809 tstorm_client.config_flags =
4810 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4811#ifdef BCM_VLAN
4812 if (mode && bp->vlgrp) {
4813 tstorm_client.config_flags |=
4814 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4815 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4816 }
4817#endif
4818 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4819 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4820 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4821 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4822
4823 REG_WR(bp, BAR_TSTRORM_INTMEM +
4824 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4825 ((u32 *)&tstorm_client)[0]);
4826 REG_WR(bp, BAR_TSTRORM_INTMEM +
4827 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4828 ((u32 *)&tstorm_client)[1]);
4829
4830/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4831 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4832} 5723}
4833 5724
4834static void bnx2x_init_internal(struct bnx2x *bp) 5725static void bnx2x_init_internal(struct bnx2x *bp)
@@ -4836,7 +5727,6 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4836 int port = bp->port; 5727 int port = bp->port;
4837 struct tstorm_eth_function_common_config tstorm_config = {0}; 5728 struct tstorm_eth_function_common_config tstorm_config = {0};
4838 struct stats_indication_flags stats_flags = {0}; 5729 struct stats_indication_flags stats_flags = {0};
4839 int i;
4840 5730
4841 if (is_multi(bp)) { 5731 if (is_multi(bp)) {
4842 tstorm_config.config_flags = MULTI_FLAGS; 5732 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4850,13 +5740,9 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4850/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", 5740/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4851 (*(u32 *)&tstorm_config)); */ 5741 (*(u32 *)&tstorm_config)); */
4852 5742
4853 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx untill link is up */ 5743 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4854 bnx2x_set_storm_rx_mode(bp); 5744 bnx2x_set_storm_rx_mode(bp);
4855 5745
4856 for_each_queue(bp, i)
4857 bnx2x_set_client_config(bp, i);
4858
4859
4860 stats_flags.collect_eth = cpu_to_le32(1); 5746 stats_flags.collect_eth = cpu_to_le32(1);
4861 5747
4862 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 5748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
@@ -4902,7 +5788,7 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4902 bnx2x_init_internal(bp); 5788 bnx2x_init_internal(bp);
4903 bnx2x_init_stats(bp); 5789 bnx2x_init_stats(bp);
4904 bnx2x_init_ind_table(bp); 5790 bnx2x_init_ind_table(bp);
4905 bnx2x_enable_int(bp); 5791 bnx2x_int_enable(bp);
4906 5792
4907} 5793}
4908 5794
@@ -5265,8 +6151,10 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5265 if (mode & 0x1) { /* init common */ 6151 if (mode & 0x1) { /* init common */
5266 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", 6152 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
5267 func, mode); 6153 func, mode);
5268 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff); 6154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5269 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc); 6155 0xffffffff);
6156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6157 0xfffc);
5270 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); 6158 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5271 6159
5272 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 6160 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
@@ -5359,7 +6247,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5359 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); 6247 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5360#endif 6248#endif
5361 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); 6249 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5362 /* softrest pulse */ 6250 /* soft reset pulse */
5363 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6251 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5364 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6252 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5365 6253
@@ -5413,7 +6301,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5413 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6301 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5414 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { 6302 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5415 REG_WR(bp, i, 0xc0cac01a); 6303 REG_WR(bp, i, 0xc0cac01a);
5416 /* TODO: repleace with something meaningfull */ 6304 /* TODO: replace with something meaningful */
5417 } 6305 }
5418 /* SRCH COMMON comes here */ 6306 /* SRCH COMMON comes here */
5419 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6307 REG_WR(bp, SRC_REG_SOFT_RST, 0);
@@ -5486,6 +6374,28 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5486 enable_blocks_attention(bp); 6374 enable_blocks_attention(bp);
5487 /* enable_blocks_parity(bp); */ 6375 /* enable_blocks_parity(bp); */
5488 6376
6377 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6379 /* Fan failure is indicated by SPIO 5 */
6380 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6381 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6382
6383 /* set to active low mode */
6384 val = REG_RD(bp, MISC_REG_SPIO_INT);
6385 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6386 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6387 REG_WR(bp, MISC_REG_SPIO_INT, val);
6388
6389 /* enable interrupt to signal the IGU */
6390 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6391 val |= (1 << MISC_REGISTERS_SPIO_5);
6392 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6393 break;
6394
6395 default:
6396 break;
6397 }
6398
5489 } /* end of common init */ 6399 } /* end of common init */
5490 6400
5491 /* per port init */ 6401 /* per port init */
@@ -5645,9 +6555,21 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5645 /* Port MCP comes here */ 6555 /* Port MCP comes here */
5646 /* Port DMAE comes here */ 6556 /* Port DMAE comes here */
5647 6557
6558 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6559 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6560 /* add SPIO 5 to group 0 */
6561 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6562 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6563 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6564 break;
6565
6566 default:
6567 break;
6568 }
6569
5648 bnx2x_link_reset(bp); 6570 bnx2x_link_reset(bp);
5649 6571
5650 /* Reset pciex errors for debug */ 6572 /* Reset PCIE errors for debug */
5651 REG_WR(bp, 0x2114, 0xffffffff); 6573 REG_WR(bp, 0x2114, 0xffffffff);
5652 REG_WR(bp, 0x2120, 0xffffffff); 6574 REG_WR(bp, 0x2120, 0xffffffff);
5653 REG_WR(bp, 0x2814, 0xffffffff); 6575 REG_WR(bp, 0x2814, 0xffffffff);
@@ -5669,9 +6591,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5669 port = bp->port; 6591 port = bp->port;
5670 6592
5671 bp->fw_drv_pulse_wr_seq = 6593 bp->fw_drv_pulse_wr_seq =
5672 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) & 6594 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
5673 DRV_PULSE_SEQ_MASK); 6595 DRV_PULSE_SEQ_MASK);
5674 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param); 6596 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
5675 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", 6597 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
5676 bp->fw_drv_pulse_wr_seq, bp->fw_mb); 6598 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5677 } else { 6599 } else {
@@ -5681,16 +6603,15 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5681 return 0; 6603 return 0;
5682} 6604}
5683 6605
5684 6606/* send the MCP a request, block until there is a reply */
5685/* send the MCP a request, block untill there is a reply */
5686static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) 6607static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687{ 6608{
5688 u32 rc = 0;
5689 u32 seq = ++bp->fw_seq;
5690 int port = bp->port; 6609 int port = bp->port;
6610 u32 seq = ++bp->fw_seq;
6611 u32 rc = 0;
5691 6612
5692 SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq); 6613 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq); 6614 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5694 6615
 5695 /* let the FW do its magic ... */ 6616 /* let the FW do its magic ... */
5696 msleep(100); /* TBD */ 6617 msleep(100); /* TBD */
@@ -5698,19 +6619,20 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5698 if (CHIP_REV_IS_SLOW(bp)) 6619 if (CHIP_REV_IS_SLOW(bp))
5699 msleep(900); 6620 msleep(900);
5700 6621
5701 rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header); 6622 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
5702
5703 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 6623 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5704 6624
5705 /* is this a reply to our command? */ 6625 /* is this a reply to our command? */
5706 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 6626 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707 rc &= FW_MSG_CODE_MASK; 6627 rc &= FW_MSG_CODE_MASK;
6628
5708 } else { 6629 } else {
5709 /* FW BUG! */ 6630 /* FW BUG! */
5710 BNX2X_ERR("FW failed to respond!\n"); 6631 BNX2X_ERR("FW failed to respond!\n");
5711 bnx2x_fw_dump(bp); 6632 bnx2x_fw_dump(bp);
5712 rc = 0; 6633 rc = 0;
5713 } 6634 }
6635
5714 return rc; 6636 return rc;
5715} 6637}
5716 6638
@@ -5869,7 +6791,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
5869 for (i = 0; i < 16*1024; i += 64) 6791 for (i = 0; i < 16*1024; i += 64)
5870 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; 6792 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871 6793
5872 /* now sixup the last line in the block to point to the next block */ 6794 /* now fixup the last line in the block to point to the next block */
5873 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping; 6795 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5874 6796
5875 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ 6797 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
@@ -5950,22 +6872,19 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5950 int i; 6872 int i;
5951 6873
5952 free_irq(bp->msix_table[0].vector, bp->dev); 6874 free_irq(bp->msix_table[0].vector, bp->dev);
5953 DP(NETIF_MSG_IFDOWN, "rleased sp irq (%d)\n", 6875 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954 bp->msix_table[0].vector); 6876 bp->msix_table[0].vector);
5955 6877
5956 for_each_queue(bp, i) { 6878 for_each_queue(bp, i) {
5957 DP(NETIF_MSG_IFDOWN, "about to rlease fp #%d->%d irq " 6879 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5958 "state(%x)\n", i, bp->msix_table[i + 1].vector, 6880 "state(%x)\n", i, bp->msix_table[i + 1].vector,
5959 bnx2x_fp(bp, i, state)); 6881 bnx2x_fp(bp, i, state));
5960 6882
5961 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) { 6883 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962 6884 BNX2X_ERR("IRQ of fp #%d being freed while "
5963 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); 6885 "state != closed\n", i);
5964 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
5965
5966 } else
5967 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
5968 6886
6887 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
5969 } 6888 }
5970 6889
5971} 6890}
@@ -5995,7 +6914,7 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
5995 6914
5996 if (pci_enable_msix(bp->pdev, &bp->msix_table[0], 6915 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997 bp->num_queues + 1)){ 6916 bp->num_queues + 1)){
5998 BNX2X_ERR("failed to enable msix\n"); 6917 BNX2X_LOG("failed to enable MSI-X\n");
5999 return -1; 6918 return -1;
6000 6919
6001 } 6920 }
@@ -6010,11 +6929,8 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6010static int bnx2x_req_msix_irqs(struct bnx2x *bp) 6929static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6011{ 6930{
6012 6931
6013
6014 int i, rc; 6932 int i, rc;
6015 6933
6016 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6017
6018 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, 6934 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6019 bp->dev->name, bp->dev); 6935 bp->dev->name, bp->dev);
6020 6936
@@ -6029,7 +6945,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6029 bp->dev->name, &bp->fp[i]); 6945 bp->dev->name, &bp->fp[i]);
6030 6946
6031 if (rc) { 6947 if (rc) {
6032 BNX2X_ERR("request fp #%d irq failed\n", i); 6948 BNX2X_ERR("request fp #%d irq failed "
6949 "rc %d\n", i, rc);
6033 bnx2x_free_msix_irqs(bp); 6950 bnx2x_free_msix_irqs(bp);
6034 return -EBUSY; 6951 return -EBUSY;
6035 } 6952 }
@@ -6109,8 +7026,8 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6109 /* can take a while if any port is running */ 7026 /* can take a while if any port is running */
6110 int timeout = 500; 7027 int timeout = 500;
6111 7028
6112 /* DP("waiting for state to become %d on IDX [%d]\n", 7029 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6113 state, sb_idx); */ 7030 poll ? "polling" : "waiting", state, idx);
6114 7031
6115 might_sleep(); 7032 might_sleep();
6116 7033
@@ -6128,7 +7045,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6128 7045
6129 mb(); /* state is changed by bnx2x_sp_event()*/ 7046 mb(); /* state is changed by bnx2x_sp_event()*/
6130 7047
6131 if (*state_p != state) 7048 if (*state_p == state)
6132 return 0; 7049 return 0;
6133 7050
6134 timeout--; 7051 timeout--;
@@ -6136,17 +7053,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6136 7053
6137 } 7054 }
6138 7055
6139
6140 /* timeout! */ 7056 /* timeout! */
6141 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx); 7057 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6142 return -EBUSY; 7058 poll ? "polling" : "waiting", state, idx);
6143 7059
7060 return -EBUSY;
6144} 7061}
6145 7062
6146static int bnx2x_setup_leading(struct bnx2x *bp) 7063static int bnx2x_setup_leading(struct bnx2x *bp)
6147{ 7064{
6148 7065
6149 /* reset IGU staae */ 7066 /* reset IGU state */
6150 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 7067 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6151 7068
6152 /* SETUP ramrod */ 7069 /* SETUP ramrod */
@@ -6162,12 +7079,13 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6162 /* reset IGU state */ 7079 /* reset IGU state */
6163 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 7080 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6164 7081
7082 /* SETUP ramrod */
6165 bp->fp[index].state = BNX2X_FP_STATE_OPENING; 7083 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6166 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0); 7084 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6167 7085
6168 /* Wait for completion */ 7086 /* Wait for completion */
6169 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, 7087 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6170 &(bp->fp[index].state), 1); 7088 &(bp->fp[index].state), 0);
6171 7089
6172} 7090}
6173 7091
@@ -6177,8 +7095,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6177 7095
6178static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) 7096static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6179{ 7097{
6180 int rc; 7098 u32 load_code;
6181 int i = 0; 7099 int i;
6182 7100
6183 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7101 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6184 7102
@@ -6188,26 +7106,28 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6188 initialized, otherwise - not. 7106 initialized, otherwise - not.
6189 */ 7107 */
6190 if (!nomcp) { 7108 if (!nomcp) {
6191 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 7109 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6192 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7110 if (!load_code) {
7111 BNX2X_ERR("MCP response failure, unloading\n");
7112 return -EBUSY;
7113 }
7114 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7115 BNX2X_ERR("MCP refused load request, unloading\n");
6193 return -EBUSY; /* other port in diagnostic mode */ 7116 return -EBUSY; /* other port in diagnostic mode */
6194 } 7117 }
6195 } else { 7118 } else {
6196 rc = FW_MSG_CODE_DRV_LOAD_COMMON; 7119 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6197 } 7120 }
6198 7121
6199 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
6200
6201 /* if we can't use msix we only need one fp, 7122 /* if we can't use msix we only need one fp,
6202 * so try to enable msix with the requested number of fp's 7123 * so try to enable msix with the requested number of fp's
6203 * and fallback to inta with one fp 7124 * and fallback to inta with one fp
6204 */ 7125 */
6205 if (req_irq) { 7126 if (req_irq) {
6206
6207 if (use_inta) { 7127 if (use_inta) {
6208 bp->num_queues = 1; 7128 bp->num_queues = 1;
6209 } else { 7129 } else {
6210 if (use_multi > 1 && use_multi <= 16) 7130 if ((use_multi > 1) && (use_multi <= 16))
6211 /* user requested number */ 7131 /* user requested number */
6212 bp->num_queues = use_multi; 7132 bp->num_queues = use_multi;
6213 else if (use_multi == 1) 7133 else if (use_multi == 1)
@@ -6216,15 +7136,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6216 bp->num_queues = 1; 7136 bp->num_queues = 1;
6217 7137
6218 if (bnx2x_enable_msix(bp)) { 7138 if (bnx2x_enable_msix(bp)) {
6219 /* faild to enable msix */ 7139 /* failed to enable msix */
6220 bp->num_queues = 1; 7140 bp->num_queues = 1;
6221 if (use_multi) 7141 if (use_multi)
6222 BNX2X_ERR("Muti requested but failed" 7142 BNX2X_ERR("Multi requested but failed"
6223 " to enable MSI-X\n"); 7143 " to enable MSI-X\n");
6224 } 7144 }
6225 } 7145 }
6226 } 7146 }
6227 7147
7148 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7149
6228 if (bnx2x_alloc_mem(bp)) 7150 if (bnx2x_alloc_mem(bp))
6229 return -ENOMEM; 7151 return -ENOMEM;
6230 7152
@@ -6232,13 +7154,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6232 if (bp->flags & USING_MSIX_FLAG) { 7154 if (bp->flags & USING_MSIX_FLAG) {
6233 if (bnx2x_req_msix_irqs(bp)) { 7155 if (bnx2x_req_msix_irqs(bp)) {
6234 pci_disable_msix(bp->pdev); 7156 pci_disable_msix(bp->pdev);
6235 goto out_error; 7157 goto load_error;
6236 } 7158 }
6237 7159
6238 } else { 7160 } else {
6239 if (bnx2x_req_irq(bp)) { 7161 if (bnx2x_req_irq(bp)) {
6240 BNX2X_ERR("IRQ request failed, aborting\n"); 7162 BNX2X_ERR("IRQ request failed, aborting\n");
6241 goto out_error; 7163 goto load_error;
6242 } 7164 }
6243 } 7165 }
6244 } 7166 }
@@ -6249,31 +7171,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6249 7171
6250 7172
6251 /* Initialize HW */ 7173 /* Initialize HW */
6252 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) { 7174 if (bnx2x_function_init(bp,
7175 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
6253 BNX2X_ERR("HW init failed, aborting\n"); 7176 BNX2X_ERR("HW init failed, aborting\n");
6254 goto out_error; 7177 goto load_error;
6255 } 7178 }
6256 7179
6257 7180
6258 atomic_set(&bp->intr_sem, 0); 7181 atomic_set(&bp->intr_sem, 0);
6259 7182
6260 /* Reenable SP tasklet */
6261 /*if (bp->sp_task_en) { */
6262 /* tasklet_enable(&bp->sp_task);*/
6263 /*} else { */
6264 /* bp->sp_task_en = 1; */
6265 /*} */
6266 7183
6267 /* Setup NIC internals and enable interrupts */ 7184 /* Setup NIC internals and enable interrupts */
6268 bnx2x_nic_init(bp); 7185 bnx2x_nic_init(bp);
6269 7186
6270 /* Send LOAD_DONE command to MCP */ 7187 /* Send LOAD_DONE command to MCP */
6271 if (!nomcp) { 7188 if (!nomcp) {
6272 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 7189 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6273 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc); 7190 if (!load_code) {
6274 if (!rc) {
6275 BNX2X_ERR("MCP response failure, unloading\n"); 7191 BNX2X_ERR("MCP response failure, unloading\n");
6276 goto int_disable; 7192 goto load_int_disable;
6277 } 7193 }
6278 } 7194 }
6279 7195
@@ -6285,11 +7201,11 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6285 napi_enable(&bnx2x_fp(bp, i, napi)); 7201 napi_enable(&bnx2x_fp(bp, i, napi));
6286 7202
6287 if (bnx2x_setup_leading(bp)) 7203 if (bnx2x_setup_leading(bp))
6288 goto stop_netif; 7204 goto load_stop_netif;
6289 7205
6290 for_each_nondefault_queue(bp, i) 7206 for_each_nondefault_queue(bp, i)
6291 if (bnx2x_setup_multi(bp, i)) 7207 if (bnx2x_setup_multi(bp, i))
6292 goto stop_netif; 7208 goto load_stop_netif;
6293 7209
6294 bnx2x_set_mac_addr(bp); 7210 bnx2x_set_mac_addr(bp);
6295 7211
@@ -6313,42 +7229,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6313 7229
6314 return 0; 7230 return 0;
6315 7231
6316stop_netif: 7232load_stop_netif:
6317 for_each_queue(bp, i) 7233 for_each_queue(bp, i)
6318 napi_disable(&bnx2x_fp(bp, i, napi)); 7234 napi_disable(&bnx2x_fp(bp, i, napi));
6319 7235
6320int_disable: 7236load_int_disable:
6321 bnx2x_disable_int_sync(bp); 7237 bnx2x_int_disable_sync(bp);
6322 7238
6323 bnx2x_free_skbs(bp); 7239 bnx2x_free_skbs(bp);
6324 bnx2x_free_irq(bp); 7240 bnx2x_free_irq(bp);
6325 7241
6326out_error: 7242load_error:
6327 bnx2x_free_mem(bp); 7243 bnx2x_free_mem(bp);
6328 7244
6329 /* TBD we really need to reset the chip 7245 /* TBD we really need to reset the chip
6330 if we want to recover from this */ 7246 if we want to recover from this */
6331 return rc; 7247 return -EBUSY;
6332} 7248}
6333 7249
6334static void bnx2x_netif_stop(struct bnx2x *bp)
6335{
6336 int i;
6337
6338 bp->rx_mode = BNX2X_RX_MODE_NONE;
6339 bnx2x_set_storm_rx_mode(bp);
6340
6341 bnx2x_disable_int_sync(bp);
6342 bnx2x_link_reset(bp);
6343
6344 for_each_queue(bp, i)
6345 napi_disable(&bnx2x_fp(bp, i, napi));
6346
6347 if (netif_running(bp->dev)) {
6348 netif_tx_disable(bp->dev);
6349 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6350 }
6351}
6352 7250
6353static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 7251static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6354{ 7252{
@@ -6401,20 +7299,20 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6401 7299
6402 int rc; 7300 int rc;
6403 7301
6404 /* halt the connnection */ 7302 /* halt the connection */
6405 bp->fp[index].state = BNX2X_FP_STATE_HALTING; 7303 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6406 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); 7304 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6407 7305
6408 7306
6409 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 7307 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6410 &(bp->fp[index].state), 1); 7308 &(bp->fp[index].state), 1);
6411 if (rc) /* timout */ 7309 if (rc) /* timeout */
6412 return rc; 7310 return rc;
6413 7311
6414 /* delete cfc entry */ 7312 /* delete cfc entry */
6415 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); 7313 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6416 7314
6417 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index, 7315 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6418 &(bp->fp[index].state), 1); 7316 &(bp->fp[index].state), 1);
6419 7317
6420} 7318}
@@ -6422,8 +7320,8 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6422 7320
6423static void bnx2x_stop_leading(struct bnx2x *bp) 7321static void bnx2x_stop_leading(struct bnx2x *bp)
6424{ 7322{
6425 7323 u16 dsb_sp_prod_idx;
6426 /* if the other port is hadling traffic, 7324 /* if the other port is handling traffic,
6427 this can take a lot of time */ 7325 this can take a lot of time */
6428 int timeout = 500; 7326 int timeout = 500;
6429 7327
@@ -6437,52 +7335,71 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6437 &(bp->fp[0].state), 1)) 7335 &(bp->fp[0].state), 1))
6438 return; 7336 return;
6439 7337
6440 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod; 7338 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6441 7339
6442 /* Send CFC_DELETE ramrod */ 7340 /* Send PORT_DELETE ramrod */
6443 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); 7341 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6444 7342
6445 /* 7343 /* Wait for completion to arrive on default status block
6446 Wait for completion.
6447 we are going to reset the chip anyway 7344 we are going to reset the chip anyway
6448 so there is not much to do if this times out 7345 so there is not much to do if this times out
6449 */ 7346 */
6450 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) { 7347 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
6451 timeout--; 7348 timeout--;
6452 msleep(1); 7349 msleep(1);
6453 } 7350 }
6454 7351 if (!timeout) {
7352 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7353 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7354 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7355 }
7356 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7357 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6455} 7358}
6456 7359
6457static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq) 7360
7361static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
6458{ 7362{
6459 u32 reset_code = 0; 7363 u32 reset_code = 0;
6460 int rc; 7364 int i, timeout;
6461 int i;
6462 7365
6463 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 7366 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6464 7367
6465 /* Calling flush_scheduled_work() may deadlock because 7368 del_timer_sync(&bp->timer);
6466 * linkwatch_event() may be on the workqueue and it will try to get
6467 * the rtnl_lock which we are holding.
6468 */
6469 7369
6470 while (bp->in_reset_task) 7370 bp->rx_mode = BNX2X_RX_MODE_NONE;
6471 msleep(1); 7371 bnx2x_set_storm_rx_mode(bp);
6472 7372
6473 /* Delete the timer: do it before disabling interrupts, as it 7373 if (netif_running(bp->dev)) {
6474 may be stil STAT_QUERY ramrod pending after stopping the timer */ 7374 netif_tx_disable(bp->dev);
6475 del_timer_sync(&bp->timer); 7375 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7376 }
7377
7378 /* Wait until all fast path tasks complete */
7379 for_each_queue(bp, i) {
7380 struct bnx2x_fastpath *fp = &bp->fp[i];
7381
7382 timeout = 1000;
7383 while (bnx2x_has_work(fp) && (timeout--))
7384 msleep(1);
7385 if (!timeout)
7386 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
7387 }
6476 7388
6477 /* Wait until stat ramrod returns and all SP tasks complete */ 7389 /* Wait until stat ramrod returns and all SP tasks complete */
6478 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING)) 7390 timeout = 1000;
7391 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
7392 (timeout--))
6479 msleep(1); 7393 msleep(1);
6480 7394
6481 /* Stop fast path, disable MAC, disable interrupts, disable napi */ 7395 for_each_queue(bp, i)
6482 bnx2x_netif_stop(bp); 7396 napi_disable(&bnx2x_fp(bp, i, napi));
7397 /* Disable interrupts after Tx and Rx are disabled on stack level */
7398 bnx2x_int_disable_sync(bp);
6483 7399
6484 if (bp->flags & NO_WOL_FLAG) 7400 if (bp->flags & NO_WOL_FLAG)
6485 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 7401 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7402
6486 else if (bp->wol) { 7403 else if (bp->wol) {
6487 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 7404 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
6488 u8 *mac_addr = bp->dev->dev_addr; 7405 u8 *mac_addr = bp->dev->dev_addr;
@@ -6499,28 +7416,37 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
6499 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); 7416 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
6500 7417
6501 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 7418 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7419
6502 } else 7420 } else
6503 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7421 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6504 7422
7423 /* Close multi and leading connections */
6505 for_each_nondefault_queue(bp, i) 7424 for_each_nondefault_queue(bp, i)
6506 if (bnx2x_stop_multi(bp, i)) 7425 if (bnx2x_stop_multi(bp, i))
6507 goto error; 7426 goto unload_error;
6508
6509 7427
6510 bnx2x_stop_leading(bp); 7428 bnx2x_stop_leading(bp);
7429 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
7430 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
 7431 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
7432 "state 0x%x fp[0].state 0x%x",
7433 bp->state, bp->fp[0].state);
7434 }
7435
7436unload_error:
7437 bnx2x_link_reset(bp);
6511 7438
6512error:
6513 if (!nomcp) 7439 if (!nomcp)
6514 rc = bnx2x_fw_command(bp, reset_code); 7440 reset_code = bnx2x_fw_command(bp, reset_code);
6515 else 7441 else
6516 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON; 7442 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6517 7443
6518 /* Release IRQs */ 7444 /* Release IRQs */
6519 if (fre_irq) 7445 if (free_irq)
6520 bnx2x_free_irq(bp); 7446 bnx2x_free_irq(bp);
6521 7447
6522 /* Reset the chip */ 7448 /* Reset the chip */
6523 bnx2x_reset_chip(bp, rc); 7449 bnx2x_reset_chip(bp, reset_code);
6524 7450
6525 /* Report UNLOAD_DONE to MCP */ 7451 /* Report UNLOAD_DONE to MCP */
6526 if (!nomcp) 7452 if (!nomcp)
@@ -6531,8 +7457,7 @@ error:
6531 bnx2x_free_mem(bp); 7457 bnx2x_free_mem(bp);
6532 7458
6533 bp->state = BNX2X_STATE_CLOSED; 7459 bp->state = BNX2X_STATE_CLOSED;
6534 /* Set link down */ 7460
6535 bp->link_up = 0;
6536 netif_carrier_off(bp->dev); 7461 netif_carrier_off(bp->dev);
6537 7462
6538 return 0; 7463 return 0;
@@ -6568,7 +7493,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6568 SUPPORTED_100baseT_Half | 7493 SUPPORTED_100baseT_Half |
6569 SUPPORTED_100baseT_Full | 7494 SUPPORTED_100baseT_Full |
6570 SUPPORTED_1000baseT_Full | 7495 SUPPORTED_1000baseT_Full |
6571 SUPPORTED_2500baseT_Full | 7496 SUPPORTED_2500baseX_Full |
6572 SUPPORTED_TP | SUPPORTED_FIBRE | 7497 SUPPORTED_TP | SUPPORTED_FIBRE |
6573 SUPPORTED_Autoneg | 7498 SUPPORTED_Autoneg |
6574 SUPPORTED_Pause | 7499 SUPPORTED_Pause |
@@ -6581,10 +7506,10 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6581 7506
6582 bp->phy_flags |= PHY_SGMII_FLAG; 7507 bp->phy_flags |= PHY_SGMII_FLAG;
6583 7508
6584 bp->supported |= (/* SUPPORTED_10baseT_Half | 7509 bp->supported |= (SUPPORTED_10baseT_Half |
6585 SUPPORTED_10baseT_Full | 7510 SUPPORTED_10baseT_Full |
6586 SUPPORTED_100baseT_Half | 7511 SUPPORTED_100baseT_Half |
6587 SUPPORTED_100baseT_Full |*/ 7512 SUPPORTED_100baseT_Full |
6588 SUPPORTED_1000baseT_Full | 7513 SUPPORTED_1000baseT_Full |
6589 SUPPORTED_TP | SUPPORTED_FIBRE | 7514 SUPPORTED_TP | SUPPORTED_FIBRE |
6590 SUPPORTED_Autoneg | 7515 SUPPORTED_Autoneg |
@@ -6620,7 +7545,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6620 SUPPORTED_100baseT_Half | 7545 SUPPORTED_100baseT_Half |
6621 SUPPORTED_100baseT_Full | 7546 SUPPORTED_100baseT_Full |
6622 SUPPORTED_1000baseT_Full | 7547 SUPPORTED_1000baseT_Full |
6623 SUPPORTED_2500baseT_Full | 7548 SUPPORTED_2500baseX_Full |
6624 SUPPORTED_10000baseT_Full | 7549 SUPPORTED_10000baseT_Full |
6625 SUPPORTED_TP | SUPPORTED_FIBRE | 7550 SUPPORTED_TP | SUPPORTED_FIBRE |
6626 SUPPORTED_Autoneg | 7551 SUPPORTED_Autoneg |
@@ -6629,12 +7554,46 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6629 break; 7554 break;
6630 7555
6631 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 7556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7557 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7558 ext_phy_type);
7559
7560 bp->supported |= (SUPPORTED_10000baseT_Full |
7561 SUPPORTED_FIBRE |
7562 SUPPORTED_Pause |
7563 SUPPORTED_Asym_Pause);
7564 break;
7565
6632 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 7566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6633 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n", 7567 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7568 ext_phy_type);
7569
7570 bp->supported |= (SUPPORTED_10000baseT_Full |
7571 SUPPORTED_1000baseT_Full |
7572 SUPPORTED_Autoneg |
7573 SUPPORTED_FIBRE |
7574 SUPPORTED_Pause |
7575 SUPPORTED_Asym_Pause);
7576 break;
7577
7578 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7579 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6634 ext_phy_type); 7580 ext_phy_type);
6635 7581
6636 bp->supported |= (SUPPORTED_10000baseT_Full | 7582 bp->supported |= (SUPPORTED_10000baseT_Full |
7583 SUPPORTED_1000baseT_Full |
6637 SUPPORTED_FIBRE | 7584 SUPPORTED_FIBRE |
7585 SUPPORTED_Autoneg |
7586 SUPPORTED_Pause |
7587 SUPPORTED_Asym_Pause);
7588 break;
7589
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7591 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7592 ext_phy_type);
7593
7594 bp->supported |= (SUPPORTED_10000baseT_Full |
7595 SUPPORTED_TP |
7596 SUPPORTED_Autoneg |
6638 SUPPORTED_Pause | 7597 SUPPORTED_Pause |
6639 SUPPORTED_Asym_Pause); 7598 SUPPORTED_Asym_Pause);
6640 break; 7599 break;
@@ -6691,7 +7650,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6691 SUPPORTED_1000baseT_Full); 7650 SUPPORTED_1000baseT_Full);
6692 7651
6693 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 7652 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6694 bp->supported &= ~SUPPORTED_2500baseT_Full; 7653 bp->supported &= ~SUPPORTED_2500baseX_Full;
6695 7654
6696 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 7655 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6697 bp->supported &= ~SUPPORTED_10000baseT_Full; 7656 bp->supported &= ~SUPPORTED_10000baseT_Full;
@@ -6711,13 +7670,8 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6711 bp->req_line_speed = 0; 7670 bp->req_line_speed = 0;
6712 bp->advertising = bp->supported; 7671 bp->advertising = bp->supported;
6713 } else { 7672 } else {
6714 u32 ext_phy_type; 7673 if (XGXS_EXT_PHY_TYPE(bp) ==
6715 7674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
6716 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6717 if ((ext_phy_type ==
6718 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6719 (ext_phy_type ==
6720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6721 /* force 10G, no AN */ 7675 /* force 10G, no AN */
6722 bp->req_line_speed = SPEED_10000; 7676 bp->req_line_speed = SPEED_10000;
6723 bp->advertising = 7677 bp->advertising =
@@ -6734,8 +7688,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6734 break; 7688 break;
6735 7689
6736 case PORT_FEATURE_LINK_SPEED_10M_FULL: 7690 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6737 if (bp->speed_cap_mask & 7691 if (bp->supported & SUPPORTED_10baseT_Full) {
6738 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
6739 bp->req_line_speed = SPEED_10; 7692 bp->req_line_speed = SPEED_10;
6740 bp->advertising = (ADVERTISED_10baseT_Full | 7693 bp->advertising = (ADVERTISED_10baseT_Full |
6741 ADVERTISED_TP); 7694 ADVERTISED_TP);
@@ -6749,8 +7702,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6749 break; 7702 break;
6750 7703
6751 case PORT_FEATURE_LINK_SPEED_10M_HALF: 7704 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6752 if (bp->speed_cap_mask & 7705 if (bp->supported & SUPPORTED_10baseT_Half) {
6753 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
6754 bp->req_line_speed = SPEED_10; 7706 bp->req_line_speed = SPEED_10;
6755 bp->req_duplex = DUPLEX_HALF; 7707 bp->req_duplex = DUPLEX_HALF;
6756 bp->advertising = (ADVERTISED_10baseT_Half | 7708 bp->advertising = (ADVERTISED_10baseT_Half |
@@ -6765,8 +7717,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6765 break; 7717 break;
6766 7718
6767 case PORT_FEATURE_LINK_SPEED_100M_FULL: 7719 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6768 if (bp->speed_cap_mask & 7720 if (bp->supported & SUPPORTED_100baseT_Full) {
6769 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
6770 bp->req_line_speed = SPEED_100; 7721 bp->req_line_speed = SPEED_100;
6771 bp->advertising = (ADVERTISED_100baseT_Full | 7722 bp->advertising = (ADVERTISED_100baseT_Full |
6772 ADVERTISED_TP); 7723 ADVERTISED_TP);
@@ -6780,8 +7731,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6780 break; 7731 break;
6781 7732
6782 case PORT_FEATURE_LINK_SPEED_100M_HALF: 7733 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6783 if (bp->speed_cap_mask & 7734 if (bp->supported & SUPPORTED_100baseT_Half) {
6784 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
6785 bp->req_line_speed = SPEED_100; 7735 bp->req_line_speed = SPEED_100;
6786 bp->req_duplex = DUPLEX_HALF; 7736 bp->req_duplex = DUPLEX_HALF;
6787 bp->advertising = (ADVERTISED_100baseT_Half | 7737 bp->advertising = (ADVERTISED_100baseT_Half |
@@ -6796,8 +7746,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6796 break; 7746 break;
6797 7747
6798 case PORT_FEATURE_LINK_SPEED_1G: 7748 case PORT_FEATURE_LINK_SPEED_1G:
6799 if (bp->speed_cap_mask & 7749 if (bp->supported & SUPPORTED_1000baseT_Full) {
6800 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
6801 bp->req_line_speed = SPEED_1000; 7750 bp->req_line_speed = SPEED_1000;
6802 bp->advertising = (ADVERTISED_1000baseT_Full | 7751 bp->advertising = (ADVERTISED_1000baseT_Full |
6803 ADVERTISED_TP); 7752 ADVERTISED_TP);
@@ -6811,10 +7760,9 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6811 break; 7760 break;
6812 7761
6813 case PORT_FEATURE_LINK_SPEED_2_5G: 7762 case PORT_FEATURE_LINK_SPEED_2_5G:
6814 if (bp->speed_cap_mask & 7763 if (bp->supported & SUPPORTED_2500baseX_Full) {
6815 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
6816 bp->req_line_speed = SPEED_2500; 7764 bp->req_line_speed = SPEED_2500;
6817 bp->advertising = (ADVERTISED_2500baseT_Full | 7765 bp->advertising = (ADVERTISED_2500baseX_Full |
6818 ADVERTISED_TP); 7766 ADVERTISED_TP);
6819 } else { 7767 } else {
6820 BNX2X_ERR("NVRAM config error. " 7768 BNX2X_ERR("NVRAM config error. "
@@ -6828,15 +7776,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6828 case PORT_FEATURE_LINK_SPEED_10G_CX4: 7776 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6829 case PORT_FEATURE_LINK_SPEED_10G_KX4: 7777 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6830 case PORT_FEATURE_LINK_SPEED_10G_KR: 7778 case PORT_FEATURE_LINK_SPEED_10G_KR:
6831 if (!(bp->phy_flags & PHY_XGXS_FLAG)) { 7779 if (bp->supported & SUPPORTED_10000baseT_Full) {
6832 BNX2X_ERR("NVRAM config error. "
6833 "Invalid link_config 0x%x"
6834 " phy_flags 0x%x\n",
6835 bp->link_config, bp->phy_flags);
6836 return;
6837 }
6838 if (bp->speed_cap_mask &
6839 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
6840 bp->req_line_speed = SPEED_10000; 7780 bp->req_line_speed = SPEED_10000;
6841 bp->advertising = (ADVERTISED_10000baseT_Full | 7781 bp->advertising = (ADVERTISED_10000baseT_Full |
6842 ADVERTISED_FIBRE); 7782 ADVERTISED_FIBRE);
@@ -6863,43 +7803,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6863 7803
6864 bp->req_flow_ctrl = (bp->link_config & 7804 bp->req_flow_ctrl = (bp->link_config &
6865 PORT_FEATURE_FLOW_CONTROL_MASK); 7805 PORT_FEATURE_FLOW_CONTROL_MASK);
6866 /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 7806 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
6867 switch (bp->req_flow_ctrl) { 7807 (bp->supported & SUPPORTED_Autoneg))
6868 case FLOW_CTRL_AUTO:
6869 bp->req_autoneg |= AUTONEG_FLOW_CTRL; 7808 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
6870 if (bp->dev->mtu <= 4500) {
6871 bp->pause_mode = PAUSE_BOTH;
6872 bp->advertising |= (ADVERTISED_Pause |
6873 ADVERTISED_Asym_Pause);
6874 } else {
6875 bp->pause_mode = PAUSE_ASYMMETRIC;
6876 bp->advertising |= ADVERTISED_Asym_Pause;
6877 }
6878 break;
6879
6880 case FLOW_CTRL_TX:
6881 bp->pause_mode = PAUSE_ASYMMETRIC;
6882 bp->advertising |= ADVERTISED_Asym_Pause;
6883 break;
6884
6885 case FLOW_CTRL_RX:
6886 case FLOW_CTRL_BOTH:
6887 bp->pause_mode = PAUSE_BOTH;
6888 bp->advertising |= (ADVERTISED_Pause |
6889 ADVERTISED_Asym_Pause);
6890 break;
6891 7809
6892 case FLOW_CTRL_NONE: 7810 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
6893 default: 7811 " advertising 0x%x\n",
6894 bp->pause_mode = PAUSE_NONE; 7812 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
6895 bp->advertising &= ~(ADVERTISED_Pause |
6896 ADVERTISED_Asym_Pause);
6897 break;
6898 }
6899 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x\n"
6900 KERN_INFO " pause_mode %d advertising 0x%x\n",
6901 bp->req_autoneg, bp->req_flow_ctrl,
6902 bp->pause_mode, bp->advertising);
6903} 7813}
6904 7814
6905static void bnx2x_get_hwinfo(struct bnx2x *bp) 7815static void bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -6933,15 +7843,15 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
6933 val = SHMEM_RD(bp, validity_map[port]); 7843 val = SHMEM_RD(bp, validity_map[port]);
6934 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7844 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6935 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7845 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936 BNX2X_ERR("MCP validity signature bad\n"); 7846 BNX2X_ERR("BAD MCP validity signature\n");
6937 7847
6938 bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) & 7848 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
6939 DRV_MSG_SEQ_NUMBER_MASK); 7849 DRV_MSG_SEQ_NUMBER_MASK);
6940 7850
6941 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7851 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6942 7852 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6943 bp->serdes_config = 7853 bp->serdes_config =
6944 SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config); 7854 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
6945 bp->lane_config = 7855 bp->lane_config =
6946 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 7856 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6947 bp->ext_phy_config = 7857 bp->ext_phy_config =
@@ -6954,13 +7864,13 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
6954 bp->link_config = 7864 bp->link_config =
6955 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 7865 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6956 7866
6957 BNX2X_DEV_INFO("hw_config (%08x) serdes_config (%08x)\n" 7867 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
6958 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n" 7868 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
6959 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)" 7869 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
6960 " fw_seq (%08x)\n", 7870 " fw_seq (%08x)\n",
6961 bp->hw_config, bp->serdes_config, bp->lane_config, 7871 bp->hw_config, bp->board, bp->serdes_config,
6962 bp->ext_phy_config, bp->speed_cap_mask, 7872 bp->lane_config, bp->ext_phy_config,
6963 bp->link_config, bp->fw_seq); 7873 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
6964 7874
6965 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK); 7875 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
6966 bnx2x_link_settings_supported(bp, switch_cfg); 7876 bnx2x_link_settings_supported(bp, switch_cfg);
@@ -7014,14 +7924,8 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
7014 return; 7924 return;
7015 7925
7016set_mac: /* only supposed to happen on emulation/FPGA */ 7926set_mac: /* only supposed to happen on emulation/FPGA */
7017 BNX2X_ERR("warning constant MAC workaround active\n"); 7927 BNX2X_ERR("warning rendom MAC workaround active\n");
7018 bp->dev->dev_addr[0] = 0; 7928 random_ether_addr(bp->dev->dev_addr);
7019 bp->dev->dev_addr[1] = 0x50;
7020 bp->dev->dev_addr[2] = 0xc2;
7021 bp->dev->dev_addr[3] = 0x2c;
7022 bp->dev->dev_addr[4] = 0x71;
7023 bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
7024
7025 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); 7929 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7026 7930
7027} 7931}
@@ -7048,19 +7952,34 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7048 } 7952 }
7049 7953
7050 if (bp->phy_flags & PHY_XGXS_FLAG) { 7954 if (bp->phy_flags & PHY_XGXS_FLAG) {
7051 cmd->port = PORT_FIBRE; 7955 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7052 } else { 7956
7957 switch (ext_phy_type) {
7958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7962 cmd->port = PORT_FIBRE;
7963 break;
7964
7965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7966 cmd->port = PORT_TP;
7967 break;
7968
7969 default:
7970 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7971 bp->ext_phy_config);
7972 }
7973 } else
7053 cmd->port = PORT_TP; 7974 cmd->port = PORT_TP;
7054 }
7055 7975
7056 cmd->phy_address = bp->phy_addr; 7976 cmd->phy_address = bp->phy_addr;
7057 cmd->transceiver = XCVR_INTERNAL; 7977 cmd->transceiver = XCVR_INTERNAL;
7058 7978
7059 if (bp->req_autoneg & AUTONEG_SPEED) { 7979 if (bp->req_autoneg & AUTONEG_SPEED)
7060 cmd->autoneg = AUTONEG_ENABLE; 7980 cmd->autoneg = AUTONEG_ENABLE;
7061 } else { 7981 else
7062 cmd->autoneg = AUTONEG_DISABLE; 7982 cmd->autoneg = AUTONEG_DISABLE;
7063 }
7064 7983
7065 cmd->maxtxpkt = 0; 7984 cmd->maxtxpkt = 0;
7066 cmd->maxrxpkt = 0; 7985 cmd->maxrxpkt = 0;
@@ -7091,8 +8010,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7091 8010
7092 switch (cmd->port) { 8011 switch (cmd->port) {
7093 case PORT_TP: 8012 case PORT_TP:
7094 if (!(bp->supported & SUPPORTED_TP)) 8013 if (!(bp->supported & SUPPORTED_TP)) {
8014 DP(NETIF_MSG_LINK, "TP not supported\n");
7095 return -EINVAL; 8015 return -EINVAL;
8016 }
7096 8017
7097 if (bp->phy_flags & PHY_XGXS_FLAG) { 8018 if (bp->phy_flags & PHY_XGXS_FLAG) {
7098 bnx2x_link_reset(bp); 8019 bnx2x_link_reset(bp);
@@ -7102,8 +8023,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7102 break; 8023 break;
7103 8024
7104 case PORT_FIBRE: 8025 case PORT_FIBRE:
7105 if (!(bp->supported & SUPPORTED_FIBRE)) 8026 if (!(bp->supported & SUPPORTED_FIBRE)) {
8027 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
7106 return -EINVAL; 8028 return -EINVAL;
8029 }
7107 8030
7108 if (!(bp->phy_flags & PHY_XGXS_FLAG)) { 8031 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7109 bnx2x_link_reset(bp); 8032 bnx2x_link_reset(bp);
@@ -7113,12 +8036,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7113 break; 8036 break;
7114 8037
7115 default: 8038 default:
8039 DP(NETIF_MSG_LINK, "Unknown port type\n");
7116 return -EINVAL; 8040 return -EINVAL;
7117 } 8041 }
7118 8042
7119 if (cmd->autoneg == AUTONEG_ENABLE) { 8043 if (cmd->autoneg == AUTONEG_ENABLE) {
7120 if (!(bp->supported & SUPPORTED_Autoneg)) 8044 if (!(bp->supported & SUPPORTED_Autoneg)) {
 8045 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7121 return -EINVAL; 8046 return -EINVAL;
8047 }
7122 8048
7123 /* advertise the requested speed and duplex if supported */ 8049 /* advertise the requested speed and duplex if supported */
7124 cmd->advertising &= bp->supported; 8050 cmd->advertising &= bp->supported;
@@ -7133,14 +8059,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7133 switch (cmd->speed) { 8059 switch (cmd->speed) {
7134 case SPEED_10: 8060 case SPEED_10:
7135 if (cmd->duplex == DUPLEX_FULL) { 8061 if (cmd->duplex == DUPLEX_FULL) {
7136 if (!(bp->supported & SUPPORTED_10baseT_Full)) 8062 if (!(bp->supported &
8063 SUPPORTED_10baseT_Full)) {
8064 DP(NETIF_MSG_LINK,
8065 "10M full not supported\n");
7137 return -EINVAL; 8066 return -EINVAL;
8067 }
7138 8068
7139 advertising = (ADVERTISED_10baseT_Full | 8069 advertising = (ADVERTISED_10baseT_Full |
7140 ADVERTISED_TP); 8070 ADVERTISED_TP);
7141 } else { 8071 } else {
7142 if (!(bp->supported & SUPPORTED_10baseT_Half)) 8072 if (!(bp->supported &
8073 SUPPORTED_10baseT_Half)) {
8074 DP(NETIF_MSG_LINK,
8075 "10M half not supported\n");
7143 return -EINVAL; 8076 return -EINVAL;
8077 }
7144 8078
7145 advertising = (ADVERTISED_10baseT_Half | 8079 advertising = (ADVERTISED_10baseT_Half |
7146 ADVERTISED_TP); 8080 ADVERTISED_TP);
@@ -7150,15 +8084,21 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7150 case SPEED_100: 8084 case SPEED_100:
7151 if (cmd->duplex == DUPLEX_FULL) { 8085 if (cmd->duplex == DUPLEX_FULL) {
7152 if (!(bp->supported & 8086 if (!(bp->supported &
7153 SUPPORTED_100baseT_Full)) 8087 SUPPORTED_100baseT_Full)) {
8088 DP(NETIF_MSG_LINK,
8089 "100M full not supported\n");
7154 return -EINVAL; 8090 return -EINVAL;
8091 }
7155 8092
7156 advertising = (ADVERTISED_100baseT_Full | 8093 advertising = (ADVERTISED_100baseT_Full |
7157 ADVERTISED_TP); 8094 ADVERTISED_TP);
7158 } else { 8095 } else {
7159 if (!(bp->supported & 8096 if (!(bp->supported &
7160 SUPPORTED_100baseT_Half)) 8097 SUPPORTED_100baseT_Half)) {
8098 DP(NETIF_MSG_LINK,
8099 "100M half not supported\n");
7161 return -EINVAL; 8100 return -EINVAL;
8101 }
7162 8102
7163 advertising = (ADVERTISED_100baseT_Half | 8103 advertising = (ADVERTISED_100baseT_Half |
7164 ADVERTISED_TP); 8104 ADVERTISED_TP);
@@ -7166,39 +8106,54 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7166 break; 8106 break;
7167 8107
7168 case SPEED_1000: 8108 case SPEED_1000:
7169 if (cmd->duplex != DUPLEX_FULL) 8109 if (cmd->duplex != DUPLEX_FULL) {
8110 DP(NETIF_MSG_LINK, "1G half not supported\n");
7170 return -EINVAL; 8111 return -EINVAL;
8112 }
7171 8113
7172 if (!(bp->supported & SUPPORTED_1000baseT_Full)) 8114 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8115 DP(NETIF_MSG_LINK, "1G full not supported\n");
7173 return -EINVAL; 8116 return -EINVAL;
8117 }
7174 8118
7175 advertising = (ADVERTISED_1000baseT_Full | 8119 advertising = (ADVERTISED_1000baseT_Full |
7176 ADVERTISED_TP); 8120 ADVERTISED_TP);
7177 break; 8121 break;
7178 8122
7179 case SPEED_2500: 8123 case SPEED_2500:
7180 if (cmd->duplex != DUPLEX_FULL) 8124 if (cmd->duplex != DUPLEX_FULL) {
8125 DP(NETIF_MSG_LINK,
8126 "2.5G half not supported\n");
7181 return -EINVAL; 8127 return -EINVAL;
8128 }
7182 8129
7183 if (!(bp->supported & SUPPORTED_2500baseT_Full)) 8130 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8131 DP(NETIF_MSG_LINK,
8132 "2.5G full not supported\n");
7184 return -EINVAL; 8133 return -EINVAL;
8134 }
7185 8135
7186 advertising = (ADVERTISED_2500baseT_Full | 8136 advertising = (ADVERTISED_2500baseX_Full |
7187 ADVERTISED_TP); 8137 ADVERTISED_TP);
7188 break; 8138 break;
7189 8139
7190 case SPEED_10000: 8140 case SPEED_10000:
7191 if (cmd->duplex != DUPLEX_FULL) 8141 if (cmd->duplex != DUPLEX_FULL) {
8142 DP(NETIF_MSG_LINK, "10G half not supported\n");
7192 return -EINVAL; 8143 return -EINVAL;
8144 }
7193 8145
7194 if (!(bp->supported & SUPPORTED_10000baseT_Full)) 8146 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8147 DP(NETIF_MSG_LINK, "10G full not supported\n");
7195 return -EINVAL; 8148 return -EINVAL;
8149 }
7196 8150
7197 advertising = (ADVERTISED_10000baseT_Full | 8151 advertising = (ADVERTISED_10000baseT_Full |
7198 ADVERTISED_FIBRE); 8152 ADVERTISED_FIBRE);
7199 break; 8153 break;
7200 8154
7201 default: 8155 default:
8156 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7202 return -EINVAL; 8157 return -EINVAL;
7203 } 8158 }
7204 8159
@@ -7398,8 +8353,7 @@ static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7398static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, 8353static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7399 u32 cmd_flags) 8354 u32 cmd_flags)
7400{ 8355{
7401 int rc; 8356 int count, i, rc;
7402 int count, i;
7403 u32 val; 8357 u32 val;
7404 8358
7405 /* build the command word */ 8359 /* build the command word */
@@ -7452,13 +8406,13 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7452 8406
7453 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { 8407 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7454 DP(NETIF_MSG_NVM, 8408 DP(NETIF_MSG_NVM,
7455 "Invalid paramter: offset 0x%x buf_size 0x%x\n", 8409 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7456 offset, buf_size); 8410 offset, buf_size);
7457 return -EINVAL; 8411 return -EINVAL;
7458 } 8412 }
7459 8413
7460 if (offset + buf_size > bp->flash_size) { 8414 if (offset + buf_size > bp->flash_size) {
7461 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8415 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7462 " buf_size (0x%x) > flash_size (0x%x)\n", 8416 " buf_size (0x%x) > flash_size (0x%x)\n",
7463 offset, buf_size, bp->flash_size); 8417 offset, buf_size, bp->flash_size);
7464 return -EINVAL; 8418 return -EINVAL;
@@ -7519,8 +8473,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
7519static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, 8473static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7520 u32 cmd_flags) 8474 u32 cmd_flags)
7521{ 8475{
7522 int rc; 8476 int count, i, rc;
7523 int count, i;
7524 8477
7525 /* build the command word */ 8478 /* build the command word */
7526 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; 8479 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
@@ -7557,7 +8510,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7557 return rc; 8510 return rc;
7558} 8511}
7559 8512
7560#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) 8513#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7561 8514
7562static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, 8515static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7563 int buf_size) 8516 int buf_size)
@@ -7568,7 +8521,7 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7568 u32 val; 8521 u32 val;
7569 8522
7570 if (offset + buf_size > bp->flash_size) { 8523 if (offset + buf_size > bp->flash_size) {
7571 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8524 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7572 " buf_size (0x%x) > flash_size (0x%x)\n", 8525 " buf_size (0x%x) > flash_size (0x%x)\n",
7573 offset, buf_size, bp->flash_size); 8526 offset, buf_size, bp->flash_size);
7574 return -EINVAL; 8527 return -EINVAL;
@@ -7621,13 +8574,13 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7621 8574
7622 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { 8575 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7623 DP(NETIF_MSG_NVM, 8576 DP(NETIF_MSG_NVM,
7624 "Invalid paramter: offset 0x%x buf_size 0x%x\n", 8577 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7625 offset, buf_size); 8578 offset, buf_size);
7626 return -EINVAL; 8579 return -EINVAL;
7627 } 8580 }
7628 8581
7629 if (offset + buf_size > bp->flash_size) { 8582 if (offset + buf_size > bp->flash_size) {
7630 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8583 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7631 " buf_size (0x%x) > flash_size (0x%x)\n", 8584 " buf_size (0x%x) > flash_size (0x%x)\n",
7632 offset, buf_size, bp->flash_size); 8585 offset, buf_size, bp->flash_size);
7633 return -EINVAL; 8586 return -EINVAL;
@@ -7788,52 +8741,29 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
7788 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 8741 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7789 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 8742 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7790 8743
7791 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7792 if (epause->autoneg) { 8744 if (epause->autoneg) {
7793 bp->req_autoneg |= AUTONEG_FLOW_CTRL; 8745 if (!(bp->supported & SUPPORTED_Autoneg)) {
 7794 if (bp->dev->mtu <= 4500) { 8746 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7795 bp->pause_mode = PAUSE_BOTH; 8747 return -EINVAL;
7796 bp->advertising |= (ADVERTISED_Pause |
7797 ADVERTISED_Asym_Pause);
7798 } else {
7799 bp->pause_mode = PAUSE_ASYMMETRIC;
7800 bp->advertising |= ADVERTISED_Asym_Pause;
7801 } 8748 }
7802 8749
7803 } else { 8750 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8751 } else
7804 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL; 8752 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
7805 8753
7806 if (epause->rx_pause) 8754 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7807 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7808 if (epause->tx_pause)
7809 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7810
7811 switch (bp->req_flow_ctrl) {
7812 case FLOW_CTRL_AUTO:
7813 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7814 bp->pause_mode = PAUSE_NONE;
7815 bp->advertising &= ~(ADVERTISED_Pause |
7816 ADVERTISED_Asym_Pause);
7817 break;
7818 8755
7819 case FLOW_CTRL_TX: 8756 if (epause->rx_pause)
7820 bp->pause_mode = PAUSE_ASYMMETRIC; 8757 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7821 bp->advertising |= ADVERTISED_Asym_Pause; 8758 if (epause->tx_pause)
7822 break; 8759 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7823 8760
7824 case FLOW_CTRL_RX: 8761 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
7825 case FLOW_CTRL_BOTH: 8762 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
7826 bp->pause_mode = PAUSE_BOTH; 8763 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7827 bp->advertising |= (ADVERTISED_Pause |
7828 ADVERTISED_Asym_Pause);
7829 break;
7830 }
7831 }
7832 8764
7833 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n" 8765 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
7834 DP_LEVEL " pause_mode %d advertising 0x%x\n", 8766 bp->req_autoneg, bp->req_flow_ctrl);
7835 bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
7836 bp->advertising);
7837 8767
7838 bnx2x_stop_stats(bp); 8768 bnx2x_stop_stats(bp);
7839 bnx2x_link_initialize(bp); 8769 bnx2x_link_initialize(bp);
@@ -7906,81 +8836,87 @@ static void bnx2x_self_test(struct net_device *dev,
7906static struct { 8836static struct {
7907 char string[ETH_GSTRING_LEN]; 8837 char string[ETH_GSTRING_LEN];
7908} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = { 8838} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
7909 { "rx_bytes"}, /* 0 */ 8839 { "rx_bytes"},
7910 { "rx_error_bytes"}, /* 1 */ 8840 { "rx_error_bytes"},
7911 { "tx_bytes"}, /* 2 */ 8841 { "tx_bytes"},
7912 { "tx_error_bytes"}, /* 3 */ 8842 { "tx_error_bytes"},
7913 { "rx_ucast_packets"}, /* 4 */ 8843 { "rx_ucast_packets"},
7914 { "rx_mcast_packets"}, /* 5 */ 8844 { "rx_mcast_packets"},
7915 { "rx_bcast_packets"}, /* 6 */ 8845 { "rx_bcast_packets"},
7916 { "tx_ucast_packets"}, /* 7 */ 8846 { "tx_ucast_packets"},
7917 { "tx_mcast_packets"}, /* 8 */ 8847 { "tx_mcast_packets"},
7918 { "tx_bcast_packets"}, /* 9 */ 8848 { "tx_bcast_packets"},
7919 { "tx_mac_errors"}, /* 10 */ 8849 { "tx_mac_errors"}, /* 10 */
7920 { "tx_carrier_errors"}, /* 11 */ 8850 { "tx_carrier_errors"},
7921 { "rx_crc_errors"}, /* 12 */ 8851 { "rx_crc_errors"},
7922 { "rx_align_errors"}, /* 13 */ 8852 { "rx_align_errors"},
7923 { "tx_single_collisions"}, /* 14 */ 8853 { "tx_single_collisions"},
7924 { "tx_multi_collisions"}, /* 15 */ 8854 { "tx_multi_collisions"},
7925 { "tx_deferred"}, /* 16 */ 8855 { "tx_deferred"},
7926 { "tx_excess_collisions"}, /* 17 */ 8856 { "tx_excess_collisions"},
7927 { "tx_late_collisions"}, /* 18 */ 8857 { "tx_late_collisions"},
7928 { "tx_total_collisions"}, /* 19 */ 8858 { "tx_total_collisions"},
7929 { "rx_fragments"}, /* 20 */ 8859 { "rx_fragments"}, /* 20 */
7930 { "rx_jabbers"}, /* 21 */ 8860 { "rx_jabbers"},
7931 { "rx_undersize_packets"}, /* 22 */ 8861 { "rx_undersize_packets"},
7932 { "rx_oversize_packets"}, /* 23 */ 8862 { "rx_oversize_packets"},
7933 { "rx_xon_frames"}, /* 24 */ 8863 { "rx_xon_frames"},
7934 { "rx_xoff_frames"}, /* 25 */ 8864 { "rx_xoff_frames"},
7935 { "tx_xon_frames"}, /* 26 */ 8865 { "tx_xon_frames"},
7936 { "tx_xoff_frames"}, /* 27 */ 8866 { "tx_xoff_frames"},
7937 { "rx_mac_ctrl_frames"}, /* 28 */ 8867 { "rx_mac_ctrl_frames"},
7938 { "rx_filtered_packets"}, /* 29 */ 8868 { "rx_filtered_packets"},
7939 { "rx_discards"}, /* 30 */ 8869 { "rx_discards"}, /* 30 */
8870 { "brb_discard"},
8871 { "brb_truncate"},
8872 { "xxoverflow"}
7940}; 8873};
7941 8874
7942#define STATS_OFFSET32(offset_name) \ 8875#define STATS_OFFSET32(offset_name) \
7943 (offsetof(struct bnx2x_eth_stats, offset_name) / 4) 8876 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
7944 8877
7945static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = { 8878static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
7946 STATS_OFFSET32(total_bytes_received_hi), /* 0 */ 8879 STATS_OFFSET32(total_bytes_received_hi),
7947 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */ 8880 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7948 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */ 8881 STATS_OFFSET32(total_bytes_transmitted_hi),
7949 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */ 8882 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7950 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */ 8883 STATS_OFFSET32(total_unicast_packets_received_hi),
7951 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */ 8884 STATS_OFFSET32(total_multicast_packets_received_hi),
7952 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */ 8885 STATS_OFFSET32(total_broadcast_packets_received_hi),
7953 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */ 8886 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
7954 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */ 8887 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
7955 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */ 8888 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
7956 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */ 8889 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
7957 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */ 8890 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7958 STATS_OFFSET32(crc_receive_errors), /* 12 */ 8891 STATS_OFFSET32(crc_receive_errors),
7959 STATS_OFFSET32(alignment_errors), /* 13 */ 8892 STATS_OFFSET32(alignment_errors),
7960 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */ 8893 STATS_OFFSET32(single_collision_transmit_frames),
7961 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */ 8894 STATS_OFFSET32(multiple_collision_transmit_frames),
7962 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */ 8895 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7963 STATS_OFFSET32(excessive_collision_frames), /* 17 */ 8896 STATS_OFFSET32(excessive_collision_frames),
7964 STATS_OFFSET32(late_collision_frames), /* 18 */ 8897 STATS_OFFSET32(late_collision_frames),
7965 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */ 8898 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
7966 STATS_OFFSET32(runt_packets_received), /* 20 */ 8899 STATS_OFFSET32(runt_packets_received), /* 20 */
7967 STATS_OFFSET32(jabber_packets_received), /* 21 */ 8900 STATS_OFFSET32(jabber_packets_received),
7968 STATS_OFFSET32(error_runt_packets_received), /* 22 */ 8901 STATS_OFFSET32(error_runt_packets_received),
7969 STATS_OFFSET32(error_jabber_packets_received), /* 23 */ 8902 STATS_OFFSET32(error_jabber_packets_received),
7970 STATS_OFFSET32(pause_xon_frames_received), /* 24 */ 8903 STATS_OFFSET32(pause_xon_frames_received),
7971 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */ 8904 STATS_OFFSET32(pause_xoff_frames_received),
7972 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */ 8905 STATS_OFFSET32(pause_xon_frames_transmitted),
7973 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */ 8906 STATS_OFFSET32(pause_xoff_frames_transmitted),
7974 STATS_OFFSET32(control_frames_received), /* 28 */ 8907 STATS_OFFSET32(control_frames_received),
7975 STATS_OFFSET32(mac_filter_discard), /* 29 */ 8908 STATS_OFFSET32(mac_filter_discard),
7976 STATS_OFFSET32(no_buff_discard), /* 30 */ 8909 STATS_OFFSET32(no_buff_discard), /* 30 */
8910 STATS_OFFSET32(brb_discard),
8911 STATS_OFFSET32(brb_truncate_discard),
8912 STATS_OFFSET32(xxoverflow_discard)
7977}; 8913};
7978 8914
7979static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = { 8915static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7980 8, 0, 8, 0, 8, 8, 8, 8, 8, 8, 8916 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7981 4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8917 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7982 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8918 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
7983 4, 8919 4, 4, 4, 4
7984}; 8920};
7985 8921
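The two arrays above, together with the string table earlier in this hunk, drive the ethtool statistics: bnx2x_stats_offset_arr gives each counter's position inside struct bnx2x_eth_stats in 32-bit words (that is what STATS_OFFSET32 computes), and bnx2x_stats_len_arr encodes the counter width -- 8 for a 64-bit counter kept as a _hi/_lo pair, 4 for a plain 32-bit counter, 0 for an entry that is not collected. A minimal sketch, under stated assumptions, of how a get_ethtool_stats handler could walk these arrays; the function name, the bp->eth_stats block and the "_lo immediately follows _hi" layout are illustrative assumptions, not the driver's exact code:

static void example_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;	/* assumed stats block */
	int i;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		u32 *p = hw_stats + bnx2x_stats_offset_arr[i];

		switch (bnx2x_stats_len_arr[i]) {
		case 8:		/* 64-bit counter: assumed _hi word, then _lo */
			buf[i] = ((u64)p[0] << 32) | p[1];
			break;
		case 4:		/* plain 32-bit counter */
			buf[i] = p[0];
			break;
		default:	/* 0: not collected, report zero */
			buf[i] = 0;
			break;
		}
	}
}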
7986static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 8922static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -8138,9 +9074,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8138 * net_device service functions 9074 * net_device service functions
8139 */ 9075 */
8140 9076
8141/* Called with rtnl_lock from vlan functions and also netif_tx_lock 9077/* called with netif_tx_lock from set_multicast */
8142 * from set_multicast.
8143 */
8144static void bnx2x_set_rx_mode(struct net_device *dev) 9078static void bnx2x_set_rx_mode(struct net_device *dev)
8145{ 9079{
8146 struct bnx2x *bp = netdev_priv(dev); 9080 struct bnx2x *bp = netdev_priv(dev);
@@ -8314,7 +9248,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8314 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9248 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8315 tx_bd->general_data |= 1; /* header nbd */ 9249 tx_bd->general_data |= 1; /* header nbd */
8316 9250
8317 /* remeber the first bd of the packet */ 9251 /* remember the first bd of the packet */
8318 tx_buf->first_bd = bd_prod; 9252 tx_buf->first_bd = bd_prod;
8319 9253
8320 DP(NETIF_MSG_TX_QUEUED, 9254 DP(NETIF_MSG_TX_QUEUED,
@@ -8334,7 +9268,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8334 9268
8335 /* for now NS flag is not used in Linux */ 9269 /* for now NS flag is not used in Linux */
8336 pbd->global_data = (len | 9270 pbd->global_data = (len |
8337 ((skb->protocol == ETH_P_8021Q) << 9271 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
8338 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); 9272 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8339 pbd->ip_hlen = ip_hdrlen(skb) / 2; 9273 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8340 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen); 9274 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
@@ -8343,7 +9277,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8343 9277
8344 tx_bd->bd_flags.as_bitfield |= 9278 tx_bd->bd_flags.as_bitfield |=
8345 ETH_TX_BD_FLAGS_TCP_CSUM; 9279 ETH_TX_BD_FLAGS_TCP_CSUM;
8346 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF; 9280 pbd->tcp_flags = pbd_tcp_flags(skb);
8347 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2); 9281 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8348 pbd->tcp_pseudo_csum = swab16(th->check); 9282 pbd->tcp_pseudo_csum = swab16(th->check);
8349 9283
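The open-coded flag extraction (htonl(tcp_flag_word(skb)) & 0xFFFF) is replaced by a pbd_tcp_flags() helper. The helper lives in bnx2x.h and is not visible in this hunk; purely as an assumption, it presumably reduces to something like the macro below, pulling the eight TCP flag bits out of the header word that holds data offset, reserved bits and flags:

/* Assumed shape of the bnx2x.h helper -- not part of this hunk.
 * tcp_flag_word() returns the 32-bit TCP header word containing
 * data offset, reserved bits, flags and window. */
#define pbd_tcp_flags(skb) \
	(ntohl(tcp_flag_word(tcp_hdr(skb))) >> 16 & 0xff)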
@@ -8387,7 +9321,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8387 9321
8388 if (skb_shinfo(skb)->gso_size && 9322 if (skb_shinfo(skb)->gso_size &&
8389 (skb->len > (bp->dev->mtu + ETH_HLEN))) { 9323 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
8390 int hlen = 2 * le32_to_cpu(pbd->total_hlen); 9324 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
8391 9325
8392 DP(NETIF_MSG_TX_QUEUED, 9326 DP(NETIF_MSG_TX_QUEUED,
8393 "TSO packet len %d hlen %d total len %d tso size %d\n", 9327 "TSO packet len %d hlen %d total len %d tso size %d\n",
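The hlen change above is a width correction: total_hlen is a 16-bit little-endian field of the parsing BD (it is filled with cpu_to_le16() earlier in this function), so it has to be converted at 16-bit width. le32_to_cpu() happened to give the same result on little-endian hosts, which is why the bug was invisible there, but it byte-swaps the wrong width on big-endian machines:

	/* total_hlen is a __le16 (set with cpu_to_le16() above), so it
	 * must be converted at its real width; le32_to_cpu() promotes the
	 * value to 32 bits first and swaps the wrong width on big-endian. */
	int hlen = 2 * le16_to_cpu(pbd->total_hlen);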
@@ -8427,7 +9361,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8427 tx_bd->vlan = cpu_to_le16(pkt_prod); 9361 tx_bd->vlan = cpu_to_le16(pkt_prod);
8428 /* this marks the bd 9362 /* this marks the bd
8429 * as one that has no individual mapping 9363 * as one that has no individual mapping
8430 * the FW ignors this flag in a bd not maked start 9364 * the FW ignores this flag in a bd not marked start
8431 */ 9365 */
8432 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO; 9366 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8433 DP(NETIF_MSG_TX_QUEUED, 9367 DP(NETIF_MSG_TX_QUEUED,
@@ -8504,9 +9438,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8504 9438
8505 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod); 9439 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
8506 9440
8507 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd); 9441 fp->hw_tx_prods->bds_prod =
9442 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
8508 mb(); /* FW restriction: must not reorder writing nbd and packets */ 9443 mb(); /* FW restriction: must not reorder writing nbd and packets */
8509 fp->hw_tx_prods->packets_prod += cpu_to_le32(1); 9444 fp->hw_tx_prods->packets_prod =
9445 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8510 DOORBELL(bp, fp_index, 0); 9446 DOORBELL(bp, fp_index, 0);
8511 9447
8512 mmiowb(); 9448 mmiowb();
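The producer updates in this hunk are an endianness fix: hw_tx_prods->bds_prod and packets_prod are little-endian fields read by the firmware, and "+= cpu_to_le16(nbd)" performs the addition on the byte-swapped representation, which is only correct on little-endian hosts because carries run across the wrong byte otherwise. The new code converts to CPU order, adds, and converts back. A worked example of the pattern, assuming bds_prod is a __le16 currently holding 255 (0x00ff) and nbd is 1, on a big-endian host:

	/* Stored LE bytes: ff 00, read back by a big-endian CPU as 0xff00.
	 *
	 * broken:  0xff00 + cpu_to_le16(1) = 0xff00 + 0x0100 = 0x0000
	 *          -> firmware sees 0, not 256.
	 * fixed:   cpu_to_le16(le16_to_cpu(0xff00) + 1)
	 *        = cpu_to_le16(0x00ff + 1) = cpu_to_le16(0x0100) = 0x0001
	 *          -> stored LE bytes 00 01, firmware sees 256, correct. */
	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);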
@@ -8525,11 +9461,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8525 return NETDEV_TX_OK; 9461 return NETDEV_TX_OK;
8526} 9462}
8527 9463
8528static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
8529{
8530 return &dev->stats;
8531}
8532
8533/* Called with rtnl_lock */ 9464/* Called with rtnl_lock */
8534static int bnx2x_open(struct net_device *dev) 9465static int bnx2x_open(struct net_device *dev)
8535{ 9466{
@@ -8543,16 +9474,13 @@ static int bnx2x_open(struct net_device *dev)
8543/* Called with rtnl_lock */ 9474/* Called with rtnl_lock */
8544static int bnx2x_close(struct net_device *dev) 9475static int bnx2x_close(struct net_device *dev)
8545{ 9476{
8546 int rc;
8547 struct bnx2x *bp = netdev_priv(dev); 9477 struct bnx2x *bp = netdev_priv(dev);
8548 9478
8549 /* Unload the driver, release IRQs */ 9479 /* Unload the driver, release IRQs */
8550 rc = bnx2x_nic_unload(bp, 1); 9480 bnx2x_nic_unload(bp, 1);
8551 if (rc) { 9481
8552 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc); 9482 if (!CHIP_REV_IS_SLOW(bp))
8553 return rc; 9483 bnx2x_set_power_state(bp, PCI_D3hot);
8554 }
8555 bnx2x_set_power_state(bp, PCI_D3hot);
8556 9484
8557 return 0; 9485 return 0;
8558} 9486}
@@ -8584,7 +9512,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8584 case SIOCGMIIPHY: 9512 case SIOCGMIIPHY:
8585 data->phy_id = bp->phy_addr; 9513 data->phy_id = bp->phy_addr;
8586 9514
8587 /* fallthru */ 9515 /* fallthrough */
8588 case SIOCGMIIREG: { 9516 case SIOCGMIIREG: {
8589 u32 mii_regval; 9517 u32 mii_regval;
8590 9518
@@ -8633,7 +9561,7 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8633 return -EINVAL; 9561 return -EINVAL;
8634 9562
8635 /* This does not race with packet allocation 9563 /* This does not race with packet allocation
8636 * because the actuall alloc size is 9564 * because the actual alloc size is
8637 * only updated as part of load 9565 * only updated as part of load
8638 */ 9566 */
8639 dev->mtu = new_mtu; 9567 dev->mtu = new_mtu;
@@ -8666,7 +9594,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
8666 9594
8667 bp->vlgrp = vlgrp; 9595 bp->vlgrp = vlgrp;
8668 if (netif_running(dev)) 9596 if (netif_running(dev))
8669 bnx2x_set_rx_mode(dev); 9597 bnx2x_set_client_config(bp);
8670} 9598}
8671#endif 9599#endif
8672 9600
@@ -8695,14 +9623,18 @@ static void bnx2x_reset_task(struct work_struct *work)
8695 if (!netif_running(bp->dev)) 9623 if (!netif_running(bp->dev))
8696 return; 9624 return;
8697 9625
8698 bp->in_reset_task = 1; 9626 rtnl_lock();
8699 9627
8700 bnx2x_netif_stop(bp); 9628 if (bp->state != BNX2X_STATE_OPEN) {
9629 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
9630 goto reset_task_exit;
9631 }
8701 9632
8702 bnx2x_nic_unload(bp, 0); 9633 bnx2x_nic_unload(bp, 0);
8703 bnx2x_nic_load(bp, 0); 9634 bnx2x_nic_load(bp, 0);
8704 9635
8705 bp->in_reset_task = 0; 9636reset_task_exit:
9637 rtnl_unlock();
8706} 9638}
8707 9639
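The reset worker now serializes itself with rtnl_lock() instead of the ad-hoc bp->in_reset_task flag: bnx2x_open()/bnx2x_close() already run under the RTNL (see the "Called with rtnl_lock" comments in this file), so holding the same lock is what actually keeps the unload/load cycle from racing with them, and the state check bails out if the device was closed while the work was still queued. A condensed sketch of the resulting pattern, restating the code in the hunk above:

	/* Sketch only: reconfiguration paths all take the RTNL, so the
	 * worker may call the same unload/load helpers as ndo_open/stop. */
	static void example_reset_task(struct work_struct *work)
	{
		struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

		rtnl_lock();
		if (netif_running(bp->dev) && bp->state == BNX2X_STATE_OPEN) {
			bnx2x_nic_unload(bp, 0);
			bnx2x_nic_load(bp, 0);
		}
		rtnl_unlock();
	}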
8708static int __devinit bnx2x_init_board(struct pci_dev *pdev, 9640static int __devinit bnx2x_init_board(struct pci_dev *pdev,
@@ -8783,8 +9715,6 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8783 9715
8784 spin_lock_init(&bp->phy_lock); 9716 spin_lock_init(&bp->phy_lock);
8785 9717
8786 bp->in_reset_task = 0;
8787
8788 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9718 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8789 INIT_WORK(&bp->sp_task, bnx2x_sp_task); 9719 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
8790 9720
@@ -8813,7 +9743,7 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8813 bnx2x_get_hwinfo(bp); 9743 bnx2x_get_hwinfo(bp);
8814 9744
8815 if (CHIP_REV(bp) == CHIP_REV_FPGA) { 9745 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
8816 printk(KERN_ERR PFX "FPGA detacted. MCP disabled," 9746 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
8817 " will only init first device\n"); 9747 " will only init first device\n");
8818 onefunc = 1; 9748 onefunc = 1;
8819 nomcp = 1; 9749 nomcp = 1;
@@ -8882,14 +9812,32 @@ err_out:
8882 return rc; 9812 return rc;
8883} 9813}
8884 9814
9815static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9816{
9817 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9818
9819 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9820 return val;
9821}
9822
9823/* return value of 1=2.5GHz 2=5GHz */
9824static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9825{
9826 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9827
9828 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9829 return val;
9830}
9831
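bnx2x_get_pcie_width() and bnx2x_get_pcie_speed() read the negotiated link width and speed fields out of the PCIe link register exposed through the device's PCICFG window; they are consumed a few hunks below when the probe banner is printed. A short usage sketch matching that printk (the sample output values are illustrative only):

	/* e.g. "eth0: ... (A1) PCI-E x8 2.5GHz found at mem ..., IRQ 16" */
	printk(KERN_INFO "%s: PCI-E x%d %s\n", dev->name,
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz");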
8885static int __devinit bnx2x_init_one(struct pci_dev *pdev, 9832static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8886 const struct pci_device_id *ent) 9833 const struct pci_device_id *ent)
8887{ 9834{
8888 static int version_printed; 9835 static int version_printed;
8889 struct net_device *dev = NULL; 9836 struct net_device *dev = NULL;
8890 struct bnx2x *bp; 9837 struct bnx2x *bp;
8891 int rc, i; 9838 int rc;
8892 int port = PCI_FUNC(pdev->devfn); 9839 int port = PCI_FUNC(pdev->devfn);
9840 DECLARE_MAC_BUF(mac);
8893 9841
8894 if (version_printed++ == 0) 9842 if (version_printed++ == 0)
8895 printk(KERN_INFO "%s", version); 9843 printk(KERN_INFO "%s", version);
@@ -8906,6 +9854,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8906 9854
8907 if (port && onefunc) { 9855 if (port && onefunc) {
8908 printk(KERN_ERR PFX "second function disabled. exiting\n"); 9856 printk(KERN_ERR PFX "second function disabled. exiting\n");
9857 free_netdev(dev);
8909 return 0; 9858 return 0;
8910 } 9859 }
8911 9860
@@ -8918,7 +9867,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8918 dev->hard_start_xmit = bnx2x_start_xmit; 9867 dev->hard_start_xmit = bnx2x_start_xmit;
8919 dev->watchdog_timeo = TX_TIMEOUT; 9868 dev->watchdog_timeo = TX_TIMEOUT;
8920 9869
8921 dev->get_stats = bnx2x_get_stats;
8922 dev->ethtool_ops = &bnx2x_ethtool_ops; 9870 dev->ethtool_ops = &bnx2x_ethtool_ops;
8923 dev->open = bnx2x_open; 9871 dev->open = bnx2x_open;
8924 dev->stop = bnx2x_close; 9872 dev->stop = bnx2x_close;
@@ -8944,7 +9892,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8944 9892
8945 rc = register_netdev(dev); 9893 rc = register_netdev(dev);
8946 if (rc) { 9894 if (rc) {
8947 printk(KERN_ERR PFX "Cannot register net device\n"); 9895 dev_err(&pdev->dev, "Cannot register net device\n");
8948 if (bp->regview) 9896 if (bp->regview)
8949 iounmap(bp->regview); 9897 iounmap(bp->regview);
8950 if (bp->doorbells) 9898 if (bp->doorbells)
@@ -8959,32 +9907,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8959 pci_set_drvdata(pdev, dev); 9907 pci_set_drvdata(pdev, dev);
8960 9908
8961 bp->name = board_info[ent->driver_data].name; 9909 bp->name = board_info[ent->driver_data].name;
8962 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz " 9910 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
8963 "found at mem %lx, IRQ %d, ", 9911 " IRQ %d, ", dev->name, bp->name,
8964 dev->name, bp->name,
8965 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 9912 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8966 ((CHIP_ID(bp) & 0x0ff0) >> 4), 9913 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8967 ((bp->flags & PCIX_FLAG) ? "-X" : ""), 9914 bnx2x_get_pcie_width(bp),
8968 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 9915 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
8969 bp->bus_speed_mhz, 9916 dev->base_addr, bp->pdev->irq);
8970 dev->base_addr, 9917 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
8971 bp->pdev->irq);
8972
8973 printk("node addr ");
8974 for (i = 0; i < 6; i++)
8975 printk("%2.2x", dev->dev_addr[i]);
8976 printk("\n");
8977
8978 return 0; 9918 return 0;
8979} 9919}
8980 9920
8981static void __devexit bnx2x_remove_one(struct pci_dev *pdev) 9921static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8982{ 9922{
8983 struct net_device *dev = pci_get_drvdata(pdev); 9923 struct net_device *dev = pci_get_drvdata(pdev);
8984 struct bnx2x *bp = netdev_priv(dev); 9924 struct bnx2x *bp;
9925
9926 if (!dev) {
9927 /* we get here if init_one() fails */
9928 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9929 return;
9930 }
9931
9932 bp = netdev_priv(dev);
8985 9933
8986 flush_scheduled_work();
8987 /*tasklet_kill(&bp->sp_task);*/
8988 unregister_netdev(dev); 9934 unregister_netdev(dev);
8989 9935
8990 if (bp->regview) 9936 if (bp->regview)
@@ -9002,34 +9948,43 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9002static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 9948static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9003{ 9949{
9004 struct net_device *dev = pci_get_drvdata(pdev); 9950 struct net_device *dev = pci_get_drvdata(pdev);
9005 struct bnx2x *bp = netdev_priv(dev); 9951 struct bnx2x *bp;
9006 int rc; 9952
9953 if (!dev)
9954 return 0;
9007 9955
9008 if (!netif_running(dev)) 9956 if (!netif_running(dev))
9009 return 0; 9957 return 0;
9010 9958
9011 rc = bnx2x_nic_unload(bp, 0); 9959 bp = netdev_priv(dev);
9012 if (!rc) 9960
9013 return rc; 9961 bnx2x_nic_unload(bp, 0);
9014 9962
9015 netif_device_detach(dev); 9963 netif_device_detach(dev);
9016 pci_save_state(pdev);
9017 9964
9965 pci_save_state(pdev);
9018 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 9966 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9967
9019 return 0; 9968 return 0;
9020} 9969}
9021 9970
9022static int bnx2x_resume(struct pci_dev *pdev) 9971static int bnx2x_resume(struct pci_dev *pdev)
9023{ 9972{
9024 struct net_device *dev = pci_get_drvdata(pdev); 9973 struct net_device *dev = pci_get_drvdata(pdev);
9025 struct bnx2x *bp = netdev_priv(dev); 9974 struct bnx2x *bp;
9026 int rc; 9975 int rc;
9027 9976
9977 if (!dev) {
9978 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9979 return -ENODEV;
9980 }
9981
9028 if (!netif_running(dev)) 9982 if (!netif_running(dev))
9029 return 0; 9983 return 0;
9030 9984
9031 pci_restore_state(pdev); 9985 bp = netdev_priv(dev);
9032 9986
9987 pci_restore_state(pdev);
9033 bnx2x_set_power_state(bp, PCI_D0); 9988 bnx2x_set_power_state(bp, PCI_D0);
9034 netif_device_attach(dev); 9989 netif_device_attach(dev);
9035 9990