aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/bnx2x_main.c
diff options
context:
space:
mode:
authorVladislav Zolotarov <vladz@broadcom.com>2010-04-18 21:13:12 -0400
committerDavid S. Miller <davem@davemloft.net>2010-04-19 16:17:05 -0400
commit72fd0718332e6514fb9db325e89ffc694bb31f6e (patch)
tree944305857677a520b4ac33732feb139b52c8af29 /drivers/net/bnx2x_main.c
parentfc6055a5ba31e2c14e36e8939f9bf2b6d586a7f5 (diff)
bnx2x: Parity errors handling for 57710 and 57711
This patch introduces the parity-error handling code for the 57710 and 57711 chips. The HW is configured to stop all DMA transactions to the host and to stop sending packets to the network once a parity error is detected, which is meant to prevent silent data corruption. At the same time, the HW generates an attention interrupt to every function of the device on which a parity error has been detected, so that the driver can start the recovery flow. The recovery consists of resetting the chip and restarting the driver on all active functions of the chip on which the parity error has been reported.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--drivers/net/bnx2x_main.c1045
1 files changed, 992 insertions, 53 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 63a17d604a98..2b6717748eb8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -764,6 +764,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
764 * General service functions 764 * General service functions
765 */ 765 */
766 766
767/* Return true if succeeded to acquire the lock */
768static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
769{
770 u32 lock_status;
771 u32 resource_bit = (1 << resource);
772 int func = BP_FUNC(bp);
773 u32 hw_lock_control_reg;
774
775 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
776
777 /* Validating that the resource is within range */
778 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
779 DP(NETIF_MSG_HW,
780 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
781 resource, HW_LOCK_MAX_RESOURCE_VALUE);
782 return -EINVAL;
783 }
784
785 if (func <= 5)
786 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
787 else
788 hw_lock_control_reg =
789 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
790
791 /* Try to acquire the lock */
792 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
793 lock_status = REG_RD(bp, hw_lock_control_reg);
794 if (lock_status & resource_bit)
795 return true;
796
797 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
798 return false;
799}
800
767static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 801static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768 u8 storm, u16 index, u8 op, u8 update) 802 u8 storm, u16 index, u8 op, u8 update)
769{ 803{
@@ -1901,6 +1935,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1901 int func = BP_FUNC(bp); 1935 int func = BP_FUNC(bp);
1902 u32 hw_lock_control_reg; 1936 u32 hw_lock_control_reg;
1903 1937
1938 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1939
1904 /* Validating that the resource is within range */ 1940 /* Validating that the resource is within range */
1905 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1941 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1906 DP(NETIF_MSG_HW, 1942 DP(NETIF_MSG_HW,
@@ -2741,12 +2777,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2741/* acquire split MCP access lock register */ 2777/* acquire split MCP access lock register */
2742static int bnx2x_acquire_alr(struct bnx2x *bp) 2778static int bnx2x_acquire_alr(struct bnx2x *bp)
2743{ 2779{
2744 u32 i, j, val; 2780 u32 j, val;
2745 int rc = 0; 2781 int rc = 0;
2746 2782
2747 might_sleep(); 2783 might_sleep();
2748 i = 100; 2784 for (j = 0; j < 1000; j++) {
2749 for (j = 0; j < i*10; j++) {
2750 val = (1UL << 31); 2785 val = (1UL << 31);
2751 REG_WR(bp, GRCBASE_MCP + 0x9c, val); 2786 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2752 val = REG_RD(bp, GRCBASE_MCP + 0x9c); 2787 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2766,9 +2801,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
2766/* release split MCP access lock register */ 2801/* release split MCP access lock register */
2767static void bnx2x_release_alr(struct bnx2x *bp) 2802static void bnx2x_release_alr(struct bnx2x *bp)
2768{ 2803{
2769 u32 val = 0; 2804 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2770
2771 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2772} 2805}
2773 2806
2774static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 2807static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2824,7 +2857,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2824 2857
2825 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 2858 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2826 aeu_mask, asserted); 2859 aeu_mask, asserted);
2827 aeu_mask &= ~(asserted & 0xff); 2860 aeu_mask &= ~(asserted & 0x3ff);
2828 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 2861 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2829 2862
2830 REG_WR(bp, aeu_addr, aeu_mask); 2863 REG_WR(bp, aeu_addr, aeu_mask);
@@ -3105,10 +3138,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3105 } 3138 }
3106} 3139}
3107 3140
3108static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3141static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3142static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3143
3144
3145#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3146#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3147#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3148#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3149#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3150#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3151/*
3152 * should be run under rtnl lock
3153 */
3154static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3155{
3156 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3158 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3159 barrier();
3160 mmiowb();
3161}
3162
3163/*
3164 * should be run under rtnl lock
3165 */
3166static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3167{
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val |= (1 << 16);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171 barrier();
3172 mmiowb();
3173}
3174
3175/*
3176 * should be run under rtnl lock
3177 */
3178static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3179{
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3182 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3183}
3184
3185/*
3186 * should be run under rtnl lock
3187 */
3188static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3189{
3190 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191
3192 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3193
3194 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3195 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3196 barrier();
3197 mmiowb();
3198}
3199
3200/*
3201 * should be run under rtnl lock
3202 */
3203static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3204{
3205 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3206
3207 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3208
3209 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3210 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3211 barrier();
3212 mmiowb();
3213
3214 return val1;
3215}
3216
3217/*
3218 * should be run under rtnl lock
3219 */
3220static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3221{
3222 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3223}
3224
3225static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3226{
3227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3229}
3230
/* Emit one block name of a comma-separated list: a ", " separator is
 * printed before every name except the first (idx == 0). */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3237
3238static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3239{
3240 int i = 0;
3241 u32 cur_bit = 0;
3242 for (i = 0; sig; i++) {
3243 cur_bit = ((u32)0x1 << i);
3244 if (sig & cur_bit) {
3245 switch (cur_bit) {
3246 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247 _print_next_block(par_num++, "BRB");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250 _print_next_block(par_num++, "PARSER");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253 _print_next_block(par_num++, "TSDM");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256 _print_next_block(par_num++, "SEARCHER");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259 _print_next_block(par_num++, "TSEMI");
3260 break;
3261 }
3262
3263 /* Clear the bit */
3264 sig &= ~cur_bit;
3265 }
3266 }
3267
3268 return par_num;
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280 _print_next_block(par_num++, "PBCLIENT");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283 _print_next_block(par_num++, "QM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289 _print_next_block(par_num++, "XSEMI");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292 _print_next_block(par_num++, "DOORBELLQ");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295 _print_next_block(par_num++, "VAUX PCI CORE");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298 _print_next_block(par_num++, "DEBUG");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301 _print_next_block(par_num++, "USDM");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304 _print_next_block(par_num++, "USEMI");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307 _print_next_block(par_num++, "UPB");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310 _print_next_block(par_num++, "CSDM");
3311 break;
3312 }
3313
3314 /* Clear the bit */
3315 sig &= ~cur_bit;
3316 }
3317 }
3318
3319 return par_num;
3320}
3321
3322static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3323{
3324 int i = 0;
3325 u32 cur_bit = 0;
3326 for (i = 0; sig; i++) {
3327 cur_bit = ((u32)0x1 << i);
3328 if (sig & cur_bit) {
3329 switch (cur_bit) {
3330 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSEMI");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334 _print_next_block(par_num++, "PXP");
3335 break;
3336 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337 _print_next_block(par_num++,
3338 "PXPPCICLOCKCLIENT");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341 _print_next_block(par_num++, "CFC");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344 _print_next_block(par_num++, "CDU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347 _print_next_block(par_num++, "IGU");
3348 break;
3349 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350 _print_next_block(par_num++, "MISC");
3351 break;
3352 }
3353
3354 /* Clear the bit */
3355 sig &= ~cur_bit;
3356 }
3357 }
3358
3359 return par_num;
3360}
3361
3362static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3363{
3364 int i = 0;
3365 u32 cur_bit = 0;
3366 for (i = 0; sig; i++) {
3367 cur_bit = ((u32)0x1 << i);
3368 if (sig & cur_bit) {
3369 switch (cur_bit) {
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371 _print_next_block(par_num++, "MCP ROM");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP RX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377 _print_next_block(par_num++, "MCP UMP TX");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380 _print_next_block(par_num++, "MCP SCPAD");
3381 break;
3382 }
3383
3384 /* Clear the bit */
3385 sig &= ~cur_bit;
3386 }
3387 }
3388
3389 return par_num;
3390}
3391
3392static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3393 u32 sig2, u32 sig3)
3394{
3395 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3397 int par_num = 0;
3398 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3399 "[0]:0x%08x [1]:0x%08x "
3400 "[2]:0x%08x [3]:0x%08x\n",
3401 sig0 & HW_PRTY_ASSERT_SET_0,
3402 sig1 & HW_PRTY_ASSERT_SET_1,
3403 sig2 & HW_PRTY_ASSERT_SET_2,
3404 sig3 & HW_PRTY_ASSERT_SET_3);
3405 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3406 bp->dev->name);
3407 par_num = bnx2x_print_blocks_with_parity0(
3408 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409 par_num = bnx2x_print_blocks_with_parity1(
3410 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411 par_num = bnx2x_print_blocks_with_parity2(
3412 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413 par_num = bnx2x_print_blocks_with_parity3(
3414 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3415 printk("\n");
3416 return true;
3417 } else
3418 return false;
3419}
3420
3421static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3109{ 3422{
3110 struct attn_route attn; 3423 struct attn_route attn;
3111 struct attn_route group_mask; 3424 int port = BP_PORT(bp);
3425
3426 attn.sig[0] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3428 port*4);
3429 attn.sig[1] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3431 port*4);
3432 attn.sig[2] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3434 port*4);
3435 attn.sig[3] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3437 port*4);
3438
3439 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3440 attn.sig[3]);
3441}
3442
3443static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3444{
3445 struct attn_route attn, *group_mask;
3112 int port = BP_PORT(bp); 3446 int port = BP_PORT(bp);
3113 int index; 3447 int index;
3114 u32 reg_addr; 3448 u32 reg_addr;
@@ -3119,6 +3453,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3119 try to handle this event */ 3453 try to handle this event */
3120 bnx2x_acquire_alr(bp); 3454 bnx2x_acquire_alr(bp);
3121 3455
3456 if (bnx2x_chk_parity_attn(bp)) {
3457 bp->recovery_state = BNX2X_RECOVERY_INIT;
3458 bnx2x_set_reset_in_progress(bp);
3459 schedule_delayed_work(&bp->reset_task, 0);
3460 /* Disable HW interrupts */
3461 bnx2x_int_disable(bp);
3462 bnx2x_release_alr(bp);
3463 /* In case of parity errors don't handle attentions so that
3464 * other function would "see" parity errors.
3465 */
3466 return;
3467 }
3468
3122 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 3469 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3123 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3470 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3124 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3471 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3128,28 +3475,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3128 3475
3129 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3476 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3130 if (deasserted & (1 << index)) { 3477 if (deasserted & (1 << index)) {
3131 group_mask = bp->attn_group[index]; 3478 group_mask = &bp->attn_group[index];
3132 3479
3133 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3480 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3134 index, group_mask.sig[0], group_mask.sig[1], 3481 index, group_mask->sig[0], group_mask->sig[1],
3135 group_mask.sig[2], group_mask.sig[3]); 3482 group_mask->sig[2], group_mask->sig[3]);
3136 3483
3137 bnx2x_attn_int_deasserted3(bp, 3484 bnx2x_attn_int_deasserted3(bp,
3138 attn.sig[3] & group_mask.sig[3]); 3485 attn.sig[3] & group_mask->sig[3]);
3139 bnx2x_attn_int_deasserted1(bp, 3486 bnx2x_attn_int_deasserted1(bp,
3140 attn.sig[1] & group_mask.sig[1]); 3487 attn.sig[1] & group_mask->sig[1]);
3141 bnx2x_attn_int_deasserted2(bp, 3488 bnx2x_attn_int_deasserted2(bp,
3142 attn.sig[2] & group_mask.sig[2]); 3489 attn.sig[2] & group_mask->sig[2]);
3143 bnx2x_attn_int_deasserted0(bp, 3490 bnx2x_attn_int_deasserted0(bp,
3144 attn.sig[0] & group_mask.sig[0]); 3491 attn.sig[0] & group_mask->sig[0]);
3145
3146 if ((attn.sig[0] & group_mask.sig[0] &
3147 HW_PRTY_ASSERT_SET_0) ||
3148 (attn.sig[1] & group_mask.sig[1] &
3149 HW_PRTY_ASSERT_SET_1) ||
3150 (attn.sig[2] & group_mask.sig[2] &
3151 HW_PRTY_ASSERT_SET_2))
3152 BNX2X_ERR("FATAL HW block parity attention\n");
3153 } 3492 }
3154 } 3493 }
3155 3494
@@ -3173,7 +3512,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3173 3512
3174 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 3513 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3175 aeu_mask, deasserted); 3514 aeu_mask, deasserted);
3176 aeu_mask |= (deasserted & 0xff); 3515 aeu_mask |= (deasserted & 0x3ff);
3177 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 3516 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3178 3517
3179 REG_WR(bp, reg_addr, aeu_mask); 3518 REG_WR(bp, reg_addr, aeu_mask);
@@ -5963,6 +6302,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
5963 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 6302 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5964} 6303}
5965 6304
6305static const struct {
6306 u32 addr;
6307 u32 mask;
6308} bnx2x_parity_mask[] = {
6309 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6310 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6311 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6312 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6313 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6314 {QM_REG_QM_PRTY_MASK, 0x0},
6315 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6316 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6317 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6318 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6319 {CDU_REG_CDU_PRTY_MASK, 0x0},
6320 {CFC_REG_CFC_PRTY_MASK, 0x0},
6321 {DBG_REG_DBG_PRTY_MASK, 0x0},
6322 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6323 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6324 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6325 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6326 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6327 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6328 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6329 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6330 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6331 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6332 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6333 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6334 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6335 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6336 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6337};
6338
6339static void enable_blocks_parity(struct bnx2x *bp)
6340{
6341 int i, mask_arr_len =
6342 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6343
6344 for (i = 0; i < mask_arr_len; i++)
6345 REG_WR(bp, bnx2x_parity_mask[i].addr,
6346 bnx2x_parity_mask[i].mask);
6347}
6348
5966 6349
5967static void bnx2x_reset_common(struct bnx2x *bp) 6350static void bnx2x_reset_common(struct bnx2x *bp)
5968{ 6351{
@@ -6306,6 +6689,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6689 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6307 6690
6308 enable_blocks_attention(bp); 6691 enable_blocks_attention(bp);
6692 if (CHIP_PARITY_SUPPORTED(bp))
6693 enable_blocks_parity(bp);
6309 6694
6310 if (!BP_NOMCP(bp)) { 6695 if (!BP_NOMCP(bp)) {
6311 bnx2x_acquire_phy_lock(bp); 6696 bnx2x_acquire_phy_lock(bp);
@@ -7657,6 +8042,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7657 if (bp->state == BNX2X_STATE_OPEN) 8042 if (bp->state == BNX2X_STATE_OPEN)
7658 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 8043 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7659#endif 8044#endif
8045 bnx2x_inc_load_cnt(bp);
7660 8046
7661 return 0; 8047 return 0;
7662 8048
@@ -7844,33 +8230,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7844 } 8230 }
7845} 8231}
7846 8232
7847/* must be called with rtnl_lock */ 8233static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7848static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7849{ 8234{
7850 int port = BP_PORT(bp); 8235 int port = BP_PORT(bp);
7851 u32 reset_code = 0; 8236 u32 reset_code = 0;
7852 int i, cnt, rc; 8237 int i, cnt, rc;
7853 8238
7854#ifdef BCM_CNIC
7855 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7856#endif
7857 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7858
7859 /* Set "drop all" */
7860 bp->rx_mode = BNX2X_RX_MODE_NONE;
7861 bnx2x_set_storm_rx_mode(bp);
7862
7863 /* Disable HW interrupts, NAPI and Tx */
7864 bnx2x_netif_stop(bp, 1);
7865
7866 del_timer_sync(&bp->timer);
7867 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7868 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7869 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7870
7871 /* Release IRQs */
7872 bnx2x_free_irq(bp, false);
7873
7874 /* Wait until tx fastpath tasks complete */ 8239 /* Wait until tx fastpath tasks complete */
7875 for_each_queue(bp, i) { 8240 for_each_queue(bp, i) {
7876 struct bnx2x_fastpath *fp = &bp->fp[i]; 8241 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8011,6 +8376,69 @@ unload_error:
8011 if (!BP_NOMCP(bp)) 8376 if (!BP_NOMCP(bp))
8012 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 8377 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8013 8378
8379}
8380
8381static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8382{
8383 u32 val;
8384
8385 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8386
8387 if (CHIP_IS_E1(bp)) {
8388 int port = BP_PORT(bp);
8389 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8390 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8391
8392 val = REG_RD(bp, addr);
8393 val &= ~(0x300);
8394 REG_WR(bp, addr, val);
8395 } else if (CHIP_IS_E1H(bp)) {
8396 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8397 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8398 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8399 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8400 }
8401}
8402
8403/* must be called with rtnl_lock */
8404static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8405{
8406 int i;
8407
8408 if (bp->state == BNX2X_STATE_CLOSED) {
8409 /* Interface has been removed - nothing to recover */
8410 bp->recovery_state = BNX2X_RECOVERY_DONE;
8411 bp->is_leader = 0;
8412 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8413 smp_wmb();
8414
8415 return -EINVAL;
8416 }
8417
8418#ifdef BCM_CNIC
8419 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8420#endif
8421 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8422
8423 /* Set "drop all" */
8424 bp->rx_mode = BNX2X_RX_MODE_NONE;
8425 bnx2x_set_storm_rx_mode(bp);
8426
8427 /* Disable HW interrupts, NAPI and Tx */
8428 bnx2x_netif_stop(bp, 1);
8429
8430 del_timer_sync(&bp->timer);
8431 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8432 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8433 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8434
8435 /* Release IRQs */
8436 bnx2x_free_irq(bp, false);
8437
8438 /* Cleanup the chip if needed */
8439 if (unload_mode != UNLOAD_RECOVERY)
8440 bnx2x_chip_cleanup(bp, unload_mode);
8441
8014 bp->port.pmf = 0; 8442 bp->port.pmf = 0;
8015 8443
8016 /* Free SKBs, SGEs, TPA pool and driver internals */ 8444 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8025,17 +8453,448 @@ unload_error:
8025 8453
8026 netif_carrier_off(bp->dev); 8454 netif_carrier_off(bp->dev);
8027 8455
8456 /* The last driver must disable a "close the gate" if there is no
8457 * parity attention or "process kill" pending.
8458 */
8459 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8460 bnx2x_reset_is_done(bp))
8461 bnx2x_disable_close_the_gate(bp);
8462
8463 /* Reset MCP mail box sequence if there is on going recovery */
8464 if (unload_mode == UNLOAD_RECOVERY)
8465 bp->fw_seq = 0;
8466
8467 return 0;
8468}
8469
8470/* Close gates #2, #3 and #4: */
8471static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8472{
8473 u32 val, addr;
8474
8475 /* Gates #2 and #4a are closed/opened for "not E1" only */
8476 if (!CHIP_IS_E1(bp)) {
8477 /* #4 */
8478 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8479 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8480 close ? (val | 0x1) : (val & (~(u32)1)));
8481 /* #2 */
8482 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8483 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8484 close ? (val | 0x1) : (val & (~(u32)1)));
8485 }
8486
8487 /* #3 */
8488 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8489 val = REG_RD(bp, addr);
8490 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8491
8492 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8493 close ? "closing" : "opening");
8494 mmiowb();
8495}
8496
8497#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8498
8499static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8500{
8501 /* Do some magic... */
8502 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8503 *magic_val = val & SHARED_MF_CLP_MAGIC;
8504 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8505}
8506
8507/* Restore the value of the `magic' bit.
8508 *
8509 * @param pdev Device handle.
8510 * @param magic_val Old value of the `magic' bit.
8511 */
8512static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8513{
8514 /* Restore the `magic' bit value... */
8515 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8516 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8517 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8518 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8519 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8520 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8521}
8522
8523/* Prepares for MCP reset: takes care of CLP configurations.
8524 *
8525 * @param bp
8526 * @param magic_val Old value of 'magic' bit.
8527 */
8528static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8529{
8530 u32 shmem;
8531 u32 validity_offset;
8532
8533 DP(NETIF_MSG_HW, "Starting\n");
8534
8535 /* Set `magic' bit in order to save MF config */
8536 if (!CHIP_IS_E1(bp))
8537 bnx2x_clp_reset_prep(bp, magic_val);
8538
8539 /* Get shmem offset */
8540 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8541 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8542
8543 /* Clear validity map flags */
8544 if (shmem > 0)
8545 REG_WR(bp, shmem + validity_offset, 0);
8546}
8547
#define MCP_TIMEOUT      5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100	/* 100 ms */

/* Sleep one MCP polling interval. Emulation and FPGA platforms are
 * slower, so the wait is 10 times longer there. */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	msleep(CHIP_REV_IS_SLOW(bp) ? MCP_ONE_TIMEOUT * 10 :
				      MCP_ONE_TIMEOUT);
}
8565
8566static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8567{
8568 u32 shmem, cnt, validity_offset, val;
8569 int rc = 0;
8570
8571 msleep(100);
8572
8573 /* Get shmem offset */
8574 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8575 if (shmem == 0) {
8576 BNX2X_ERR("Shmem 0 return failure\n");
8577 rc = -ENOTTY;
8578 goto exit_lbl;
8579 }
8580
8581 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8582
8583 /* Wait for MCP to come up */
8584 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8585 /* TBD: its best to check validity map of last port.
8586 * currently checks on port 0.
8587 */
8588 val = REG_RD(bp, shmem + validity_offset);
8589 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8590 shmem + validity_offset, val);
8591
8592 /* check that shared memory is valid. */
8593 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8594 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8595 break;
8596
8597 bnx2x_mcp_wait_one(bp);
8598 }
8599
8600 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8601
8602 /* Check that shared memory is valid. This indicates that MCP is up. */
8603 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8604 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8605 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8606 rc = -ENOTTY;
8607 goto exit_lbl;
8608 }
8609
8610exit_lbl:
8611 /* Restore the `magic' bit value */
8612 if (!CHIP_IS_E1(bp))
8613 bnx2x_clp_reset_done(bp, magic_val);
8614
8615 return rc;
8616}
8617
8618static void bnx2x_pxp_prep(struct bnx2x *bp)
8619{
8620 if (!CHIP_IS_E1(bp)) {
8621 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8622 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8623 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8624 mmiowb();
8625 }
8626}
8627
8628/*
8629 * Reset the whole chip except for:
8630 * - PCIE core
8631 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8632 * one reset bit)
8633 * - IGU
8634 * - MISC (including AEU)
8635 * - GRC
8636 * - RBCN, RBCP
8637 */
8638static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8639{
8640 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8641
8642 not_reset_mask1 =
8643 MISC_REGISTERS_RESET_REG_1_RST_HC |
8644 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8645 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8646
8647 not_reset_mask2 =
8648 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8649 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8650 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8651 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8652 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8653 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8654 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8655 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8656
8657 reset_mask1 = 0xffffffff;
8658
8659 if (CHIP_IS_E1(bp))
8660 reset_mask2 = 0xffff;
8661 else
8662 reset_mask2 = 0x1ffff;
8663
8664 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8665 reset_mask1 & (~not_reset_mask1));
8666 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8667 reset_mask2 & (~not_reset_mask2));
8668
8669 barrier();
8670 mmiowb();
8671
8672 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8674 mmiowb();
8675}
8676
/*
 * bnx2x_process_kill - reset the whole chip as part of parity-error
 * recovery ("process kill").
 *
 * Flow: wait up to ~1s for the PXP "Tetris" buffer to drain and all
 * outstanding read requests to complete, close GRC gates #2/#3/#4,
 * prepare the MCP and PXP for reset, reset the chip, then recover the
 * MCP/PXP and re-open the gates.
 *
 * Returns 0 on success, -EAGAIN if the Tetris buffer did not drain in
 * time or the MCP did not come back after the reset.
 */
8677static int bnx2x_process_kill(struct bnx2x *bp)
8678{
8679	int cnt = 1000;
8680	u32 val = 0;
8681	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8682
8683
	/* Poll the PXP idle indications roughly once per millisecond.
	 * The magic values (0x7e, 0xa0, bit0 of the port-idle regs,
	 * 0xffffffff) are the chip's fully-idle readings - presumably
	 * taken from the HW spec; confirm against it.
	 */
8684	/* Empty the Tetris buffer, wait for 1s */
8685	do {
8686		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8687		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8688		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8689		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8690		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8691		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8692			((port_is_idle_0 & 0x1) == 0x1) &&
8693			((port_is_idle_1 & 0x1) == 0x1) &&
8694			(pgl_exp_rom2 == 0xffffffff))
8695			break;
8696		msleep(1);
8697	} while (cnt-- > 0);
8698
	/* NOTE(review): if the idle condition is first met on the very
	 * last iteration (break taken with cnt already 0), this check
	 * mis-reports a timeout - boundary case worth confirming.
	 */
8699	if (cnt <= 0) {
8700		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8701			  " are still"
8702			  " outstanding read requests after 1s!\n");
8703		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8704			  " port_is_idle_0=0x%08x,"
8705			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8706			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8707			  pgl_exp_rom2);
8708		return -EAGAIN;
8709	}
8710
8711	barrier();
8712
	/* Isolate the chip from the host/network before resetting it */
8713	/* Close gates #2, #3 and #4 */
8714	bnx2x_set_234_gates(bp, true);
8715
8716	/* TBD: Indicate that "process kill" is in progress to MCP */
8717
8718	/* Clear "unprepared" bit */
8719	REG_WR(bp, MISC_REG_UNPREPARED, 0);
8720	barrier();
8721
8722	/* Make sure all is written to the chip before the reset */
8723	mmiowb();
8724
8725	/* Wait for 1ms to empty GLUE and PCI-E core queues,
8726	 * PSWHST, GRC and PSWRD Tetris buffer.
8727	 */
8728	msleep(1);
8729
8730	/* Prepare to chip reset: */
	/* bnx2x_reset_mcp_prep() saves state into 'val' that
	 * bnx2x_reset_mcp_comp() checks against after the reset.
	 */
8731	/* MCP */
8732	bnx2x_reset_mcp_prep(bp, &val);
8733
8734	/* PXP */
8735	bnx2x_pxp_prep(bp);
8736	barrier();
8737
8738	/* reset the chip */
8739	bnx2x_process_kill_chip_reset(bp);
8740	barrier();
8741
8742	/* Recover after reset: */
8743	/* MCP */
8744	if (bnx2x_reset_mcp_comp(bp, val))
8745		return -EAGAIN;
8746
	/* Re-run the PXP preparation after the reset as well */
8747	/* PXP */
8748	bnx2x_pxp_prep(bp);
8749
8750	/* Open the gates #2, #3 and #4 */
8751	bnx2x_set_234_gates(bp, false);
8752
8753	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8754	 * reset state, re-enable attentions. */
8755
8028	return 0; 8756 	return 0;
8029} 8757}
8030 8758
8759static int bnx2x_leader_reset(struct bnx2x *bp)
8760{
8761 int rc = 0;
8762 /* Try to recover after the failure */
8763 if (bnx2x_process_kill(bp)) {
8764 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8765 bp->dev->name);
8766 rc = -EAGAIN;
8767 goto exit_leader_reset;
8768 }
8769
8770 /* Clear "reset is in progress" bit and update the driver state */
8771 bnx2x_set_reset_done(bp);
8772 bp->recovery_state = BNX2X_RECOVERY_DONE;
8773
8774exit_leader_reset:
8775 bp->is_leader = 0;
8776 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8777 smp_wmb();
8778 return rc;
8779}
8780
8781static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8782
8783/* Assumption: runs under rtnl lock. This together with the fact
8784 * that it's called only from bnx2x_reset_task() ensure that it
8785 * will never be called when netif_running(bp->dev) is false.
8786 */
/*
 * Parity-recovery state machine, driven by bp->recovery_state:
 *   INIT - try to become the recovery leader (grab the leader HW lock),
 *          unload the NIC and move to WAIT.
 *   WAIT - leader: once the chip-wide load counter drops to zero, reset
 *          the chip via bnx2x_leader_reset() and reload; non-leader:
 *          wait for the leader to finish (possibly taking over
 *          leadership if it went away), then reload.
 * Waiting is implemented by re-scheduling bp->reset_task and returning;
 * a "break" out of the switch re-evaluates the state immediately.
 */
8787static void bnx2x_parity_recover(struct bnx2x *bp)
8788{
8789	DP(NETIF_MSG_HW, "Handling parity\n");
8790	while (1) {
8791		switch (bp->recovery_state) {
8792		case BNX2X_RECOVERY_INIT:
8793			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8794			/* Try to get a LEADER_LOCK HW lock */
8795			if (bnx2x_trylock_hw_lock(bp,
8796					       HW_LOCK_RESOURCE_RESERVED_08))
8797				bp->is_leader = 1;
8798
8799			/* Stop the driver */
8800			/* If interface has been removed - break */
8801			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8802				return;
8803
8804			bp->recovery_state = BNX2X_RECOVERY_WAIT;
8805			/* Ensure "is_leader" and "recovery_state"
8806			 * update values are seen on other CPUs
8807			 */
8808			smp_wmb();
8809			break;
8810
8811		case BNX2X_RECOVERY_WAIT:
8812			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8813			if (bp->is_leader) {
8814				u32 load_counter = bnx2x_get_load_cnt(bp);
8815				if (load_counter) {
8816					/* Wait until all other functions get
8817					 * down.
8818					 */
8819					schedule_delayed_work(&bp->reset_task,
8820								HZ/10);
8821					return;
8822				} else {
8823					/* If all other functions got down -
8824					 * try to bring the chip back to
8825					 * normal. In any case it's an exit
8826					 * point for a leader.
8827					 */
8828					if (bnx2x_leader_reset(bp) ||
8829					bnx2x_nic_load(bp, LOAD_NORMAL)) {
8830						printk(KERN_ERR"%s: Recovery "
8831						"has failed. Power cycle is "
8832						"needed.\n", bp->dev->name);
8833						/* Disconnect this device */
8834						netif_device_detach(bp->dev);
8835						/* Block ifup for all function
8836						 * of this ASIC until
8837						 * "process kill" or power
8838						 * cycle.
8839						 */
8840						bnx2x_set_reset_in_progress(bp);
8841						/* Shut down the power */
8842						bnx2x_set_power_state(bp,
8843								PCI_D3hot);
8844						return;
8845					}
8846
8847					return;
8848				}
8849			} else { /* non-leader */
8850				if (!bnx2x_reset_is_done(bp)) {
8851					/* Try to get a LEADER_LOCK HW lock as
8852					 * long as a former leader may have
8853					 * been unloaded by the user or
8854					 * released a leadership by another
8855					 * reason.
8856					 */
8857					if (bnx2x_trylock_hw_lock(bp,
8858					    HW_LOCK_RESOURCE_RESERVED_08)) {
8859						/* I'm a leader now! Restart a
8860						 * switch case.
8861						 */
8862						bp->is_leader = 1;
8863						break;
8864					}
8865
8866					schedule_delayed_work(&bp->reset_task,
8867								HZ/10);
8868					return;
8869
8870				} else { /* A leader has completed
8871					  * the "process kill". It's an exit
8872					  * point for a non-leader.
8873					  */
8874					bnx2x_nic_load(bp, LOAD_NORMAL);
8875					bp->recovery_state =
8876						BNX2X_RECOVERY_DONE;
8877					smp_wmb();
8878					return;
8879				}
8880			}
		/* No fallthrough from BNX2X_RECOVERY_WAIT: every branch
		 * above either returns or breaks out of the switch.
		 */
8881		default:
8882			return;
8883		}
8884	}
8885}
8886
8887/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8888 * scheduled on a general queue in order to prevent a dead lock.
8889 */
8031static void bnx2x_reset_task(struct work_struct *work) 8890static void bnx2x_reset_task(struct work_struct *work)
8032{ 8891{
8033 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); 8892 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8034 8893
8035#ifdef BNX2X_STOP_ON_ERROR 8894#ifdef BNX2X_STOP_ON_ERROR
8036 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 8895 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8037 " so reset not done to allow debug dump,\n" 8896 " so reset not done to allow debug dump,\n"
8038 " you will need to reboot when done\n"); 8897 KERN_ERR " you will need to reboot when done\n");
8039 return; 8898 return;
8040#endif 8899#endif
8041 8900
@@ -8044,8 +8903,12 @@ static void bnx2x_reset_task(struct work_struct *work)
8044 if (!netif_running(bp->dev)) 8903 if (!netif_running(bp->dev))
8045 goto reset_task_exit; 8904 goto reset_task_exit;
8046 8905
8047 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 8906 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8048 bnx2x_nic_load(bp, LOAD_NORMAL); 8907 bnx2x_parity_recover(bp);
8908 else {
8909 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8910 bnx2x_nic_load(bp, LOAD_NORMAL);
8911 }
8049 8912
8050reset_task_exit: 8913reset_task_exit:
8051 rtnl_unlock(); 8914 rtnl_unlock();
@@ -8913,7 +9776,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8913#endif 9776#endif
8914 9777
8915 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 9778 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8916 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9779 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8917 9780
8918 rc = bnx2x_get_hwinfo(bp); 9781 rc = bnx2x_get_hwinfo(bp);
8919 9782
@@ -9888,6 +10751,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
9888 struct bnx2x *bp = netdev_priv(dev); 10751 struct bnx2x *bp = netdev_priv(dev);
9889 int rc = 0; 10752 int rc = 0;
9890 10753
10754 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10755 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10756 return -EAGAIN;
10757 }
10758
9891 if ((ering->rx_pending > MAX_RX_AVAIL) || 10759 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9892 (ering->tx_pending > MAX_TX_AVAIL) || 10760 (ering->tx_pending > MAX_TX_AVAIL) ||
9893 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 10761 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9973,6 +10841,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9973 int changed = 0; 10841 int changed = 0;
9974 int rc = 0; 10842 int rc = 0;
9975 10843
10844 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10845 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10846 return -EAGAIN;
10847 }
10848
9976 /* TPA requires Rx CSUM offloading */ 10849 /* TPA requires Rx CSUM offloading */
9977 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 10850 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9978 if (!disable_tpa) { 10851 if (!disable_tpa) {
@@ -10009,6 +10882,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10009 struct bnx2x *bp = netdev_priv(dev); 10882 struct bnx2x *bp = netdev_priv(dev);
10010 int rc = 0; 10883 int rc = 0;
10011 10884
10885 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10886 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10887 return -EAGAIN;
10888 }
10889
10012 bp->rx_csum = data; 10890 bp->rx_csum = data;
10013 10891
10014 /* Disable TPA, when Rx CSUM is disabled. Otherwise all 10892 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10471,6 +11349,12 @@ static void bnx2x_self_test(struct net_device *dev,
10471{ 11349{
10472 struct bnx2x *bp = netdev_priv(dev); 11350 struct bnx2x *bp = netdev_priv(dev);
10473 11351
11352 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11353 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11354 etest->flags |= ETH_TEST_FL_FAILED;
11355 return;
11356 }
11357
10474 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 11358 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10475 11359
10476 if (!netif_running(dev)) 11360 if (!netif_running(dev))
@@ -11456,6 +12340,40 @@ static int bnx2x_open(struct net_device *dev)
11456 12340
11457 bnx2x_set_power_state(bp, PCI_D0); 12341 bnx2x_set_power_state(bp, PCI_D0);
11458 12342
12343 if (!bnx2x_reset_is_done(bp)) {
12344 do {
12345 /* Reset MCP mail box sequence if there is on going
12346 * recovery
12347 */
12348 bp->fw_seq = 0;
12349
 12350 /* If this is the first function to load while reset-done
 12351 * is still not cleared, a previous recovery may have been
 12352 * interrupted. We don't check the attention state here
 12353 * because it may have already been cleared by a "common"
 12354 * reset, but we shall proceed with "process kill" anyway.
12355 */
12356 if ((bnx2x_get_load_cnt(bp) == 0) &&
12357 bnx2x_trylock_hw_lock(bp,
12358 HW_LOCK_RESOURCE_RESERVED_08) &&
12359 (!bnx2x_leader_reset(bp))) {
12360 DP(NETIF_MSG_HW, "Recovered in open\n");
12361 break;
12362 }
12363
12364 bnx2x_set_power_state(bp, PCI_D3hot);
12365
12366 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12367 " completed yet. Try again later. If u still see this"
12368 " message after a few retries then power cycle is"
12369 " required.\n", bp->dev->name);
12370
12371 return -EAGAIN;
12372 } while (0);
12373 }
12374
12375 bp->recovery_state = BNX2X_RECOVERY_DONE;
12376
11459 return bnx2x_nic_load(bp, LOAD_OPEN); 12377 return bnx2x_nic_load(bp, LOAD_OPEN);
11460} 12378}
11461 12379
@@ -11694,6 +12612,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11694 struct bnx2x *bp = netdev_priv(dev); 12612 struct bnx2x *bp = netdev_priv(dev);
11695 int rc = 0; 12613 int rc = 0;
11696 12614
12615 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12616 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12617 return -EAGAIN;
12618 }
12619
11697 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 12620 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11698 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) 12621 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11699 return -EINVAL; 12622 return -EINVAL;
@@ -11721,7 +12644,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
11721 bnx2x_panic(); 12644 bnx2x_panic();
11722#endif 12645#endif
11723 /* This allows the netif to be shutdown gracefully before resetting */ 12646 /* This allows the netif to be shutdown gracefully before resetting */
11724 schedule_work(&bp->reset_task); 12647 schedule_delayed_work(&bp->reset_task, 0);
11725} 12648}
11726 12649
11727#ifdef BCM_VLAN 12650#ifdef BCM_VLAN
@@ -11880,6 +12803,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11880 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 12803 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11881 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 12804 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11882 12805
12806 /* Reset the load counter */
12807 bnx2x_clear_load_cnt(bp);
12808
11883 dev->watchdog_timeo = TX_TIMEOUT; 12809 dev->watchdog_timeo = TX_TIMEOUT;
11884 12810
11885 dev->netdev_ops = &bnx2x_netdev_ops; 12811 dev->netdev_ops = &bnx2x_netdev_ops;
@@ -12205,6 +13131,9 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12205 13131
12206 unregister_netdev(dev); 13132 unregister_netdev(dev);
12207 13133
13134 /* Make sure RESET task is not scheduled before continuing */
13135 cancel_delayed_work_sync(&bp->reset_task);
13136
12208 kfree(bp->init_ops_offsets); 13137 kfree(bp->init_ops_offsets);
12209 kfree(bp->init_ops); 13138 kfree(bp->init_ops);
12210 kfree(bp->init_data); 13139 kfree(bp->init_data);
@@ -12268,6 +13197,11 @@ static int bnx2x_resume(struct pci_dev *pdev)
12268 } 13197 }
12269 bp = netdev_priv(dev); 13198 bp = netdev_priv(dev);
12270 13199
13200 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13201 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13202 return -EAGAIN;
13203 }
13204
12271 rtnl_lock(); 13205 rtnl_lock();
12272 13206
12273 pci_restore_state(pdev); 13207 pci_restore_state(pdev);
@@ -12434,6 +13368,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12434 struct net_device *dev = pci_get_drvdata(pdev); 13368 struct net_device *dev = pci_get_drvdata(pdev);
12435 struct bnx2x *bp = netdev_priv(dev); 13369 struct bnx2x *bp = netdev_priv(dev);
12436 13370
13371 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13372 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13373 return;
13374 }
13375
12437 rtnl_lock(); 13376 rtnl_lock();
12438 13377
12439 bnx2x_eeh_recover(bp); 13378 bnx2x_eeh_recover(bp);