-rw-r--r-- drivers/net/bnx2x.h      |   23
-rw-r--r-- drivers/net/bnx2x_main.c | 1045
-rw-r--r-- drivers/net/bnx2x_reg.h  |   27
3 files changed, 1039 insertions(+), 56 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index ae9c89ebcc8b..ccef13b41dfa 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -155,9 +155,15 @@ do { \ | |||
155 | #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) | 155 | #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) |
156 | #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) | 156 | #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) |
157 | 157 | ||
158 | #define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field) | ||
159 | #define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val) | ||
160 | |||
158 | #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) | 161 | #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) |
159 | #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) | 162 | #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) |
160 | 163 | ||
164 | #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ | ||
165 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR | ||
166 | |||
161 | 167 | ||
162 | /* fast path */ | 168 | /* fast path */ |
163 | 169 | ||
@@ -818,6 +824,12 @@ struct attn_route { | |||
818 | u32 sig[4]; | 824 | u32 sig[4]; |
819 | }; | 825 | }; |
820 | 826 | ||
827 | typedef enum { | ||
828 | BNX2X_RECOVERY_DONE, | ||
829 | BNX2X_RECOVERY_INIT, | ||
830 | BNX2X_RECOVERY_WAIT, | ||
831 | } bnx2x_recovery_state_t; | ||
832 | |||
821 | struct bnx2x { | 833 | struct bnx2x { |
822 | /* Fields used in the tx and intr/napi performance paths | 834 | /* Fields used in the tx and intr/napi performance paths |
823 | * are grouped together in the beginning of the structure | 835 | * are grouped together in the beginning of the structure |
@@ -835,6 +847,9 @@ struct bnx2x { | |||
835 | struct pci_dev *pdev; | 847 | struct pci_dev *pdev; |
836 | 848 | ||
837 | atomic_t intr_sem; | 849 | atomic_t intr_sem; |
850 | |||
851 | bnx2x_recovery_state_t recovery_state; | ||
852 | int is_leader; | ||
838 | #ifdef BCM_CNIC | 853 | #ifdef BCM_CNIC |
839 | struct msix_entry msix_table[MAX_CONTEXT+2]; | 854 | struct msix_entry msix_table[MAX_CONTEXT+2]; |
840 | #else | 855 | #else |
@@ -924,8 +939,7 @@ struct bnx2x { | |||
924 | int mrrs; | 939 | int mrrs; |
925 | 940 | ||
926 | struct delayed_work sp_task; | 941 | struct delayed_work sp_task; |
927 | struct work_struct reset_task; | 942 | struct delayed_work reset_task; |
928 | |||
929 | struct timer_list timer; | 943 | struct timer_list timer; |
930 | int current_interval; | 944 | int current_interval; |
931 | 945 | ||
@@ -1125,6 +1139,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1125 | #define LOAD_DIAG 2 | 1139 | #define LOAD_DIAG 2 |
1126 | #define UNLOAD_NORMAL 0 | 1140 | #define UNLOAD_NORMAL 0 |
1127 | #define UNLOAD_CLOSE 1 | 1141 | #define UNLOAD_CLOSE 1 |
1142 | #define UNLOAD_RECOVERY 2 | ||
1128 | 1143 | ||
1129 | 1144 | ||
1130 | /* DMAE command defines */ | 1145 | /* DMAE command defines */ |
@@ -1294,6 +1309,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1294 | AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ | 1309 | AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ |
1295 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) | 1310 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) |
1296 | 1311 | ||
1312 | #define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ | ||
1313 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ | ||
1314 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ | ||
1315 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) | ||
1297 | 1316 | ||
1298 | #define MULTI_FLAGS(bp) \ | 1317 | #define MULTI_FLAGS(bp) \ |
1299 | (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ | 1318 | (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 63a17d604a98..2b6717748eb8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -764,6 +764,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
764 | * General service functions | 764 | * General service functions |
765 | */ | 765 | */ |
766 | 766 | ||
767 | /* Return true if the lock was acquired successfully */ | ||
768 | static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) | ||
769 | { | ||
770 | u32 lock_status; | ||
771 | u32 resource_bit = (1 << resource); | ||
772 | int func = BP_FUNC(bp); | ||
773 | u32 hw_lock_control_reg; | ||
774 | |||
775 | DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource); | ||
776 | |||
777 | /* Validating that the resource is within range */ | ||
778 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | ||
779 | DP(NETIF_MSG_HW, | ||
780 | "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", | ||
781 | resource, HW_LOCK_MAX_RESOURCE_VALUE); | ||
782 | return false; /* bool function - do not return -EINVAL */ | ||
783 | } | ||
784 | |||
785 | if (func <= 5) | ||
786 | hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
787 | else | ||
788 | hw_lock_control_reg = | ||
789 | (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); | ||
790 | |||
791 | /* Try to acquire the lock */ | ||
792 | REG_WR(bp, hw_lock_control_reg + 4, resource_bit); | ||
793 | lock_status = REG_RD(bp, hw_lock_control_reg); | ||
794 | if (lock_status & resource_bit) | ||
795 | return true; | ||
796 | |||
797 | DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource); | ||
798 | return false; | ||
799 | } | ||
800 | |||
767 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | 801 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, |
768 | u8 storm, u16 index, u8 op, u8 update) | 802 | u8 storm, u16 index, u8 op, u8 update) |
769 | { | 803 | { |
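The try-lock above is how the recovery flow added later in this patch elects a single "leader" function per chip: whichever function grabs the reserved HW lock runs the "process kill", the others wait for it to finish. A minimal sketch of that usage, assuming HW_LOCK_RESOURCE_RESERVED_08 as the leader lock (the same resource the recovery code further down uses):

	/* Sketch only: leader election as done in bnx2x_parity_recover() below */
	static void example_try_become_leader(struct bnx2x *bp)
	{
		/* Non-blocking: false means another function already owns the lock */
		if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08))
			bp->is_leader = 1;	/* this function will drive the reset */
		/* otherwise remain a follower and poll bnx2x_reset_is_done() */
	}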
@@ -1901,6 +1935,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) | |||
1901 | int func = BP_FUNC(bp); | 1935 | int func = BP_FUNC(bp); |
1902 | u32 hw_lock_control_reg; | 1936 | u32 hw_lock_control_reg; |
1903 | 1937 | ||
1938 | DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); | ||
1939 | |||
1904 | /* Validating that the resource is within range */ | 1940 | /* Validating that the resource is within range */ |
1905 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | 1941 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { |
1906 | DP(NETIF_MSG_HW, | 1942 | DP(NETIF_MSG_HW, |
@@ -2741,12 +2777,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2741 | /* acquire split MCP access lock register */ | 2777 | /* acquire split MCP access lock register */ |
2742 | static int bnx2x_acquire_alr(struct bnx2x *bp) | 2778 | static int bnx2x_acquire_alr(struct bnx2x *bp) |
2743 | { | 2779 | { |
2744 | u32 i, j, val; | 2780 | u32 j, val; |
2745 | int rc = 0; | 2781 | int rc = 0; |
2746 | 2782 | ||
2747 | might_sleep(); | 2783 | might_sleep(); |
2748 | i = 100; | 2784 | for (j = 0; j < 1000; j++) { |
2749 | for (j = 0; j < i*10; j++) { | ||
2750 | val = (1UL << 31); | 2785 | val = (1UL << 31); |
2751 | REG_WR(bp, GRCBASE_MCP + 0x9c, val); | 2786 | REG_WR(bp, GRCBASE_MCP + 0x9c, val); |
2752 | val = REG_RD(bp, GRCBASE_MCP + 0x9c); | 2787 | val = REG_RD(bp, GRCBASE_MCP + 0x9c); |
@@ -2766,9 +2801,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) | |||
2766 | /* release split MCP access lock register */ | 2801 | /* release split MCP access lock register */ |
2767 | static void bnx2x_release_alr(struct bnx2x *bp) | 2802 | static void bnx2x_release_alr(struct bnx2x *bp) |
2768 | { | 2803 | { |
2769 | u32 val = 0; | 2804 | REG_WR(bp, GRCBASE_MCP + 0x9c, 0); |
2770 | |||
2771 | REG_WR(bp, GRCBASE_MCP + 0x9c, val); | ||
2772 | } | 2805 | } |
2773 | 2806 | ||
2774 | static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | 2807 | static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) |
@@ -2824,7 +2857,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
2824 | 2857 | ||
2825 | DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", | 2858 | DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", |
2826 | aeu_mask, asserted); | 2859 | aeu_mask, asserted); |
2827 | aeu_mask &= ~(asserted & 0xff); | 2860 | aeu_mask &= ~(asserted & 0x3ff); |
2828 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); | 2861 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); |
2829 | 2862 | ||
2830 | REG_WR(bp, aeu_addr, aeu_mask); | 2863 | REG_WR(bp, aeu_addr, aeu_mask); |
@@ -3105,10 +3138,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3105 | } | 3138 | } |
3106 | } | 3139 | } |
3107 | 3140 | ||
3108 | static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | 3141 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); |
3142 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode); | ||
3143 | |||
3144 | |||
3145 | #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 | ||
3146 | #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ | ||
3147 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) | ||
3148 | #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) | ||
3149 | #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS | ||
3150 | #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | ||
3151 | /* | ||
3152 | * should be run under rtnl lock | ||
3153 | */ | ||
3154 | static inline void bnx2x_set_reset_done(struct bnx2x *bp) | ||
3155 | { | ||
3156 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3157 | val &= ~(1 << RESET_DONE_FLAG_SHIFT); | ||
3158 | REG_WR(bp, BNX2X_MISC_GEN_REG, val); | ||
3159 | barrier(); | ||
3160 | mmiowb(); | ||
3161 | } | ||
3162 | |||
3163 | /* | ||
3164 | * should be run under rtnl lock | ||
3165 | */ | ||
3166 | static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) | ||
3167 | { | ||
3168 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3169 | val |= (1 << 16); | ||
3170 | REG_WR(bp, BNX2X_MISC_GEN_REG, val); | ||
3171 | barrier(); | ||
3172 | mmiowb(); | ||
3173 | } | ||
3174 | |||
3175 | /* | ||
3176 | * should be run under rtnl lock | ||
3177 | */ | ||
3178 | static inline bool bnx2x_reset_is_done(struct bnx2x *bp) | ||
3179 | { | ||
3180 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3181 | DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); | ||
3182 | return (val & RESET_DONE_FLAG_MASK) ? false : true; | ||
3183 | } | ||
3184 | |||
3185 | /* | ||
3186 | * should be run under rtnl lock | ||
3187 | */ | ||
3188 | static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) | ||
3189 | { | ||
3190 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3191 | |||
3192 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); | ||
3193 | |||
3194 | val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK; | ||
3195 | REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); | ||
3196 | barrier(); | ||
3197 | mmiowb(); | ||
3198 | } | ||
3199 | |||
3200 | /* | ||
3201 | * should be run under rtnl lock | ||
3202 | */ | ||
3203 | static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) | ||
3204 | { | ||
3205 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3206 | |||
3207 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); | ||
3208 | |||
3209 | val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK; | ||
3210 | REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); | ||
3211 | barrier(); | ||
3212 | mmiowb(); | ||
3213 | |||
3214 | return val1; | ||
3215 | } | ||
3216 | |||
3217 | /* | ||
3218 | * should be run under rtnl lock | ||
3219 | */ | ||
3220 | static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp) | ||
3221 | { | ||
3222 | return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK; | ||
3223 | } | ||
3224 | |||
3225 | static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) | ||
3226 | { | ||
3227 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | ||
3228 | REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK)); | ||
3229 | } | ||
3230 | |||
3231 | static inline void _print_next_block(int idx, const char *blk) | ||
3232 | { | ||
3233 | if (idx) | ||
3234 | pr_cont(", "); | ||
3235 | pr_cont("%s", blk); | ||
3236 | } | ||
3237 | |||
3238 | static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) | ||
3239 | { | ||
3240 | int i = 0; | ||
3241 | u32 cur_bit = 0; | ||
3242 | for (i = 0; sig; i++) { | ||
3243 | cur_bit = ((u32)0x1 << i); | ||
3244 | if (sig & cur_bit) { | ||
3245 | switch (cur_bit) { | ||
3246 | case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: | ||
3247 | _print_next_block(par_num++, "BRB"); | ||
3248 | break; | ||
3249 | case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: | ||
3250 | _print_next_block(par_num++, "PARSER"); | ||
3251 | break; | ||
3252 | case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: | ||
3253 | _print_next_block(par_num++, "TSDM"); | ||
3254 | break; | ||
3255 | case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: | ||
3256 | _print_next_block(par_num++, "SEARCHER"); | ||
3257 | break; | ||
3258 | case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: | ||
3259 | _print_next_block(par_num++, "TSEMI"); | ||
3260 | break; | ||
3261 | } | ||
3262 | |||
3263 | /* Clear the bit */ | ||
3264 | sig &= ~cur_bit; | ||
3265 | } | ||
3266 | } | ||
3267 | |||
3268 | return par_num; | ||
3269 | } | ||
3270 | |||
3271 | static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) | ||
3272 | { | ||
3273 | int i = 0; | ||
3274 | u32 cur_bit = 0; | ||
3275 | for (i = 0; sig; i++) { | ||
3276 | cur_bit = ((u32)0x1 << i); | ||
3277 | if (sig & cur_bit) { | ||
3278 | switch (cur_bit) { | ||
3279 | case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: | ||
3280 | _print_next_block(par_num++, "PBCLIENT"); | ||
3281 | break; | ||
3282 | case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: | ||
3283 | _print_next_block(par_num++, "QM"); | ||
3284 | break; | ||
3285 | case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: | ||
3286 | _print_next_block(par_num++, "XSDM"); | ||
3287 | break; | ||
3288 | case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: | ||
3289 | _print_next_block(par_num++, "XSEMI"); | ||
3290 | break; | ||
3291 | case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: | ||
3292 | _print_next_block(par_num++, "DOORBELLQ"); | ||
3293 | break; | ||
3294 | case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: | ||
3295 | _print_next_block(par_num++, "VAUX PCI CORE"); | ||
3296 | break; | ||
3297 | case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: | ||
3298 | _print_next_block(par_num++, "DEBUG"); | ||
3299 | break; | ||
3300 | case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: | ||
3301 | _print_next_block(par_num++, "USDM"); | ||
3302 | break; | ||
3303 | case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: | ||
3304 | _print_next_block(par_num++, "USEMI"); | ||
3305 | break; | ||
3306 | case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: | ||
3307 | _print_next_block(par_num++, "UPB"); | ||
3308 | break; | ||
3309 | case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: | ||
3310 | _print_next_block(par_num++, "CSDM"); | ||
3311 | break; | ||
3312 | } | ||
3313 | |||
3314 | /* Clear the bit */ | ||
3315 | sig &= ~cur_bit; | ||
3316 | } | ||
3317 | } | ||
3318 | |||
3319 | return par_num; | ||
3320 | } | ||
3321 | |||
3322 | static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) | ||
3323 | { | ||
3324 | int i = 0; | ||
3325 | u32 cur_bit = 0; | ||
3326 | for (i = 0; sig; i++) { | ||
3327 | cur_bit = ((u32)0x1 << i); | ||
3328 | if (sig & cur_bit) { | ||
3329 | switch (cur_bit) { | ||
3330 | case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: | ||
3331 | _print_next_block(par_num++, "CSEMI"); | ||
3332 | break; | ||
3333 | case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: | ||
3334 | _print_next_block(par_num++, "PXP"); | ||
3335 | break; | ||
3336 | case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: | ||
3337 | _print_next_block(par_num++, | ||
3338 | "PXPPCICLOCKCLIENT"); | ||
3339 | break; | ||
3340 | case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: | ||
3341 | _print_next_block(par_num++, "CFC"); | ||
3342 | break; | ||
3343 | case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: | ||
3344 | _print_next_block(par_num++, "CDU"); | ||
3345 | break; | ||
3346 | case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: | ||
3347 | _print_next_block(par_num++, "IGU"); | ||
3348 | break; | ||
3349 | case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: | ||
3350 | _print_next_block(par_num++, "MISC"); | ||
3351 | break; | ||
3352 | } | ||
3353 | |||
3354 | /* Clear the bit */ | ||
3355 | sig &= ~cur_bit; | ||
3356 | } | ||
3357 | } | ||
3358 | |||
3359 | return par_num; | ||
3360 | } | ||
3361 | |||
3362 | static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) | ||
3363 | { | ||
3364 | int i = 0; | ||
3365 | u32 cur_bit = 0; | ||
3366 | for (i = 0; sig; i++) { | ||
3367 | cur_bit = ((u32)0x1 << i); | ||
3368 | if (sig & cur_bit) { | ||
3369 | switch (cur_bit) { | ||
3370 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: | ||
3371 | _print_next_block(par_num++, "MCP ROM"); | ||
3372 | break; | ||
3373 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: | ||
3374 | _print_next_block(par_num++, "MCP UMP RX"); | ||
3375 | break; | ||
3376 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: | ||
3377 | _print_next_block(par_num++, "MCP UMP TX"); | ||
3378 | break; | ||
3379 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: | ||
3380 | _print_next_block(par_num++, "MCP SCPAD"); | ||
3381 | break; | ||
3382 | } | ||
3383 | |||
3384 | /* Clear the bit */ | ||
3385 | sig &= ~cur_bit; | ||
3386 | } | ||
3387 | } | ||
3388 | |||
3389 | return par_num; | ||
3390 | } | ||
3391 | |||
3392 | static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, | ||
3393 | u32 sig2, u32 sig3) | ||
3394 | { | ||
3395 | if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || | ||
3396 | (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { | ||
3397 | int par_num = 0; | ||
3398 | DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " | ||
3399 | "[0]:0x%08x [1]:0x%08x " | ||
3400 | "[2]:0x%08x [3]:0x%08x\n", | ||
3401 | sig0 & HW_PRTY_ASSERT_SET_0, | ||
3402 | sig1 & HW_PRTY_ASSERT_SET_1, | ||
3403 | sig2 & HW_PRTY_ASSERT_SET_2, | ||
3404 | sig3 & HW_PRTY_ASSERT_SET_3); | ||
3405 | printk(KERN_ERR"%s: Parity errors detected in blocks: ", | ||
3406 | bp->dev->name); | ||
3407 | par_num = bnx2x_print_blocks_with_parity0( | ||
3408 | sig0 & HW_PRTY_ASSERT_SET_0, par_num); | ||
3409 | par_num = bnx2x_print_blocks_with_parity1( | ||
3410 | sig1 & HW_PRTY_ASSERT_SET_1, par_num); | ||
3411 | par_num = bnx2x_print_blocks_with_parity2( | ||
3412 | sig2 & HW_PRTY_ASSERT_SET_2, par_num); | ||
3413 | par_num = bnx2x_print_blocks_with_parity3( | ||
3414 | sig3 & HW_PRTY_ASSERT_SET_3, par_num); | ||
3415 | printk("\n"); | ||
3416 | return true; | ||
3417 | } else | ||
3418 | return false; | ||
3419 | } | ||
3420 | |||
3421 | static bool bnx2x_chk_parity_attn(struct bnx2x *bp) | ||
3109 | { | 3422 | { |
3110 | struct attn_route attn; | 3423 | struct attn_route attn; |
3111 | struct attn_route group_mask; | 3424 | int port = BP_PORT(bp); |
3425 | |||
3426 | attn.sig[0] = REG_RD(bp, | ||
3427 | MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + | ||
3428 | port*4); | ||
3429 | attn.sig[1] = REG_RD(bp, | ||
3430 | MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + | ||
3431 | port*4); | ||
3432 | attn.sig[2] = REG_RD(bp, | ||
3433 | MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + | ||
3434 | port*4); | ||
3435 | attn.sig[3] = REG_RD(bp, | ||
3436 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + | ||
3437 | port*4); | ||
3438 | |||
3439 | return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2], | ||
3440 | attn.sig[3]); | ||
3441 | } | ||
3442 | |||
3443 | static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | ||
3444 | { | ||
3445 | struct attn_route attn, *group_mask; | ||
3112 | int port = BP_PORT(bp); | 3446 | int port = BP_PORT(bp); |
3113 | int index; | 3447 | int index; |
3114 | u32 reg_addr; | 3448 | u32 reg_addr; |
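The new helpers above multiplex two pieces of recovery bookkeeping into the single MISC_REG_GENERIC_POR_1 scratch register: a 16-bit counter of loaded functions in the low bits and a "reset in progress" flag just above it. A rough sketch of the layout they imply (the macro names below are illustrative only, not part of the patch):

	/*
	 * Bit layout implied by LOAD_COUNTER_BITS / RESET_DONE_FLAG_SHIFT:
	 *
	 *   31 ........ 17 | 16 | 15 ........................ 0
	 *      (unused)     RIP   load counter (number of
	 *                         functions currently loaded)
	 *
	 * RIP = "reset in progress", set by bnx2x_set_reset_in_progress()
	 */
	#define EXAMPLE_LOAD_CNT(val)	((val) & LOAD_COUNTER_MASK)
	#define EXAMPLE_RESET_BUSY(val)	(((val) >> RESET_DONE_FLAG_SHIFT) & 0x1)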
@@ -3119,6 +3453,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3119 | try to handle this event */ | 3453 | try to handle this event */ |
3120 | bnx2x_acquire_alr(bp); | 3454 | bnx2x_acquire_alr(bp); |
3121 | 3455 | ||
3456 | if (bnx2x_chk_parity_attn(bp)) { | ||
3457 | bp->recovery_state = BNX2X_RECOVERY_INIT; | ||
3458 | bnx2x_set_reset_in_progress(bp); | ||
3459 | schedule_delayed_work(&bp->reset_task, 0); | ||
3460 | /* Disable HW interrupts */ | ||
3461 | bnx2x_int_disable(bp); | ||
3462 | bnx2x_release_alr(bp); | ||
3463 | /* In case of parity errors don't handle attentions so that | ||
3464 | * other function would "see" parity errors. | ||
3465 | */ | ||
3466 | return; | ||
3467 | } | ||
3468 | |||
3122 | attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); | 3469 | attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); |
3123 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 3470 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
3124 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); | 3471 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
@@ -3128,28 +3475,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3128 | 3475 | ||
3129 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 3476 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
3130 | if (deasserted & (1 << index)) { | 3477 | if (deasserted & (1 << index)) { |
3131 | group_mask = bp->attn_group[index]; | 3478 | group_mask = &bp->attn_group[index]; |
3132 | 3479 | ||
3133 | DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", | 3480 | DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", |
3134 | index, group_mask.sig[0], group_mask.sig[1], | 3481 | index, group_mask->sig[0], group_mask->sig[1], |
3135 | group_mask.sig[2], group_mask.sig[3]); | 3482 | group_mask->sig[2], group_mask->sig[3]); |
3136 | 3483 | ||
3137 | bnx2x_attn_int_deasserted3(bp, | 3484 | bnx2x_attn_int_deasserted3(bp, |
3138 | attn.sig[3] & group_mask.sig[3]); | 3485 | attn.sig[3] & group_mask->sig[3]); |
3139 | bnx2x_attn_int_deasserted1(bp, | 3486 | bnx2x_attn_int_deasserted1(bp, |
3140 | attn.sig[1] & group_mask.sig[1]); | 3487 | attn.sig[1] & group_mask->sig[1]); |
3141 | bnx2x_attn_int_deasserted2(bp, | 3488 | bnx2x_attn_int_deasserted2(bp, |
3142 | attn.sig[2] & group_mask.sig[2]); | 3489 | attn.sig[2] & group_mask->sig[2]); |
3143 | bnx2x_attn_int_deasserted0(bp, | 3490 | bnx2x_attn_int_deasserted0(bp, |
3144 | attn.sig[0] & group_mask.sig[0]); | 3491 | attn.sig[0] & group_mask->sig[0]); |
3145 | |||
3146 | if ((attn.sig[0] & group_mask.sig[0] & | ||
3147 | HW_PRTY_ASSERT_SET_0) || | ||
3148 | (attn.sig[1] & group_mask.sig[1] & | ||
3149 | HW_PRTY_ASSERT_SET_1) || | ||
3150 | (attn.sig[2] & group_mask.sig[2] & | ||
3151 | HW_PRTY_ASSERT_SET_2)) | ||
3152 | BNX2X_ERR("FATAL HW block parity attention\n"); | ||
3153 | } | 3492 | } |
3154 | } | 3493 | } |
3155 | 3494 | ||
@@ -3173,7 +3512,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3173 | 3512 | ||
3174 | DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", | 3513 | DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", |
3175 | aeu_mask, deasserted); | 3514 | aeu_mask, deasserted); |
3176 | aeu_mask |= (deasserted & 0xff); | 3515 | aeu_mask |= (deasserted & 0x3ff); |
3177 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); | 3516 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); |
3178 | 3517 | ||
3179 | REG_WR(bp, reg_addr, aeu_mask); | 3518 | REG_WR(bp, reg_addr, aeu_mask); |
@@ -5963,6 +6302,50 @@ static void enable_blocks_attention(struct bnx2x *bp) | |||
5963 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ | 6302 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ |
5964 | } | 6303 | } |
5965 | 6304 | ||
6305 | static const struct { | ||
6306 | u32 addr; | ||
6307 | u32 mask; | ||
6308 | } bnx2x_parity_mask[] = { | ||
6309 | {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, | ||
6310 | {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, | ||
6311 | {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, | ||
6312 | {HC_REG_HC_PRTY_MASK, 0xffffffff}, | ||
6313 | {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, | ||
6314 | {QM_REG_QM_PRTY_MASK, 0x0}, | ||
6315 | {DORQ_REG_DORQ_PRTY_MASK, 0x0}, | ||
6316 | {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, | ||
6317 | {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, | ||
6318 | {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ | ||
6319 | {CDU_REG_CDU_PRTY_MASK, 0x0}, | ||
6320 | {CFC_REG_CFC_PRTY_MASK, 0x0}, | ||
6321 | {DBG_REG_DBG_PRTY_MASK, 0x0}, | ||
6322 | {DMAE_REG_DMAE_PRTY_MASK, 0x0}, | ||
6323 | {BRB1_REG_BRB1_PRTY_MASK, 0x0}, | ||
6324 | {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ | ||
6325 | {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */ | ||
6326 | {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ | ||
6327 | {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */ | ||
6328 | {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ | ||
6329 | {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, | ||
6330 | {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, | ||
6331 | {USEM_REG_USEM_PRTY_MASK_0, 0x0}, | ||
6332 | {USEM_REG_USEM_PRTY_MASK_1, 0x0}, | ||
6333 | {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, | ||
6334 | {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, | ||
6335 | {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, | ||
6336 | {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} | ||
6337 | }; | ||
6338 | |||
6339 | static void enable_blocks_parity(struct bnx2x *bp) | ||
6340 | { | ||
6341 | int i, mask_arr_len = | ||
6342 | sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0])); | ||
6343 | |||
6344 | for (i = 0; i < mask_arr_len; i++) | ||
6345 | REG_WR(bp, bnx2x_parity_mask[i].addr, | ||
6346 | bnx2x_parity_mask[i].mask); | ||
6347 | } | ||
6348 | |||
5966 | 6349 | ||
5967 | static void bnx2x_reset_common(struct bnx2x *bp) | 6350 | static void bnx2x_reset_common(struct bnx2x *bp) |
5968 | { | 6351 | { |
@@ -6306,6 +6689,8 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
6306 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); | 6689 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); |
6307 | 6690 | ||
6308 | enable_blocks_attention(bp); | 6691 | enable_blocks_attention(bp); |
6692 | if (CHIP_PARITY_SUPPORTED(bp)) | ||
6693 | enable_blocks_parity(bp); | ||
6309 | 6694 | ||
6310 | if (!BP_NOMCP(bp)) { | 6695 | if (!BP_NOMCP(bp)) { |
6311 | bnx2x_acquire_phy_lock(bp); | 6696 | bnx2x_acquire_phy_lock(bp); |
@@ -7657,6 +8042,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
7657 | if (bp->state == BNX2X_STATE_OPEN) | 8042 | if (bp->state == BNX2X_STATE_OPEN) |
7658 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); | 8043 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); |
7659 | #endif | 8044 | #endif |
8045 | bnx2x_inc_load_cnt(bp); | ||
7660 | 8046 | ||
7661 | return 0; | 8047 | return 0; |
7662 | 8048 | ||
@@ -7844,33 +8230,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | |||
7844 | } | 8230 | } |
7845 | } | 8231 | } |
7846 | 8232 | ||
7847 | /* must be called with rtnl_lock */ | 8233 | static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) |
7848 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
7849 | { | 8234 | { |
7850 | int port = BP_PORT(bp); | 8235 | int port = BP_PORT(bp); |
7851 | u32 reset_code = 0; | 8236 | u32 reset_code = 0; |
7852 | int i, cnt, rc; | 8237 | int i, cnt, rc; |
7853 | 8238 | ||
7854 | #ifdef BCM_CNIC | ||
7855 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
7856 | #endif | ||
7857 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
7858 | |||
7859 | /* Set "drop all" */ | ||
7860 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
7861 | bnx2x_set_storm_rx_mode(bp); | ||
7862 | |||
7863 | /* Disable HW interrupts, NAPI and Tx */ | ||
7864 | bnx2x_netif_stop(bp, 1); | ||
7865 | |||
7866 | del_timer_sync(&bp->timer); | ||
7867 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
7868 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
7869 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
7870 | |||
7871 | /* Release IRQs */ | ||
7872 | bnx2x_free_irq(bp, false); | ||
7873 | |||
7874 | /* Wait until tx fastpath tasks complete */ | 8239 | /* Wait until tx fastpath tasks complete */ |
7875 | for_each_queue(bp, i) { | 8240 | for_each_queue(bp, i) { |
7876 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 8241 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
@@ -8011,6 +8376,69 @@ unload_error: | |||
8011 | if (!BP_NOMCP(bp)) | 8376 | if (!BP_NOMCP(bp)) |
8012 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | 8377 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); |
8013 | 8378 | ||
8379 | } | ||
8380 | |||
8381 | static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) | ||
8382 | { | ||
8383 | u32 val; | ||
8384 | |||
8385 | DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); | ||
8386 | |||
8387 | if (CHIP_IS_E1(bp)) { | ||
8388 | int port = BP_PORT(bp); | ||
8389 | u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | ||
8390 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | ||
8391 | |||
8392 | val = REG_RD(bp, addr); | ||
8393 | val &= ~(0x300); | ||
8394 | REG_WR(bp, addr, val); | ||
8395 | } else if (CHIP_IS_E1H(bp)) { | ||
8396 | val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); | ||
8397 | val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | | ||
8398 | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); | ||
8399 | REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); | ||
8400 | } | ||
8401 | } | ||
8402 | |||
8403 | /* must be called with rtnl_lock */ | ||
8404 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
8405 | { | ||
8406 | int i; | ||
8407 | |||
8408 | if (bp->state == BNX2X_STATE_CLOSED) { | ||
8409 | /* Interface has been removed - nothing to recover */ | ||
8410 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
8411 | bp->is_leader = 0; | ||
8412 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | ||
8413 | smp_wmb(); | ||
8414 | |||
8415 | return -EINVAL; | ||
8416 | } | ||
8417 | |||
8418 | #ifdef BCM_CNIC | ||
8419 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
8420 | #endif | ||
8421 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
8422 | |||
8423 | /* Set "drop all" */ | ||
8424 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
8425 | bnx2x_set_storm_rx_mode(bp); | ||
8426 | |||
8427 | /* Disable HW interrupts, NAPI and Tx */ | ||
8428 | bnx2x_netif_stop(bp, 1); | ||
8429 | |||
8430 | del_timer_sync(&bp->timer); | ||
8431 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
8432 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
8433 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
8434 | |||
8435 | /* Release IRQs */ | ||
8436 | bnx2x_free_irq(bp, false); | ||
8437 | |||
8438 | /* Cleanup the chip if needed */ | ||
8439 | if (unload_mode != UNLOAD_RECOVERY) | ||
8440 | bnx2x_chip_cleanup(bp, unload_mode); | ||
8441 | |||
8014 | bp->port.pmf = 0; | 8442 | bp->port.pmf = 0; |
8015 | 8443 | ||
8016 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 8444 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
@@ -8025,17 +8453,448 @@ unload_error: | |||
8025 | 8453 | ||
8026 | netif_carrier_off(bp->dev); | 8454 | netif_carrier_off(bp->dev); |
8027 | 8455 | ||
8456 | /* The last driver must disable "close the gates" if there is no | ||
8457 | * parity attention or "process kill" pending. | ||
8458 | */ | ||
8459 | if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && | ||
8460 | bnx2x_reset_is_done(bp)) | ||
8461 | bnx2x_disable_close_the_gate(bp); | ||
8462 | |||
8463 | /* Reset the MCP mailbox sequence if there is an ongoing recovery */ | ||
8464 | if (unload_mode == UNLOAD_RECOVERY) | ||
8465 | bp->fw_seq = 0; | ||
8466 | |||
8467 | return 0; | ||
8468 | } | ||
8469 | |||
8470 | /* Close gates #2, #3 and #4: */ | ||
8471 | static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) | ||
8472 | { | ||
8473 | u32 val, addr; | ||
8474 | |||
8475 | /* Gates #2 and #4a are closed/opened for "not E1" only */ | ||
8476 | if (!CHIP_IS_E1(bp)) { | ||
8477 | /* #4 */ | ||
8478 | val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS); | ||
8479 | REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, | ||
8480 | close ? (val | 0x1) : (val & (~(u32)1))); | ||
8481 | /* #2 */ | ||
8482 | val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES); | ||
8483 | REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, | ||
8484 | close ? (val | 0x1) : (val & (~(u32)1))); | ||
8485 | } | ||
8486 | |||
8487 | /* #3 */ | ||
8488 | addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | ||
8489 | val = REG_RD(bp, addr); | ||
8490 | REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1))); | ||
8491 | |||
8492 | DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", | ||
8493 | close ? "closing" : "opening"); | ||
8494 | mmiowb(); | ||
8495 | } | ||
8496 | |||
8497 | #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ | ||
8498 | |||
8499 | static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) | ||
8500 | { | ||
8501 | /* Do some magic... */ | ||
8502 | u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); | ||
8503 | *magic_val = val & SHARED_MF_CLP_MAGIC; | ||
8504 | MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); | ||
8505 | } | ||
8506 | |||
8507 | /* Restore the value of the `magic' bit. | ||
8508 | * | ||
8509 | * @param pdev Device handle. | ||
8510 | * @param magic_val Old value of the `magic' bit. | ||
8511 | */ | ||
8512 | static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) | ||
8513 | { | ||
8514 | /* Restore the `magic' bit value... */ | ||
8515 | /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb); | ||
8516 | SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb, | ||
8517 | (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */ | ||
8518 | u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); | ||
8519 | MF_CFG_WR(bp, shared_mf_config.clp_mb, | ||
8520 | (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); | ||
8521 | } | ||
8522 | |||
8523 | /* Prepares for MCP reset: takes care of CLP configurations. | ||
8524 | * | ||
8525 | * @param bp | ||
8526 | * @param magic_val Old value of 'magic' bit. | ||
8527 | */ | ||
8528 | static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) | ||
8529 | { | ||
8530 | u32 shmem; | ||
8531 | u32 validity_offset; | ||
8532 | |||
8533 | DP(NETIF_MSG_HW, "Starting\n"); | ||
8534 | |||
8535 | /* Set `magic' bit in order to save MF config */ | ||
8536 | if (!CHIP_IS_E1(bp)) | ||
8537 | bnx2x_clp_reset_prep(bp, magic_val); | ||
8538 | |||
8539 | /* Get shmem offset */ | ||
8540 | shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
8541 | validity_offset = offsetof(struct shmem_region, validity_map[0]); | ||
8542 | |||
8543 | /* Clear validity map flags */ | ||
8544 | if (shmem > 0) | ||
8545 | REG_WR(bp, shmem + validity_offset, 0); | ||
8546 | } | ||
8547 | |||
8548 | #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ | ||
8549 | #define MCP_ONE_TIMEOUT 100 /* 100 ms */ | ||
8550 | |||
8551 | /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10, | ||
8552 | * depending on the HW type. | ||
8553 | * | ||
8554 | * @param bp | ||
8555 | */ | ||
8556 | static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) | ||
8557 | { | ||
8558 | /* special handling for emulation and FPGA, | ||
8559 | wait 10 times longer */ | ||
8560 | if (CHIP_REV_IS_SLOW(bp)) | ||
8561 | msleep(MCP_ONE_TIMEOUT*10); | ||
8562 | else | ||
8563 | msleep(MCP_ONE_TIMEOUT); | ||
8564 | } | ||
8565 | |||
8566 | static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) | ||
8567 | { | ||
8568 | u32 shmem, cnt, validity_offset, val; | ||
8569 | int rc = 0; | ||
8570 | |||
8571 | msleep(100); | ||
8572 | |||
8573 | /* Get shmem offset */ | ||
8574 | shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
8575 | if (shmem == 0) { | ||
8576 | BNX2X_ERR("Shmem 0 return failure\n"); | ||
8577 | rc = -ENOTTY; | ||
8578 | goto exit_lbl; | ||
8579 | } | ||
8580 | |||
8581 | validity_offset = offsetof(struct shmem_region, validity_map[0]); | ||
8582 | |||
8583 | /* Wait for MCP to come up */ | ||
8584 | for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) { | ||
8585 | /* TBD: it's best to check the validity map of the last port; | ||
8586 | * currently this checks port 0. | ||
8587 | */ | ||
8588 | val = REG_RD(bp, shmem + validity_offset); | ||
8589 | DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem, | ||
8590 | shmem + validity_offset, val); | ||
8591 | |||
8592 | /* check that shared memory is valid. */ | ||
8593 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
8594 | == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
8595 | break; | ||
8596 | |||
8597 | bnx2x_mcp_wait_one(bp); | ||
8598 | } | ||
8599 | |||
8600 | DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val); | ||
8601 | |||
8602 | /* Check that shared memory is valid. This indicates that MCP is up. */ | ||
8603 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != | ||
8604 | (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { | ||
8605 | BNX2X_ERR("Shmem signature not present. MCP is not up !!\n"); | ||
8606 | rc = -ENOTTY; | ||
8607 | goto exit_lbl; | ||
8608 | } | ||
8609 | |||
8610 | exit_lbl: | ||
8611 | /* Restore the `magic' bit value */ | ||
8612 | if (!CHIP_IS_E1(bp)) | ||
8613 | bnx2x_clp_reset_done(bp, magic_val); | ||
8614 | |||
8615 | return rc; | ||
8616 | } | ||
8617 | |||
8618 | static void bnx2x_pxp_prep(struct bnx2x *bp) | ||
8619 | { | ||
8620 | if (!CHIP_IS_E1(bp)) { | ||
8621 | REG_WR(bp, PXP2_REG_RD_START_INIT, 0); | ||
8622 | REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); | ||
8623 | REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0); | ||
8624 | mmiowb(); | ||
8625 | } | ||
8626 | } | ||
8627 | |||
8628 | /* | ||
8629 | * Reset the whole chip except for: | ||
8630 | * - PCIE core | ||
8631 | * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by | ||
8632 | * one reset bit) | ||
8633 | * - IGU | ||
8634 | * - MISC (including AEU) | ||
8635 | * - GRC | ||
8636 | * - RBCN, RBCP | ||
8637 | */ | ||
8638 | static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) | ||
8639 | { | ||
8640 | u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; | ||
8641 | |||
8642 | not_reset_mask1 = | ||
8643 | MISC_REGISTERS_RESET_REG_1_RST_HC | | ||
8644 | MISC_REGISTERS_RESET_REG_1_RST_PXPV | | ||
8645 | MISC_REGISTERS_RESET_REG_1_RST_PXP; | ||
8646 | |||
8647 | not_reset_mask2 = | ||
8648 | MISC_REGISTERS_RESET_REG_2_RST_MDIO | | ||
8649 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | | ||
8650 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | | ||
8651 | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | | ||
8652 | MISC_REGISTERS_RESET_REG_2_RST_RBCN | | ||
8653 | MISC_REGISTERS_RESET_REG_2_RST_GRC | | ||
8654 | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | | ||
8655 | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B; | ||
8656 | |||
8657 | reset_mask1 = 0xffffffff; | ||
8658 | |||
8659 | if (CHIP_IS_E1(bp)) | ||
8660 | reset_mask2 = 0xffff; | ||
8661 | else | ||
8662 | reset_mask2 = 0x1ffff; | ||
8663 | |||
8664 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
8665 | reset_mask1 & (~not_reset_mask1)); | ||
8666 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
8667 | reset_mask2 & (~not_reset_mask2)); | ||
8668 | |||
8669 | barrier(); | ||
8670 | mmiowb(); | ||
8671 | |||
8672 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); | ||
8673 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); | ||
8674 | mmiowb(); | ||
8675 | } | ||
8676 | |||
8677 | static int bnx2x_process_kill(struct bnx2x *bp) | ||
8678 | { | ||
8679 | int cnt = 1000; | ||
8680 | u32 val = 0; | ||
8681 | u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; | ||
8682 | |||
8683 | |||
8684 | /* Empty the Tetris buffer, wait for 1s */ | ||
8685 | do { | ||
8686 | sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); | ||
8687 | blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); | ||
8688 | port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); | ||
8689 | port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); | ||
8690 | pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); | ||
8691 | if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && | ||
8692 | ((port_is_idle_0 & 0x1) == 0x1) && | ||
8693 | ((port_is_idle_1 & 0x1) == 0x1) && | ||
8694 | (pgl_exp_rom2 == 0xffffffff)) | ||
8695 | break; | ||
8696 | msleep(1); | ||
8697 | } while (cnt-- > 0); | ||
8698 | |||
8699 | if (cnt <= 0) { | ||
8700 | DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" | ||
8701 | " are still" | ||
8702 | " outstanding read requests after 1s!\n"); | ||
8703 | DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," | ||
8704 | " port_is_idle_0=0x%08x," | ||
8705 | " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", | ||
8706 | sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, | ||
8707 | pgl_exp_rom2); | ||
8708 | return -EAGAIN; | ||
8709 | } | ||
8710 | |||
8711 | barrier(); | ||
8712 | |||
8713 | /* Close gates #2, #3 and #4 */ | ||
8714 | bnx2x_set_234_gates(bp, true); | ||
8715 | |||
8716 | /* TBD: Indicate that "process kill" is in progress to MCP */ | ||
8717 | |||
8718 | /* Clear "unprepared" bit */ | ||
8719 | REG_WR(bp, MISC_REG_UNPREPARED, 0); | ||
8720 | barrier(); | ||
8721 | |||
8722 | /* Make sure all is written to the chip before the reset */ | ||
8723 | mmiowb(); | ||
8724 | |||
8725 | /* Wait for 1ms to empty GLUE and PCI-E core queues, | ||
8726 | * PSWHST, GRC and PSWRD Tetris buffer. | ||
8727 | */ | ||
8728 | msleep(1); | ||
8729 | |||
8730 | /* Prepare to chip reset: */ | ||
8731 | /* MCP */ | ||
8732 | bnx2x_reset_mcp_prep(bp, &val); | ||
8733 | |||
8734 | /* PXP */ | ||
8735 | bnx2x_pxp_prep(bp); | ||
8736 | barrier(); | ||
8737 | |||
8738 | /* reset the chip */ | ||
8739 | bnx2x_process_kill_chip_reset(bp); | ||
8740 | barrier(); | ||
8741 | |||
8742 | /* Recover after reset: */ | ||
8743 | /* MCP */ | ||
8744 | if (bnx2x_reset_mcp_comp(bp, val)) | ||
8745 | return -EAGAIN; | ||
8746 | |||
8747 | /* PXP */ | ||
8748 | bnx2x_pxp_prep(bp); | ||
8749 | |||
8750 | /* Open the gates #2, #3 and #4 */ | ||
8751 | bnx2x_set_234_gates(bp, false); | ||
8752 | |||
8753 | /* TBD: IGU/AEU preparation bring back the AEU/IGU to a | ||
8754 | * reset state, re-enable attentions. */ | ||
8755 | |||
8028 | return 0; | 8756 | return 0; |
8029 | } | 8757 | } |
8030 | 8758 | ||
8759 | static int bnx2x_leader_reset(struct bnx2x *bp) | ||
8760 | { | ||
8761 | int rc = 0; | ||
8762 | /* Try to recover after the failure */ | ||
8763 | if (bnx2x_process_kill(bp)) { | ||
8764 | printk(KERN_ERR "%s: Something bad has happened! Aii!\n", | ||
8765 | bp->dev->name); | ||
8766 | rc = -EAGAIN; | ||
8767 | goto exit_leader_reset; | ||
8768 | } | ||
8769 | |||
8770 | /* Clear "reset is in progress" bit and update the driver state */ | ||
8771 | bnx2x_set_reset_done(bp); | ||
8772 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
8773 | |||
8774 | exit_leader_reset: | ||
8775 | bp->is_leader = 0; | ||
8776 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | ||
8777 | smp_wmb(); | ||
8778 | return rc; | ||
8779 | } | ||
8780 | |||
8781 | static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | ||
8782 | |||
8783 | /* Assumption: runs under rtnl lock. This together with the fact | ||
8784 | * that it's called only from bnx2x_reset_task() ensures that it | ||
8785 | * will never be called when netif_running(bp->dev) is false. | ||
8786 | */ | ||
8787 | static void bnx2x_parity_recover(struct bnx2x *bp) | ||
8788 | { | ||
8789 | DP(NETIF_MSG_HW, "Handling parity\n"); | ||
8790 | while (1) { | ||
8791 | switch (bp->recovery_state) { | ||
8792 | case BNX2X_RECOVERY_INIT: | ||
8793 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); | ||
8794 | /* Try to get a LEADER_LOCK HW lock */ | ||
8795 | if (bnx2x_trylock_hw_lock(bp, | ||
8796 | HW_LOCK_RESOURCE_RESERVED_08)) | ||
8797 | bp->is_leader = 1; | ||
8798 | |||
8799 | /* Stop the driver */ | ||
8800 | /* If interface has been removed - break */ | ||
8801 | if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) | ||
8802 | return; | ||
8803 | |||
8804 | bp->recovery_state = BNX2X_RECOVERY_WAIT; | ||
8805 | /* Ensure "is_leader" and "recovery_state" | ||
8806 | * update values are seen on other CPUs | ||
8807 | */ | ||
8808 | smp_wmb(); | ||
8809 | break; | ||
8810 | |||
8811 | case BNX2X_RECOVERY_WAIT: | ||
8812 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); | ||
8813 | if (bp->is_leader) { | ||
8814 | u32 load_counter = bnx2x_get_load_cnt(bp); | ||
8815 | if (load_counter) { | ||
8816 | /* Wait until all other functions get | ||
8817 | * down. | ||
8818 | */ | ||
8819 | schedule_delayed_work(&bp->reset_task, | ||
8820 | HZ/10); | ||
8821 | return; | ||
8822 | } else { | ||
8823 | /* If all other functions got down - | ||
8824 | * try to bring the chip back to | ||
8825 | * normal. In any case it's an exit | ||
8826 | * point for a leader. | ||
8827 | */ | ||
8828 | if (bnx2x_leader_reset(bp) || | ||
8829 | bnx2x_nic_load(bp, LOAD_NORMAL)) { | ||
8830 | printk(KERN_ERR"%s: Recovery " | ||
8831 | "has failed. Power cycle is " | ||
8832 | "needed.\n", bp->dev->name); | ||
8833 | /* Disconnect this device */ | ||
8834 | netif_device_detach(bp->dev); | ||
8835 | /* Block ifup for all function | ||
8836 | * of this ASIC until | ||
8837 | * "process kill" or power | ||
8838 | * cycle. | ||
8839 | */ | ||
8840 | bnx2x_set_reset_in_progress(bp); | ||
8841 | /* Shut down the power */ | ||
8842 | bnx2x_set_power_state(bp, | ||
8843 | PCI_D3hot); | ||
8844 | return; | ||
8845 | } | ||
8846 | |||
8847 | return; | ||
8848 | } | ||
8849 | } else { /* non-leader */ | ||
8850 | if (!bnx2x_reset_is_done(bp)) { | ||
8851 | /* Try to get the LEADER_LOCK HW lock, | ||
8852 | * since a former leader may have been | ||
8853 | * unloaded by the user or may have | ||
8854 | * released leadership for another | ||
8855 | * reason. | ||
8856 | */ | ||
8857 | if (bnx2x_trylock_hw_lock(bp, | ||
8858 | HW_LOCK_RESOURCE_RESERVED_08)) { | ||
8859 | /* I'm a leader now! Restart a | ||
8860 | * switch case. | ||
8861 | */ | ||
8862 | bp->is_leader = 1; | ||
8863 | break; | ||
8864 | } | ||
8865 | |||
8866 | schedule_delayed_work(&bp->reset_task, | ||
8867 | HZ/10); | ||
8868 | return; | ||
8869 | |||
8870 | } else { /* A leader has completed | ||
8871 | * the "process kill". It's an exit | ||
8872 | * point for a non-leader. | ||
8873 | */ | ||
8874 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
8875 | bp->recovery_state = | ||
8876 | BNX2X_RECOVERY_DONE; | ||
8877 | smp_wmb(); | ||
8878 | return; | ||
8879 | } | ||
8880 | } | ||
8881 | default: | ||
8882 | return; | ||
8883 | } | ||
8884 | } | ||
8885 | } | ||
8886 | |||
8887 | /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is | ||
8888 | * scheduled on a general queue in order to prevent a deadlock. | ||
8889 | */ | ||
8031 | static void bnx2x_reset_task(struct work_struct *work) | 8890 | static void bnx2x_reset_task(struct work_struct *work) |
8032 | { | 8891 | { |
8033 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); | 8892 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work); |
8034 | 8893 | ||
8035 | #ifdef BNX2X_STOP_ON_ERROR | 8894 | #ifdef BNX2X_STOP_ON_ERROR |
8036 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" | 8895 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" |
8037 | " so reset not done to allow debug dump,\n" | 8896 | " so reset not done to allow debug dump,\n" |
8038 | " you will need to reboot when done\n"); | 8897 | KERN_ERR " you will need to reboot when done\n"); |
8039 | return; | 8898 | return; |
8040 | #endif | 8899 | #endif |
8041 | 8900 | ||
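bnx2x_parity_recover() above is the core of the new flow: a small state machine driven from the (now delayed) reset task, with different behaviour for the leader and the other functions. A hedged summary of the transitions it implements, with the hardware details stripped out:

	/* Sketch of the state machine in bnx2x_parity_recover() */
	static void example_recovery_steps(struct bnx2x *bp)
	{
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			/* try to become leader, unload with UNLOAD_RECOVERY,
			 * then move to BNX2X_RECOVERY_WAIT
			 */
			break;
		case BNX2X_RECOVERY_WAIT:
			/* leader: wait for the load counter to reach 0, run the
			 * process kill and reload; non-leader: wait for "reset
			 * done" and reload, or take over leadership if the old
			 * leader disappeared
			 */
			break;
		case BNX2X_RECOVERY_DONE:
		default:
			/* normal operation - nothing to do */
			break;
		}
	}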
@@ -8044,8 +8903,12 @@ static void bnx2x_reset_task(struct work_struct *work) | |||
8044 | if (!netif_running(bp->dev)) | 8903 | if (!netif_running(bp->dev)) |
8045 | goto reset_task_exit; | 8904 | goto reset_task_exit; |
8046 | 8905 | ||
8047 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | 8906 | if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) |
8048 | bnx2x_nic_load(bp, LOAD_NORMAL); | 8907 | bnx2x_parity_recover(bp); |
8908 | else { | ||
8909 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
8910 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
8911 | } | ||
8049 | 8912 | ||
8050 | reset_task_exit: | 8913 | reset_task_exit: |
8051 | rtnl_unlock(); | 8914 | rtnl_unlock(); |
@@ -8913,7 +9776,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8913 | #endif | 9776 | #endif |
8914 | 9777 | ||
8915 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 9778 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
8916 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | 9779 | INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); |
8917 | 9780 | ||
8918 | rc = bnx2x_get_hwinfo(bp); | 9781 | rc = bnx2x_get_hwinfo(bp); |
8919 | 9782 | ||
@@ -9888,6 +10751,11 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
9888 | struct bnx2x *bp = netdev_priv(dev); | 10751 | struct bnx2x *bp = netdev_priv(dev); |
9889 | int rc = 0; | 10752 | int rc = 0; |
9890 | 10753 | ||
10754 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
10755 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
10756 | return -EAGAIN; | ||
10757 | } | ||
10758 | |||
9891 | if ((ering->rx_pending > MAX_RX_AVAIL) || | 10759 | if ((ering->rx_pending > MAX_RX_AVAIL) || |
9892 | (ering->tx_pending > MAX_TX_AVAIL) || | 10760 | (ering->tx_pending > MAX_TX_AVAIL) || |
9893 | (ering->tx_pending <= MAX_SKB_FRAGS + 4)) | 10761 | (ering->tx_pending <= MAX_SKB_FRAGS + 4)) |
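The same guard is repeated in the other entry points changed below (set_flags, set_rx_csum, self_test, change_mtu, resume and io_resume): while a parity recovery is in flight, configuration requests are simply refused with -EAGAIN. A sketch of the pattern, factored into a helper purely for illustration (no such helper exists in the patch itself):

	/* Illustrative helper; the patch open-codes this check per entry point */
	static inline int example_recovery_busy(struct bnx2x *bp)
	{
		if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
			printk(KERN_ERR "Handling parity error recovery. "
			       "Try again later\n");
			return -EAGAIN;
		}
		return 0;
	}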
@@ -9973,6 +10841,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data) | |||
9973 | int changed = 0; | 10841 | int changed = 0; |
9974 | int rc = 0; | 10842 | int rc = 0; |
9975 | 10843 | ||
10844 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
10845 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
10846 | return -EAGAIN; | ||
10847 | } | ||
10848 | |||
9976 | /* TPA requires Rx CSUM offloading */ | 10849 | /* TPA requires Rx CSUM offloading */ |
9977 | if ((data & ETH_FLAG_LRO) && bp->rx_csum) { | 10850 | if ((data & ETH_FLAG_LRO) && bp->rx_csum) { |
9978 | if (!disable_tpa) { | 10851 | if (!disable_tpa) { |
@@ -10009,6 +10882,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) | |||
10009 | struct bnx2x *bp = netdev_priv(dev); | 10882 | struct bnx2x *bp = netdev_priv(dev); |
10010 | int rc = 0; | 10883 | int rc = 0; |
10011 | 10884 | ||
10885 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
10886 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
10887 | return -EAGAIN; | ||
10888 | } | ||
10889 | |||
10012 | bp->rx_csum = data; | 10890 | bp->rx_csum = data; |
10013 | 10891 | ||
10014 | /* Disable TPA, when Rx CSUM is disabled. Otherwise all | 10892 | /* Disable TPA, when Rx CSUM is disabled. Otherwise all |
@@ -10471,6 +11349,12 @@ static void bnx2x_self_test(struct net_device *dev, | |||
10471 | { | 11349 | { |
10472 | struct bnx2x *bp = netdev_priv(dev); | 11350 | struct bnx2x *bp = netdev_priv(dev); |
10473 | 11351 | ||
11352 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
11353 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
11354 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11355 | return; | ||
11356 | } | ||
11357 | |||
10474 | memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); | 11358 | memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); |
10475 | 11359 | ||
10476 | if (!netif_running(dev)) | 11360 | if (!netif_running(dev)) |
@@ -11456,6 +12340,40 @@ static int bnx2x_open(struct net_device *dev) | |||
11456 | 12340 | ||
11457 | bnx2x_set_power_state(bp, PCI_D0); | 12341 | bnx2x_set_power_state(bp, PCI_D0); |
11458 | 12342 | ||
12343 | if (!bnx2x_reset_is_done(bp)) { | ||
12344 | do { | ||
12345 | /* Reset the MCP mailbox sequence if there is an | ||
12346 | * ongoing recovery | ||
12347 | */ | ||
12348 | bp->fw_seq = 0; | ||
12349 | |||
12350 | /* If it's the first function to load and "reset done" | ||
12351 | * is still not cleared, a previous recovery may not have | ||
12352 | * completed. We don't check the attention state here, as | ||
12353 | * it may already have been cleared by a "common" reset, | ||
12354 | * but we shall proceed with "process kill" anyway. | ||
12355 | */ | ||
12356 | if ((bnx2x_get_load_cnt(bp) == 0) && | ||
12357 | bnx2x_trylock_hw_lock(bp, | ||
12358 | HW_LOCK_RESOURCE_RESERVED_08) && | ||
12359 | (!bnx2x_leader_reset(bp))) { | ||
12360 | DP(NETIF_MSG_HW, "Recovered in open\n"); | ||
12361 | break; | ||
12362 | } | ||
12363 | |||
12364 | bnx2x_set_power_state(bp, PCI_D3hot); | ||
12365 | |||
12366 | printk(KERN_ERR"%s: Recovery flow hasn't been properly" | ||
12367 | " completed yet. Try again later. If u still see this" | ||
12368 | " message after a few retries then power cycle is" | ||
12369 | " required.\n", bp->dev->name); | ||
12370 | |||
12371 | return -EAGAIN; | ||
12372 | } while (0); | ||
12373 | } | ||
12374 | |||
12375 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
12376 | |||
11459 | return bnx2x_nic_load(bp, LOAD_OPEN); | 12377 | return bnx2x_nic_load(bp, LOAD_OPEN); |
11460 | } | 12378 | } |
11461 | 12379 | ||
@@ -11694,6 +12612,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | |||
11694 | struct bnx2x *bp = netdev_priv(dev); | 12612 | struct bnx2x *bp = netdev_priv(dev); |
11695 | int rc = 0; | 12613 | int rc = 0; |
11696 | 12614 | ||
12615 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
12616 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
12617 | return -EAGAIN; | ||
12618 | } | ||
12619 | |||
11697 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | 12620 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || |
11698 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | 12621 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) |
11699 | return -EINVAL; | 12622 | return -EINVAL; |
@@ -11721,7 +12644,7 @@ static void bnx2x_tx_timeout(struct net_device *dev) | |||
11721 | bnx2x_panic(); | 12644 | bnx2x_panic(); |
11722 | #endif | 12645 | #endif |
11723 | /* This allows the netif to be shutdown gracefully before resetting */ | 12646 | /* This allows the netif to be shutdown gracefully before resetting */ |
11724 | schedule_work(&bp->reset_task); | 12647 | schedule_delayed_work(&bp->reset_task, 0); |
11725 | } | 12648 | } |
11726 | 12649 | ||
11727 | #ifdef BCM_VLAN | 12650 | #ifdef BCM_VLAN |
@@ -11880,6 +12803,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
11880 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); | 12803 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); |
11881 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); | 12804 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); |
11882 | 12805 | ||
12806 | /* Reset the load counter */ | ||
12807 | bnx2x_clear_load_cnt(bp); | ||
12808 | |||
11883 | dev->watchdog_timeo = TX_TIMEOUT; | 12809 | dev->watchdog_timeo = TX_TIMEOUT; |
11884 | 12810 | ||
11885 | dev->netdev_ops = &bnx2x_netdev_ops; | 12811 | dev->netdev_ops = &bnx2x_netdev_ops; |
@@ -12205,6 +13131,9 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
12205 | 13131 | ||
12206 | unregister_netdev(dev); | 13132 | unregister_netdev(dev); |
12207 | 13133 | ||
13134 | /* Make sure RESET task is not scheduled before continuing */ | ||
13135 | cancel_delayed_work_sync(&bp->reset_task); | ||
13136 | |||
12208 | kfree(bp->init_ops_offsets); | 13137 | kfree(bp->init_ops_offsets); |
12209 | kfree(bp->init_ops); | 13138 | kfree(bp->init_ops); |
12210 | kfree(bp->init_data); | 13139 | kfree(bp->init_data); |
@@ -12268,6 +13197,11 @@ static int bnx2x_resume(struct pci_dev *pdev) | |||
12268 | } | 13197 | } |
12269 | bp = netdev_priv(dev); | 13198 | bp = netdev_priv(dev); |
12270 | 13199 | ||
13200 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
13201 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
13202 | return -EAGAIN; | ||
13203 | } | ||
13204 | |||
12271 | rtnl_lock(); | 13205 | rtnl_lock(); |
12272 | 13206 | ||
12273 | pci_restore_state(pdev); | 13207 | pci_restore_state(pdev); |
@@ -12434,6 +13368,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev) | |||
12434 | struct net_device *dev = pci_get_drvdata(pdev); | 13368 | struct net_device *dev = pci_get_drvdata(pdev); |
12435 | struct bnx2x *bp = netdev_priv(dev); | 13369 | struct bnx2x *bp = netdev_priv(dev); |
12436 | 13370 | ||
13371 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
13372 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
13373 | return; | ||
13374 | } | ||
13375 | |||
12437 | rtnl_lock(); | 13376 | rtnl_lock(); |
12438 | 13377 | ||
12439 | bnx2x_eeh_recover(bp); | 13378 | bnx2x_eeh_recover(bp); |
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index 944964e78c81..a1f3bf0cd630 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h | |||
@@ -766,6 +766,8 @@ | |||
766 | #define MCP_REG_MCPR_NVM_SW_ARB 0x86420 | 766 | #define MCP_REG_MCPR_NVM_SW_ARB 0x86420 |
767 | #define MCP_REG_MCPR_NVM_WRITE 0x86408 | 767 | #define MCP_REG_MCPR_NVM_WRITE 0x86408 |
768 | #define MCP_REG_MCPR_SCRATCH 0xa0000 | 768 | #define MCP_REG_MCPR_SCRATCH 0xa0000 |
769 | #define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1) | ||
770 | #define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0) | ||
769 | /* [R 32] read first 32 bit after inversion of function 0. mapped as | 771 | /* [R 32] read first 32 bit after inversion of function 0. mapped as |
770 | follows: [0] NIG attention for function0; [1] NIG attention for | 772 | follows: [0] NIG attention for function0; [1] NIG attention for |
771 | function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; | 773 | function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; |
@@ -1249,6 +1251,8 @@ | |||
1249 | #define MISC_REG_E1HMF_MODE 0xa5f8 | 1251 | #define MISC_REG_E1HMF_MODE 0xa5f8 |
1250 | /* [RW 32] Debug only: spare RW register reset by core reset */ | 1252 | /* [RW 32] Debug only: spare RW register reset by core reset */ |
1251 | #define MISC_REG_GENERIC_CR_0 0xa460 | 1253 | #define MISC_REG_GENERIC_CR_0 0xa460 |
1254 | /* [RW 32] Debug only: spare RW register reset by por reset */ | ||
1255 | #define MISC_REG_GENERIC_POR_1 0xa474 | ||
1252 | /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of | 1256 | /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of |
1253 | these bits is written as a '1'; the corresponding SPIO bit will turn off | 1257 | these bits is written as a '1'; the corresponding SPIO bit will turn off |
1254 | it's drivers and become an input. This is the reset state of all GPIO | 1258 | it's drivers and become an input. This is the reset state of all GPIO |
@@ -1438,7 +1442,7 @@ | |||
1438 | (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ | 1442 | (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ |
1439 | #define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc | 1443 | #define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc |
1440 | /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses | 1444 | /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses |
1441 | in this register. addres 0 - timer 1; address - timer 2 ... address 7 - | 1445 | in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 - |
1442 | timer 8 */ | 1446 | timer 8 */ |
1443 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 | 1447 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 |
1444 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are | 1448 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are |
@@ -2407,10 +2411,16 @@ | |||
2407 | /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means | 2411 | /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means |
2408 | this client is waiting for the arbiter. */ | 2412 | this client is waiting for the arbiter. */ |
2409 | #define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 | 2413 | #define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 |
2414 | /* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue | ||
2415 | block. Should be used to close the gates. */ | ||
2416 | #define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 | ||
2410 | /* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit | 2417 | /* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit |
2411 | should update accoring to 'hst_discard_doorbells' register when the state | 2418 | should update accoring to 'hst_discard_doorbells' register when the state |
2412 | machine is idle */ | 2419 | machine is idle */ |
2413 | #define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 | 2420 | #define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 |
2421 | /* [RW 1] When 1; new internal writes arriving to the block are discarded. | ||
2422 | Should be used to close the gates. */ | ||
2423 | #define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 | ||
2414 | /* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' | 2424 | /* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' |
2415 | means this PSWHST is discarding inputs from this client. Each bit should | 2425 | means this PSWHST is discarding inputs from this client. Each bit should |
2416 | update accoring to 'hst_discard_internal_writes' register when the state | 2426 | update accoring to 'hst_discard_internal_writes' register when the state |
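These two write-one-to-discard bits are what the recovery flow uses to "close the gates" on the host interface: once set, doorbells and internal writes are dropped instead of reaching the chip, so a global reset can proceed without in-flight host traffic. A minimal sketch of driving them together (the helper name is illustrative):

	static void bnx2x_pxp_close_gates(struct bnx2x *bp, bool close)
	{
		u32 val = close ? 1 : 0;

		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, val);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, val);

		/* The read-only *_STATUS debug registers mirror the request
		 * once the PSWHST state machines go idle; a caller may poll
		 * them before proceeding with the reset. */
	}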
@@ -4422,11 +4432,21 @@ | |||
4422 | #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 | 4432 | #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 |
4423 | #define MISC_REGISTERS_GPIO_SET_POS 8 | 4433 | #define MISC_REGISTERS_GPIO_SET_POS 8 |
4424 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 | 4434 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 |
4435 | #define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) | ||
4425 | #define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) | 4436 | #define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) |
4437 | #define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) | ||
4438 | #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) | ||
4426 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 | 4439 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 |
4427 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 | 4440 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 |
4428 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) | 4441 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) |
4429 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) | 4442 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) |
4443 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) | ||
4444 | #define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) | ||
4445 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) | ||
4446 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) | ||
4447 | #define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) | ||
4448 | #define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) | ||
4449 | #define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) | ||
4430 | #define MISC_REGISTERS_RESET_REG_2_SET 0x594 | 4450 | #define MISC_REGISTERS_RESET_REG_2_SET 0x594 |
4431 | #define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 | 4451 | #define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 |
4432 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) | 4452 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) |
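The MISC reset registers come in SET/CLEAR pairs: writing a 1 to a bit at the _CLEAR address knocks the corresponding block into reset, while writing the same bit at the _SET address releases it again, so no read-modify-write is needed. A hedged illustration using two of the bits defined above (the particular mask is an arbitrary example, not taken from the patch):

	u32 mask = MISC_REGISTERS_RESET_REG_1_RST_HC |
		   MISC_REGISTERS_RESET_REG_1_RST_NIG;

	/* Put the HC and NIG blocks into reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, mask);
	...
	/* Take them back out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, mask);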
@@ -4454,6 +4474,7 @@ | |||
4454 | #define HW_LOCK_RESOURCE_GPIO 1 | 4474 | #define HW_LOCK_RESOURCE_GPIO 1 |
4455 | #define HW_LOCK_RESOURCE_MDIO 0 | 4475 | #define HW_LOCK_RESOURCE_MDIO 0 |
4456 | #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 | 4476 | #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 |
4477 | #define HW_LOCK_RESOURCE_RESERVED_08 8 | ||
4457 | #define HW_LOCK_RESOURCE_SPIO 2 | 4478 | #define HW_LOCK_RESOURCE_SPIO 2 |
4458 | #define HW_LOCK_RESOURCE_UNDI 5 | 4479 | #define HW_LOCK_RESOURCE_UNDI 5 |
4459 | #define PRS_FLAG_OVERETH_IPV4 1 | 4480 | #define PRS_FLAG_OVERETH_IPV4 1 |
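HW_LOCK_RESOURCE_RESERVED_08 claims one of the previously unused hardware lock resources, presumably so the recovery flow can serialize its critical steps across PCI functions. A sketch using the driver's existing lock helpers (the placement of the lock around recovery work is an assumption, not something shown in this hunk):

	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	if (!rc) {
		/* ... step that must not run on two functions at once ... */
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	}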
@@ -4474,6 +4495,10 @@ | |||
4474 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) | 4495 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) |
4475 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) | 4496 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) |
4476 | #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) | 4497 | #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) |
4498 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28) | ||
4499 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31) | ||
4500 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29) | ||
4501 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30) | ||
4477 | #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) | 4502 | #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) |
4478 | #define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) | 4503 | #define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) |
4479 | #define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) | 4504 | #define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) |
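The four new MCP sources occupy bits 28-31 of the AEU attention inputs and, as the name suggests, are latched until explicitly cleared. Grouping them lets the parity-handling code test or acknowledge them in a single operation; a hedged example, with the macro name and the scheduling call being illustrative rather than quoted from the patch:

	#define MCP_LATCHED_PARITY_MASK					\
		(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |		\
		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |	\
		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY |	\
		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)

	if (attn & MCP_LATCHED_PARITY_MASK)
		/* MCP parity detected -- kick off the recovery flow */
		schedule_delayed_work(&bp->reset_task, 0);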