 drivers/net/Kconfig         |    1
 drivers/net/bnx2x.h         |  739
 drivers/net/bnx2x_fw_defs.h |  483
 drivers/net/bnx2x_hsi.h     |  708
 drivers/net/bnx2x_init.h    |   20
 drivers/net/bnx2x_link.c    |   11
 drivers/net/bnx2x_main.c    | 3875
 drivers/net/bnx2x_reg.h     |   66
 include/linux/pci_ids.h     |    2
 9 files changed, 3752 insertions(+), 2153 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0e0f6696ccac..287d0873c60d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2600,6 +2600,7 @@ config BNX2X
2600 | tristate "Broadcom NetXtremeII 10Gb support" | 2600 | tristate "Broadcom NetXtremeII 10Gb support" |
2601 | depends on PCI | 2601 | depends on PCI |
2602 | select ZLIB_INFLATE | 2602 | select ZLIB_INFLATE |
2603 | select LIBCRC32C | ||
2603 | help | 2604 | help |
2604 | This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. | 2605 | This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. |
2605 | To compile this driver as a module, choose M here: the module | 2606 | To compile this driver as a module, choose M here: the module |
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 0979ca0ae408..e08b9439a933 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -14,39 +14,46 @@
14 | #ifndef BNX2X_H | 14 | #ifndef BNX2X_H |
15 | #define BNX2X_H | 15 | #define BNX2X_H |
16 | 16 | ||
17 | /* compilation time flags */ | ||
18 | |||
19 | /* define this to make the driver freeze on error to allow getting debug info | ||
20 | * (you will need to reboot afterwards) */ | ||
21 | /* #define BNX2X_STOP_ON_ERROR */ | ||
22 | |||
17 | /* error/debug prints */ | 23 | /* error/debug prints */ |
18 | 24 | ||
19 | #define DRV_MODULE_NAME "bnx2x" | 25 | #define DRV_MODULE_NAME "bnx2x" |
20 | #define PFX DRV_MODULE_NAME ": " | 26 | #define PFX DRV_MODULE_NAME ": " |
21 | 27 | ||
22 | /* for messages that are currently off */ | 28 | /* for messages that are currently off */ |
23 | #define BNX2X_MSG_OFF 0 | 29 | #define BNX2X_MSG_OFF 0 |
24 | #define BNX2X_MSG_MCP 0x10000 /* was: NETIF_MSG_HW */ | 30 | #define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ |
25 | #define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */ | 31 | #define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ |
26 | #define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */ | 32 | #define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ |
27 | #define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */ | 33 | #define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ |
28 | #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ | 34 | #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ |
29 | #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ | 35 | #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ |
30 | 36 | ||
31 | #define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ | 37 | #define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ |
32 | 38 | ||
33 | /* regular debug print */ | 39 | /* regular debug print */ |
34 | #define DP(__mask, __fmt, __args...) do { \ | 40 | #define DP(__mask, __fmt, __args...) do { \ |
35 | if (bp->msglevel & (__mask)) \ | 41 | if (bp->msglevel & (__mask)) \ |
36 | printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __FUNCTION__, \ | 42 | printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
37 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ | 43 | bp->dev?(bp->dev->name):"?", ##__args); \ |
38 | } while (0) | 44 | } while (0) |
39 | 45 | ||
40 | /* for errors (never masked) */ | 46 | /* errors debug print */ |
41 | #define BNX2X_ERR(__fmt, __args...) do { \ | 47 | #define BNX2X_DBG_ERR(__fmt, __args...) do { \ |
42 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __FUNCTION__, \ | 48 | if (bp->msglevel & NETIF_MSG_PROBE) \ |
43 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ | 49 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
50 | bp->dev?(bp->dev->name):"?", ##__args); \ | ||
44 | } while (0) | 51 | } while (0) |
45 | 52 | ||
46 | /* for logging (never masked) */ | 53 | /* for errors (never masked) */ |
47 | #define BNX2X_LOG(__fmt, __args...) do { \ | 54 | #define BNX2X_ERR(__fmt, __args...) do { \ |
48 | printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \ | 55 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
49 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ | 56 | bp->dev?(bp->dev->name):"?", ##__args); \ |
50 | } while (0) | 57 | } while (0) |
51 | 58 | ||
52 | /* before we have a dev->name use dev_info() */ | 59 | /* before we have a dev->name use dev_info() */ |
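For reference, the reworked debug macros above are invoked as in the following minimal sketch. It assumes the context the macros themselves require (a struct bnx2x *bp in scope, since DP(), BNX2X_DBG_ERR() and BNX2X_ERR() all expand references to bp->msglevel and bp->dev); the function name is hypothetical:

static int bnx2x_example_probe_step(struct bnx2x *bp)
{
        /* masked unless the user enabled BNX2X_MSG_SP via ethtool msglvl */
        DP(BNX2X_MSG_SP, "state is 0x%x\n", bp->state);

        if (bp->regview == NULL) {
                BNX2X_ERR("cannot map register space\n");  /* never masked */
                return -ENOMEM;
        }

        /* new in this patch: error print that is masked by NETIF_MSG_PROBE */
        BNX2X_DBG_ERR("non-fatal condition seen during probe\n");
        return 0;
}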
@@ -60,7 +67,7 @@
60 | #define bnx2x_panic() do { \ | 67 | #define bnx2x_panic() do { \ |
61 | bp->panic = 1; \ | 68 | bp->panic = 1; \ |
62 | BNX2X_ERR("driver assert\n"); \ | 69 | BNX2X_ERR("driver assert\n"); \ |
63 | bnx2x_disable_int(bp); \ | 70 | bnx2x_int_disable(bp); \ |
64 | bnx2x_panic_dump(bp); \ | 71 | bnx2x_panic_dump(bp); \ |
65 | } while (0) | 72 | } while (0) |
66 | #else | 73 | #else |
@@ -71,24 +78,29 @@
71 | #endif | 78 | #endif |
72 | 79 | ||
73 | 80 | ||
74 | #define U64_LO(x) (((u64)x) & 0xffffffff) | 81 | #ifdef NETIF_F_HW_VLAN_TX |
75 | #define U64_HI(x) (((u64)x) >> 32) | 82 | #define BCM_VLAN 1 |
76 | #define HILO_U64(hi, lo) (((u64)hi << 32) + lo) | 83 | #endif |
84 | |||
77 | 85 | ||
86 | #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) | ||
87 | #define U64_HI(x) (u32)(((u64)(x)) >> 32) | ||
88 | #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) | ||
78 | 89 | ||
79 | #define REG_ADDR(bp, offset) (bp->regview + offset) | ||
80 | 90 | ||
81 | #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) | 91 | #define REG_ADDR(bp, offset) (bp->regview + offset) |
82 | #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) | ||
83 | #define REG_RD64(bp, offset) readq(REG_ADDR(bp, offset)) | ||
84 | 92 | ||
85 | #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) | 93 | #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) |
94 | #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) | ||
95 | #define REG_RD64(bp, offset) readq(REG_ADDR(bp, offset)) | ||
96 | |||
97 | #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) | ||
86 | #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) | 98 | #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) |
87 | #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) | 99 | #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) |
88 | #define REG_WR32(bp, offset, val) REG_WR(bp, offset, val) | 100 | #define REG_WR32(bp, offset, val) REG_WR(bp, offset, val) |
89 | 101 | ||
90 | #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) | 102 | #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) |
91 | #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) | 103 | #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) |
92 | 104 | ||
93 | #define REG_RD_DMAE(bp, offset, valp, len32) \ | 105 | #define REG_RD_DMAE(bp, offset, valp, len32) \ |
94 | do { \ | 106 | do { \ |
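The U64_LO()/U64_HI() rework above adds explicit u32 casts, which matters when a 64-bit dma_addr_t is split across two 32-bit device registers. A minimal sketch, assuming the macros above; the two register offsets are parameters here, not real bnx2x defines:

static void bnx2x_example_post_dma_addr(struct bnx2x *bp, u32 reg_lo,
                                        u32 reg_hi, dma_addr_t mapping)
{
        REG_WR(bp, reg_lo, U64_LO(mapping));    /* low 32 bits */
        REG_WR(bp, reg_hi, U64_HI(mapping));    /* high 32 bits */
        /* HILO_U64(U64_HI(mapping), U64_LO(mapping)) reassembles mapping */
}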
@@ -96,28 +108,28 @@
96 | memcpy(valp, bnx2x_sp(bp, wb_data[0]), len32 * 4); \ | 108 | memcpy(valp, bnx2x_sp(bp, wb_data[0]), len32 * 4); \ |
97 | } while (0) | 109 | } while (0) |
98 | 110 | ||
99 | #define REG_WR_DMAE(bp, offset, val, len32) \ | 111 | #define REG_WR_DMAE(bp, offset, valp, len32) \ |
100 | do { \ | 112 | do { \ |
101 | memcpy(bnx2x_sp(bp, wb_data[0]), val, len32 * 4); \ | 113 | memcpy(bnx2x_sp(bp, wb_data[0]), valp, len32 * 4); \ |
102 | bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ | 114 | bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ |
103 | offset, len32); \ | 115 | offset, len32); \ |
104 | } while (0) | 116 | } while (0) |
105 | 117 | ||
106 | #define SHMEM_RD(bp, type) \ | 118 | #define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ |
107 | REG_RD(bp, bp->shmem_base + offsetof(struct shmem_region, type)) | 119 | offsetof(struct shmem_region, field)) |
108 | #define SHMEM_WR(bp, type, val) \ | 120 | #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) |
109 | REG_WR(bp, bp->shmem_base + offsetof(struct shmem_region, type), val) | 121 | #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) |
110 | 122 | ||
111 | #define NIG_WR(reg, val) REG_WR(bp, reg, val) | 123 | #define NIG_WR(reg, val) REG_WR(bp, reg, val) |
112 | #define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) | 124 | #define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) |
113 | #define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val) | 125 | #define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val) |
114 | 126 | ||
115 | 127 | ||
116 | #define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) | 128 | #define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) |
117 | 129 | ||
118 | #define for_each_nondefault_queue(bp, var) \ | 130 | #define for_each_nondefault_queue(bp, var) \ |
119 | for (var = 1; var < bp->num_queues; var++) | 131 | for (var = 1; var < bp->num_queues; var++) |
120 | #define is_multi(bp) (bp->num_queues > 1) | 132 | #define is_multi(bp) (bp->num_queues > 1) |
121 | 133 | ||
122 | 134 | ||
123 | struct regp { | 135 | struct regp { |
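The SHMEM_ADDR()/SHMEM_RD() split and the for_each_queue() iterator above are typically combined as in this sketch; the shared-memory field name follows the shmem_region layout in bnx2x_hsi.h and is illustrative only:

static void bnx2x_example_use_helpers(struct bnx2x *bp)
{
        u32 val;
        int i;

        /* read a field the bootcode publishes in shared memory */
        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        DP(BNX2X_MSG_MCP, "shmem validity_map 0x%x\n", val);

        /* walk all queues, including the default queue 0 */
        for_each_queue(bp, i)
                napi_enable(&bnx2x_fp(bp, i, napi));
}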
@@ -358,210 +370,122 @@ struct bnx2x_eth_stats {
358 | u32 number_of_bugs_found_in_stats_spec; /* just kidding */ | 370 | u32 number_of_bugs_found_in_stats_spec; /* just kidding */ |
359 | }; | 371 | }; |
360 | 372 | ||
361 | #define MAC_STX_NA 0xffffffff | ||
362 | |||
363 | #ifdef BNX2X_MULTI | ||
364 | #define MAX_CONTEXT 16 | ||
365 | #else | ||
366 | #define MAX_CONTEXT 1 | ||
367 | #endif | ||
368 | |||
369 | union cdu_context { | ||
370 | struct eth_context eth; | ||
371 | char pad[1024]; | ||
372 | }; | ||
373 | |||
374 | #define MAX_DMAE_C 5 | ||
375 | |||
376 | /* DMA memory not used in fastpath */ | ||
377 | struct bnx2x_slowpath { | ||
378 | union cdu_context context[MAX_CONTEXT]; | ||
379 | struct eth_stats_query fw_stats; | ||
380 | struct mac_configuration_cmd mac_config; | ||
381 | struct mac_configuration_cmd mcast_config; | ||
382 | |||
383 | /* used by dmae command executer */ | ||
384 | struct dmae_command dmae[MAX_DMAE_C]; | ||
385 | |||
386 | union mac_stats mac_stats; | ||
387 | struct nig_stats nig; | ||
388 | struct bnx2x_eth_stats eth_stats; | ||
389 | |||
390 | u32 wb_comp; | ||
391 | #define BNX2X_WB_COMP_VAL 0xe0d0d0ae | ||
392 | u32 wb_data[4]; | ||
393 | }; | ||
394 | |||
395 | #define bnx2x_sp(bp, var) (&bp->slowpath->var) | ||
396 | #define bnx2x_sp_check(bp, var) ((bp->slowpath) ? (&bp->slowpath->var) : NULL) | 373 | #define bnx2x_sp_check(bp, var) ((bp->slowpath) ? (&bp->slowpath->var) : NULL) |
397 | #define bnx2x_sp_mapping(bp, var) \ | ||
398 | (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) | ||
399 | |||
400 | |||
401 | struct sw_rx_bd { | 374 | struct sw_rx_bd { |
402 | struct sk_buff *skb; | 375 | struct sk_buff *skb; |
403 | DECLARE_PCI_UNMAP_ADDR(mapping) | 376 | DECLARE_PCI_UNMAP_ADDR(mapping) |
404 | }; | 377 | }; |
405 | 378 | ||
406 | struct sw_tx_bd { | 379 | struct sw_tx_bd { |
407 | struct sk_buff *skb; | 380 | struct sk_buff *skb; |
408 | u16 first_bd; | 381 | u16 first_bd; |
409 | }; | 382 | }; |
410 | 383 | ||
411 | struct bnx2x_fastpath { | 384 | struct bnx2x_fastpath { |
412 | 385 | ||
413 | struct napi_struct napi; | 386 | struct napi_struct napi; |
414 | 387 | ||
415 | struct host_status_block *status_blk; | 388 | struct host_status_block *status_blk; |
416 | dma_addr_t status_blk_mapping; | 389 | dma_addr_t status_blk_mapping; |
417 | 390 | ||
418 | struct eth_tx_db_data *hw_tx_prods; | 391 | struct eth_tx_db_data *hw_tx_prods; |
419 | dma_addr_t tx_prods_mapping; | 392 | dma_addr_t tx_prods_mapping; |
420 | 393 | ||
421 | struct sw_tx_bd *tx_buf_ring; | 394 | struct sw_tx_bd *tx_buf_ring; |
422 | 395 | ||
423 | struct eth_tx_bd *tx_desc_ring; | 396 | struct eth_tx_bd *tx_desc_ring; |
424 | dma_addr_t tx_desc_mapping; | 397 | dma_addr_t tx_desc_mapping; |
425 | 398 | ||
426 | struct sw_rx_bd *rx_buf_ring; | 399 | struct sw_rx_bd *rx_buf_ring; |
427 | 400 | ||
428 | struct eth_rx_bd *rx_desc_ring; | 401 | struct eth_rx_bd *rx_desc_ring; |
429 | dma_addr_t rx_desc_mapping; | 402 | dma_addr_t rx_desc_mapping; |
430 | 403 | ||
431 | union eth_rx_cqe *rx_comp_ring; | 404 | union eth_rx_cqe *rx_comp_ring; |
432 | dma_addr_t rx_comp_mapping; | 405 | dma_addr_t rx_comp_mapping; |
433 | 406 | ||
434 | int state; | 407 | int state; |
435 | #define BNX2X_FP_STATE_CLOSED 0 | 408 | #define BNX2X_FP_STATE_CLOSED 0 |
436 | #define BNX2X_FP_STATE_IRQ 0x80000 | 409 | #define BNX2X_FP_STATE_IRQ 0x80000 |
437 | #define BNX2X_FP_STATE_OPENING 0x90000 | 410 | #define BNX2X_FP_STATE_OPENING 0x90000 |
438 | #define BNX2X_FP_STATE_OPEN 0xa0000 | 411 | #define BNX2X_FP_STATE_OPEN 0xa0000 |
439 | #define BNX2X_FP_STATE_HALTING 0xb0000 | 412 | #define BNX2X_FP_STATE_HALTING 0xb0000 |
440 | #define BNX2X_FP_STATE_HALTED 0xc0000 | 413 | #define BNX2X_FP_STATE_HALTED 0xc0000 |
441 | 414 | ||
442 | int index; | 415 | u8 index; /* number in fp array */ |
443 | 416 | u8 cl_id; /* eth client id */ | |
444 | u16 tx_pkt_prod; | 417 | u8 sb_id; /* status block number in HW */ |
445 | u16 tx_pkt_cons; | 418 | #define FP_IDX(fp) (fp->index) |
446 | u16 tx_bd_prod; | 419 | #define FP_CL_ID(fp) (fp->cl_id) |
447 | u16 tx_bd_cons; | 420 | #define BP_CL_ID(bp) (bp->fp[0].cl_id) |
448 | u16 *tx_cons_sb; | 421 | #define FP_SB_ID(fp) (fp->sb_id) |
449 | 422 | #define CNIC_SB_ID 0 | |
450 | u16 fp_c_idx; | 423 | |
451 | u16 fp_u_idx; | 424 | u16 tx_pkt_prod; |
452 | 425 | u16 tx_pkt_cons; | |
453 | u16 rx_bd_prod; | 426 | u16 tx_bd_prod; |
454 | u16 rx_bd_cons; | 427 | u16 tx_bd_cons; |
455 | u16 rx_comp_prod; | 428 | u16 *tx_cons_sb; |
456 | u16 rx_comp_cons; | 429 | |
457 | u16 *rx_cons_sb; | 430 | u16 fp_c_idx; |
458 | 431 | u16 fp_u_idx; | |
459 | unsigned long tx_pkt, | 432 | |
433 | u16 rx_bd_prod; | ||
434 | u16 rx_bd_cons; | ||
435 | u16 rx_comp_prod; | ||
436 | u16 rx_comp_cons; | ||
437 | u16 *rx_cons_sb; | ||
438 | |||
439 | unsigned long tx_pkt, | ||
460 | rx_pkt, | 440 | rx_pkt, |
461 | rx_calls; | 441 | rx_calls; |
462 | 442 | ||
463 | struct bnx2x *bp; /* parent */ | 443 | struct bnx2x *bp; /* parent */ |
464 | }; | ||
465 | |||
466 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | ||
467 | |||
468 | |||
469 | /* attn group wiring */ | ||
470 | #define MAX_DYNAMIC_ATTN_GRPS 8 | ||
471 | |||
472 | struct attn_route { | ||
473 | u32 sig[4]; | ||
474 | }; | 444 | }; |
475 | 445 | ||
476 | struct bnx2x { | 446 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
477 | /* Fields used in the tx and intr/napi performance paths | 447 | /* This is needed for determening of last_max */ |
478 | * are grouped together in the beginning of the structure | 448 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
479 | */ | ||
480 | struct bnx2x_fastpath *fp; | ||
481 | void __iomem *regview; | ||
482 | void __iomem *doorbells; | ||
483 | |||
484 | struct net_device *dev; | ||
485 | struct pci_dev *pdev; | ||
486 | |||
487 | atomic_t intr_sem; | ||
488 | struct msix_entry msix_table[MAX_CONTEXT+1]; | ||
489 | |||
490 | int tx_ring_size; | ||
491 | 449 | ||
492 | #ifdef BCM_VLAN | 450 | /* stuff added to make the code fit 80Col */ |
493 | struct vlan_group *vlgrp; | ||
494 | #endif | ||
495 | |||
496 | u32 rx_csum; | ||
497 | u32 rx_offset; | ||
498 | u32 rx_buf_use_size; /* useable size */ | ||
499 | u32 rx_buf_size; /* with alignment */ | ||
500 | #define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ | ||
501 | #define ETH_MIN_PACKET_SIZE 60 | ||
502 | #define ETH_MAX_PACKET_SIZE 1500 | ||
503 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 | ||
504 | 451 | ||
505 | struct host_def_status_block *def_status_blk; | 452 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) |
506 | #define DEF_SB_ID 16 | ||
507 | u16 def_c_idx; | ||
508 | u16 def_u_idx; | ||
509 | u16 def_t_idx; | ||
510 | u16 def_x_idx; | ||
511 | u16 def_att_idx; | ||
512 | u32 attn_state; | ||
513 | struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; | ||
514 | u32 aeu_mask; | ||
515 | u32 nig_mask; | ||
516 | 453 | ||
517 | /* slow path ring */ | 454 | #define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \ |
518 | struct eth_spe *spq; | 455 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \ |
519 | dma_addr_t spq_mapping; | 456 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG) |
520 | u16 spq_prod_idx; | ||
521 | struct eth_spe *spq_prod_bd; | ||
522 | struct eth_spe *spq_last_bd; | ||
523 | u16 *dsb_sp_prod; | ||
524 | u16 spq_left; /* serialize spq */ | ||
525 | spinlock_t spq_lock; | ||
526 | |||
527 | /* Flag for marking that there is either | ||
528 | * STAT_QUERY or CFC DELETE ramrod pending | ||
529 | */ | ||
530 | u8 stat_pending; | ||
531 | 457 | ||
532 | /* End of fields used in the performance code paths */ | ||
533 | 458 | ||
534 | int panic; | 459 | #define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS |
535 | int msglevel; | 460 | #define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS |
461 | #define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS | ||
536 | 462 | ||
537 | u32 flags; | 463 | #define BNX2X_RX_SB_INDEX \ |
538 | #define PCIX_FLAG 1 | 464 | (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]) |
539 | #define PCI_32BIT_FLAG 2 | ||
540 | #define ONE_TDMA_FLAG 4 /* no longer used */ | ||
541 | #define NO_WOL_FLAG 8 | ||
542 | #define USING_DAC_FLAG 0x10 | ||
543 | #define USING_MSIX_FLAG 0x20 | ||
544 | #define ASF_ENABLE_FLAG 0x40 | ||
545 | 465 | ||
546 | int port; | 466 | #define BNX2X_RX_SB_BD_INDEX \ |
467 | (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX]) | ||
547 | 468 | ||
548 | int pm_cap; | 469 | #define BNX2X_RX_SB_INDEX_NUM \ |
549 | int pcie_cap; | 470 | (((U_SB_ETH_RX_CQ_INDEX << \ |
471 | USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \ | ||
472 | USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \ | ||
473 | ((U_SB_ETH_RX_BD_INDEX << \ | ||
474 | USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \ | ||
475 | USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER)) | ||
550 | 476 | ||
551 | struct work_struct sp_task; | 477 | #define BNX2X_TX_SB_INDEX \ |
552 | struct work_struct reset_task; | 478 | (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]) |
553 | 479 | ||
554 | struct timer_list timer; | 480 | /* common */ |
555 | int timer_interval; | ||
556 | int current_interval; | ||
557 | 481 | ||
558 | u32 shmem_base; | 482 | struct bnx2x_common { |
559 | 483 | ||
560 | u32 chip_id; | 484 | u32 chip_id; |
561 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | 485 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ |
562 | #define CHIP_ID(bp) (bp->chip_id & 0xfffffff0) | 486 | #define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) |
563 | 487 | ||
564 | #define CHIP_NUM(bp) (bp->chip_id >> 16) | 488 | #define CHIP_NUM(bp) (bp->common.chip_id >> 16) |
565 | #define CHIP_NUM_57710 0x164e | 489 | #define CHIP_NUM_57710 0x164e |
566 | #define CHIP_NUM_57711 0x164f | 490 | #define CHIP_NUM_57711 0x164f |
567 | #define CHIP_NUM_57711E 0x1650 | 491 | #define CHIP_NUM_57711E 0x1650 |
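The new SUB_S16() helper above exists because completion indices are 16 bits wide and wrap around; a signed 16-bit subtraction gives the true distance where an unsigned compare goes wrong near the wrap point. A small sketch of the intended use (hypothetical helper name):

static inline void bnx2x_example_update_last_max(u16 idx, u16 *last_max)
{
        /* e.g. idx = 0x0002, *last_max = 0xfffe: SUB_S16() yields +4,
         * while the unsigned compare (idx < *last_max) would be true */
        if (SUB_S16(idx, *last_max) > 0)
                *last_max = idx;
}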
@@ -572,7 +496,7 @@ struct bnx2x {
572 | CHIP_IS_57711E(bp)) | 496 | CHIP_IS_57711E(bp)) |
573 | #define IS_E1H_OFFSET CHIP_IS_E1H(bp) | 497 | #define IS_E1H_OFFSET CHIP_IS_E1H(bp) |
574 | 498 | ||
575 | #define CHIP_REV(bp) (bp->chip_id & 0x0000f000) | 499 | #define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) |
576 | #define CHIP_REV_Ax 0x00000000 | 500 | #define CHIP_REV_Ax 0x00000000 |
577 | /* assume maximum 5 revisions */ | 501 | /* assume maximum 5 revisions */ |
578 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000) | 502 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000) |
@@ -586,86 +510,250 @@ struct bnx2x {
586 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ | 510 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ |
587 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) | 511 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) |
588 | 512 | ||
589 | #define CHIP_METAL(bp) (bp->chip_id & 0x00000ff0) | 513 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) |
590 | #define CHIP_BOND_ID(bp) (bp->chip_id & 0x0000000f) | 514 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) |
591 | 515 | ||
592 | u16 fw_seq; | 516 | int flash_size; |
593 | u16 fw_drv_pulse_wr_seq; | 517 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ |
594 | u32 fw_mb; | 518 | #define NVRAM_TIMEOUT_COUNT 30000 |
519 | #define NVRAM_PAGE_SIZE 256 | ||
595 | 520 | ||
596 | u32 hw_config; | 521 | u32 shmem_base; |
522 | |||
523 | u32 hw_config; | ||
597 | u32 board; | 524 | u32 board; |
598 | 525 | ||
599 | struct link_params link_params; | 526 | u32 bc_ver; |
527 | |||
528 | char *name; | ||
529 | }; | ||
600 | 530 | ||
601 | struct link_vars link_vars; | 531 | |
532 | /* end of common */ | ||
533 | |||
534 | /* port */ | ||
535 | |||
536 | struct bnx2x_port { | ||
537 | u32 pmf; | ||
602 | 538 | ||
603 | u32 link_config; | 539 | u32 link_config; |
604 | 540 | ||
605 | u32 supported; | 541 | u32 supported; |
542 | /* link settings - missing defines */ | ||
543 | #define SUPPORTED_2500baseX_Full (1 << 15) | ||
544 | |||
545 | u32 advertising; | ||
606 | /* link settings - missing defines */ | 546 | /* link settings - missing defines */ |
607 | #define SUPPORTED_2500baseT_Full (1 << 15) | 547 | #define ADVERTISED_2500baseX_Full (1 << 15) |
608 | 548 | ||
609 | u32 phy_addr; | 549 | u32 phy_addr; |
610 | 550 | ||
611 | /* used to synchronize phy accesses */ | 551 | /* used to synchronize phy accesses */ |
612 | struct mutex phy_mutex; | 552 | struct mutex phy_mutex; |
613 | 553 | ||
614 | u32 phy_id; | 554 | u32 port_stx; |
615 | 555 | ||
556 | struct nig_stats old_nig_stats; | ||
557 | }; | ||
616 | 558 | ||
617 | u32 advertising; | 559 | /* end of port */ |
618 | /* link settings - missing defines */ | 560 | |
619 | #define ADVERTISED_2500baseT_Full (1 << 15) | 561 | #define MAC_STX_NA 0xffffffff |
562 | |||
563 | #ifdef BNX2X_MULTI | ||
564 | #define MAX_CONTEXT 16 | ||
565 | #else | ||
566 | #define MAX_CONTEXT 1 | ||
567 | #endif | ||
568 | |||
569 | union cdu_context { | ||
570 | struct eth_context eth; | ||
571 | char pad[1024]; | ||
572 | }; | ||
573 | |||
574 | #define MAX_DMAE_C 6 | ||
575 | |||
576 | /* DMA memory not used in fastpath */ | ||
577 | struct bnx2x_slowpath { | ||
578 | union cdu_context context[MAX_CONTEXT]; | ||
579 | struct eth_stats_query fw_stats; | ||
580 | struct mac_configuration_cmd mac_config; | ||
581 | struct mac_configuration_cmd mcast_config; | ||
582 | |||
583 | /* used by dmae command executer */ | ||
584 | struct dmae_command dmae[MAX_DMAE_C]; | ||
585 | |||
586 | union mac_stats mac_stats; | ||
587 | struct nig_stats nig; | ||
588 | struct bnx2x_eth_stats eth_stats; | ||
589 | |||
590 | u32 wb_comp; | ||
591 | #define BNX2X_WB_COMP_VAL 0xe0d0d0ae | ||
592 | u32 wb_data[4]; | ||
593 | }; | ||
594 | |||
595 | #define bnx2x_sp(bp, var) (&bp->slowpath->var) | ||
596 | #define bnx2x_sp_mapping(bp, var) \ | ||
597 | (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) | ||
598 | |||
599 | |||
600 | /* attn group wiring */ | ||
601 | #define MAX_DYNAMIC_ATTN_GRPS 8 | ||
602 | |||
603 | struct attn_route { | ||
604 | u32 sig[4]; | ||
605 | }; | ||
606 | |||
607 | struct bnx2x { | ||
608 | /* Fields used in the tx and intr/napi performance paths | ||
609 | * are grouped together in the beginning of the structure | ||
610 | */ | ||
611 | struct bnx2x_fastpath fp[MAX_CONTEXT]; | ||
612 | void __iomem *regview; | ||
613 | void __iomem *doorbells; | ||
614 | #define BNX2X_DB_SIZE (16*2048) | ||
615 | |||
616 | struct net_device *dev; | ||
617 | struct pci_dev *pdev; | ||
618 | |||
619 | atomic_t intr_sem; | ||
620 | struct msix_entry msix_table[MAX_CONTEXT+1]; | ||
621 | |||
622 | int tx_ring_size; | ||
623 | |||
624 | #ifdef BCM_VLAN | ||
625 | struct vlan_group *vlgrp; | ||
626 | #endif | ||
620 | 627 | ||
628 | u32 rx_csum; | ||
629 | u32 rx_offset; | ||
630 | u32 rx_buf_use_size; /* useable size */ | ||
631 | u32 rx_buf_size; /* with alignment */ | ||
632 | #define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ | ||
633 | #define ETH_MIN_PACKET_SIZE 60 | ||
634 | #define ETH_MAX_PACKET_SIZE 1500 | ||
635 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 | ||
621 | 636 | ||
622 | u32 bc_ver; | 637 | struct host_def_status_block *def_status_blk; |
638 | #define DEF_SB_ID 16 | ||
639 | u16 def_c_idx; | ||
640 | u16 def_u_idx; | ||
641 | u16 def_x_idx; | ||
642 | u16 def_t_idx; | ||
643 | u16 def_att_idx; | ||
644 | u32 attn_state; | ||
645 | struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; | ||
646 | u32 aeu_mask; | ||
647 | u32 nig_mask; | ||
648 | |||
649 | /* slow path ring */ | ||
650 | struct eth_spe *spq; | ||
651 | dma_addr_t spq_mapping; | ||
652 | u16 spq_prod_idx; | ||
653 | struct eth_spe *spq_prod_bd; | ||
654 | struct eth_spe *spq_last_bd; | ||
655 | u16 *dsb_sp_prod; | ||
656 | u16 spq_left; /* serialize spq */ | ||
657 | /* used to synchronize spq accesses */ | ||
658 | spinlock_t spq_lock; | ||
659 | |||
660 | /* Flag for marking that there is either | ||
661 | * STAT_QUERY or CFC DELETE ramrod pending | ||
662 | */ | ||
663 | u8 stat_pending; | ||
664 | |||
665 | /* End of fields used in the performance code paths */ | ||
666 | |||
667 | int panic; | ||
668 | int msglevel; | ||
669 | |||
670 | u32 flags; | ||
671 | #define PCIX_FLAG 1 | ||
672 | #define PCI_32BIT_FLAG 2 | ||
673 | #define ONE_TDMA_FLAG 4 /* no longer used */ | ||
674 | #define NO_WOL_FLAG 8 | ||
675 | #define USING_DAC_FLAG 0x10 | ||
676 | #define USING_MSIX_FLAG 0x20 | ||
677 | #define ASF_ENABLE_FLAG 0x40 | ||
678 | #define NO_MCP_FLAG 0x100 | ||
679 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) | ||
680 | |||
681 | int func; | ||
682 | #define BP_PORT(bp) (bp->func % PORT_MAX) | ||
683 | #define BP_FUNC(bp) (bp->func) | ||
684 | #define BP_E1HVN(bp) (bp->func >> 1) | ||
685 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
686 | /* assorted E1HVN */ | ||
687 | #define IS_E1HMF(bp) (bp->e1hmf != 0) | ||
688 | #define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16) | ||
689 | |||
690 | int pm_cap; | ||
691 | int pcie_cap; | ||
692 | |||
693 | struct work_struct sp_task; | ||
694 | struct work_struct reset_task; | ||
695 | |||
696 | struct timer_list timer; | ||
697 | int timer_interval; | ||
698 | int current_interval; | ||
699 | |||
700 | u16 fw_seq; | ||
701 | u16 fw_drv_pulse_wr_seq; | ||
702 | u32 func_stx; | ||
703 | |||
704 | struct link_params link_params; | ||
705 | struct link_vars link_vars; | ||
623 | 706 | ||
624 | int flash_size; | 707 | struct bnx2x_common common; |
625 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ | 708 | struct bnx2x_port port; |
626 | #define NVRAM_TIMEOUT_COUNT 30000 | 709 | |
627 | #define NVRAM_PAGE_SIZE 256 | 710 | u32 mf_config; |
711 | u16 e1hov; | ||
712 | u8 e1hmf; | ||
628 | 713 | ||
629 | u8 wol; | 714 | u8 wol; |
630 | 715 | ||
631 | int rx_ring_size; | 716 | int rx_ring_size; |
632 | 717 | ||
633 | u16 tx_quick_cons_trip_int; | 718 | u16 tx_quick_cons_trip_int; |
634 | u16 tx_quick_cons_trip; | 719 | u16 tx_quick_cons_trip; |
635 | u16 tx_ticks_int; | 720 | u16 tx_ticks_int; |
636 | u16 tx_ticks; | 721 | u16 tx_ticks; |
637 | 722 | ||
638 | u16 rx_quick_cons_trip_int; | 723 | u16 rx_quick_cons_trip_int; |
639 | u16 rx_quick_cons_trip; | 724 | u16 rx_quick_cons_trip; |
640 | u16 rx_ticks_int; | 725 | u16 rx_ticks_int; |
641 | u16 rx_ticks; | 726 | u16 rx_ticks; |
642 | 727 | ||
643 | u32 stats_ticks; | 728 | u32 stats_ticks; |
729 | u32 lin_cnt; | ||
644 | 730 | ||
645 | int state; | 731 | int state; |
646 | #define BNX2X_STATE_CLOSED 0x0 | 732 | #define BNX2X_STATE_CLOSED 0x0 |
647 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 | 733 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 |
648 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 | 734 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 |
649 | #define BNX2X_STATE_OPEN 0x3000 | 735 | #define BNX2X_STATE_OPEN 0x3000 |
650 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 | 736 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 |
651 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 | 737 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 |
652 | #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 | 738 | #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 |
653 | #define BNX2X_STATE_ERROR 0xF000 | 739 | #define BNX2X_STATE_DISABLED 0xd000 |
740 | #define BNX2X_STATE_DIAG 0xe000 | ||
741 | #define BNX2X_STATE_ERROR 0xf000 | ||
654 | 742 | ||
655 | int num_queues; | 743 | int num_queues; |
656 | 744 | ||
657 | u32 rx_mode; | 745 | u32 rx_mode; |
658 | #define BNX2X_RX_MODE_NONE 0 | 746 | #define BNX2X_RX_MODE_NONE 0 |
659 | #define BNX2X_RX_MODE_NORMAL 1 | 747 | #define BNX2X_RX_MODE_NORMAL 1 |
660 | #define BNX2X_RX_MODE_ALLMULTI 2 | 748 | #define BNX2X_RX_MODE_ALLMULTI 2 |
661 | #define BNX2X_RX_MODE_PROMISC 3 | 749 | #define BNX2X_RX_MODE_PROMISC 3 |
662 | #define BNX2X_MAX_MULTICAST 64 | 750 | #define BNX2X_MAX_MULTICAST 64 |
663 | #define BNX2X_MAX_EMUL_MULTI 16 | 751 | #define BNX2X_MAX_EMUL_MULTI 16 |
664 | 752 | ||
665 | dma_addr_t def_status_blk_mapping; | 753 | dma_addr_t def_status_blk_mapping; |
666 | 754 | ||
667 | struct bnx2x_slowpath *slowpath; | 755 | struct bnx2x_slowpath *slowpath; |
668 | dma_addr_t slowpath_mapping; | 756 | dma_addr_t slowpath_mapping; |
669 | 757 | ||
670 | #ifdef BCM_ISCSI | 758 | #ifdef BCM_ISCSI |
671 | void *t1; | 759 | void *t1; |
@@ -742,8 +830,10 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
742 | 830 | ||
743 | /* MC hsi */ | 831 | /* MC hsi */ |
744 | #define RX_COPY_THRESH 92 | 832 | #define RX_COPY_THRESH 92 |
745 | #define BCM_PAGE_BITS 12 | 833 | #define BCM_PAGE_SHIFT 12 |
746 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS) | 834 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) |
835 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) | ||
836 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) | ||
747 | 837 | ||
748 | #define NUM_TX_RINGS 16 | 838 | #define NUM_TX_RINGS 16 |
749 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_tx_bd)) | 839 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_tx_bd)) |
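The new BCM_PAGE_MASK/BCM_PAGE_ALIGN() macros above round a byte count up to the 4 KB boundary implied by BCM_PAGE_SHIFT, e.g. BCM_PAGE_ALIGN(1) = 4096, BCM_PAGE_ALIGN(4096) = 4096, BCM_PAGE_ALIGN(4097) = 8192. A sketch of a typical (illustrative) use when sizing a DMA area:

static inline u32 bnx2x_example_tx_ring_bytes(u32 nr_bds)
{
        /* round the descriptor array up to whole device pages */
        return BCM_PAGE_ALIGN(nr_bds * (u32)sizeof(struct eth_tx_bd));
}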
@@ -795,26 +885,11 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
795 | 885 | ||
796 | 886 | ||
797 | /* must be used on a CID before placing it on a HW ring */ | 887 | /* must be used on a CID before placing it on a HW ring */ |
798 | #define HW_CID(bp, x) (x | (bp->port << 23)) | 888 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | (BP_E1HVN(bp) << 17) | x) |
799 | 889 | ||
800 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 890 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
801 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) | 891 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) |
802 | 892 | ||
803 | #define ATTN_NIG_FOR_FUNC (1L << 8) | ||
804 | #define ATTN_SW_TIMER_4_FUNC (1L << 9) | ||
805 | #define GPIO_2_FUNC (1L << 10) | ||
806 | #define GPIO_3_FUNC (1L << 11) | ||
807 | #define GPIO_4_FUNC (1L << 12) | ||
808 | #define ATTN_GENERAL_ATTN_1 (1L << 13) | ||
809 | #define ATTN_GENERAL_ATTN_2 (1L << 14) | ||
810 | #define ATTN_GENERAL_ATTN_3 (1L << 15) | ||
811 | #define ATTN_GENERAL_ATTN_4 (1L << 13) | ||
812 | #define ATTN_GENERAL_ATTN_5 (1L << 14) | ||
813 | #define ATTN_GENERAL_ATTN_6 (1L << 15) | ||
814 | |||
815 | #define ATTN_HARD_WIRED_MASK 0xff00 | ||
816 | #define ATTENTION_ID 4 | ||
817 | |||
818 | 893 | ||
819 | #define BNX2X_BTR 3 | 894 | #define BNX2X_BTR 3 |
820 | #define MAX_SPQ_PENDING 8 | 895 | #define MAX_SPQ_PENDING 8 |
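The reworked HW_CID() above now folds the E1H virtual-network number into the hardware connection id along with the port. A trivial wrapper, shown only to make the bit composition explicit:

/* For a function on port 1 with E1H VN 2 and client cid 5:
 *   HW_CID(bp, 5) = (1 << 23) | (2 << 17) | 5 = 0x00840005 */
static inline u32 bnx2x_example_hw_cid(struct bnx2x *bp, u32 cid)
{
        return HW_CID(bp, cid);
}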
@@ -831,6 +906,31 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
831 | DPM_TRIGER_TYPE); \ | 906 | DPM_TRIGER_TYPE); \ |
832 | } while (0) | 907 | } while (0) |
833 | 908 | ||
909 | static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | ||
910 | int wait) | ||
911 | { | ||
912 | u32 val; | ||
913 | |||
914 | do { | ||
915 | val = REG_RD(bp, reg); | ||
916 | if (val == expected) | ||
917 | break; | ||
918 | ms -= wait; | ||
919 | msleep(wait); | ||
920 | |||
921 | } while (ms > 0); | ||
922 | |||
923 | return val; | ||
924 | } | ||
925 | |||
926 | |||
927 | /* load/unload mode */ | ||
928 | #define LOAD_NORMAL 0 | ||
929 | #define LOAD_OPEN 1 | ||
930 | #define LOAD_DIAG 2 | ||
931 | #define UNLOAD_NORMAL 0 | ||
932 | #define UNLOAD_CLOSE 1 | ||
933 | |||
834 | /* DMAE command defines */ | 934 | /* DMAE command defines */ |
835 | #define DMAE_CMD_SRC_PCI 0 | 935 | #define DMAE_CMD_SRC_PCI 0 |
836 | #define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC | 936 | #define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC |
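reg_poll() above sleeps in wait-millisecond steps until the register reads the expected value or the ms budget is exhausted, and it returns the last value read, so callers must re-check it. A usage sketch (the status register is passed in here rather than naming a real bnx2x register):

static int bnx2x_example_wait_block_ready(struct bnx2x *bp, u32 status_reg)
{
        /* poll every 50 ms, give up after roughly 500 ms */
        u32 val = reg_poll(bp, status_reg, 0x1, 500, 50);

        if (val != 0x1) {
                BNX2X_ERR("block not ready, status 0x%x\n", val);
                return -EBUSY;
        }
        return 0;
}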
@@ -877,23 +977,48 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
877 | 977 | ||
878 | #define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) | 978 | #define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) |
879 | 979 | ||
880 | /* stuff added to make the code fit 80Col */ | 980 | /* must be used on a CID before placing it on a HW ring */ |
881 | 981 | ||
882 | #define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG | ||
883 | #define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG | ||
884 | #define TPA_TYPE(cqe) (cqe->fast_path_cqe.error_type_flags & \ | ||
885 | (TPA_TYPE_START | TPA_TYPE_END)) | ||
886 | #define BNX2X_RX_SUM_OK(cqe) \ | 982 | #define BNX2X_RX_SUM_OK(cqe) \ |
887 | (!(cqe->fast_path_cqe.status_flags & \ | 983 | (!(cqe->fast_path_cqe.status_flags & \ |
888 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ | 984 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ |
889 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) | 985 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) |
890 | 986 | ||
891 | #define BNX2X_RX_SUM_FIX(cqe) \ | 987 | /* CMNG constants |
892 | ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ | 988 | derived from lab experiments, and not from system spec calculations !!! */ |
893 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ | 989 | #define DEF_MIN_RATE 100 |
894 | (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) | 990 | /* resolution of the rate shaping timer - 100 usec */ |
991 | #define RS_PERIODIC_TIMEOUT_USEC 100 | ||
992 | /* resolution of fairness algorithm in usecs - | ||
993 | coefficient for clauclating the actuall t fair */ | ||
994 | #define T_FAIR_COEF 10000000 | ||
995 | /* number of bytes in single QM arbitration cycle - | ||
996 | coeffiecnt for calculating the fairness timer */ | ||
997 | #define QM_ARB_BYTES 40000 | ||
998 | #define FAIR_MEM 2 | ||
999 | |||
1000 | |||
1001 | #define ATTN_NIG_FOR_FUNC (1L << 8) | ||
1002 | #define ATTN_SW_TIMER_4_FUNC (1L << 9) | ||
1003 | #define GPIO_2_FUNC (1L << 10) | ||
1004 | #define GPIO_3_FUNC (1L << 11) | ||
1005 | #define GPIO_4_FUNC (1L << 12) | ||
1006 | #define ATTN_GENERAL_ATTN_1 (1L << 13) | ||
1007 | #define ATTN_GENERAL_ATTN_2 (1L << 14) | ||
1008 | #define ATTN_GENERAL_ATTN_3 (1L << 15) | ||
1009 | #define ATTN_GENERAL_ATTN_4 (1L << 13) | ||
1010 | #define ATTN_GENERAL_ATTN_5 (1L << 14) | ||
1011 | #define ATTN_GENERAL_ATTN_6 (1L << 15) | ||
1012 | |||
1013 | #define ATTN_HARD_WIRED_MASK 0xff00 | ||
1014 | #define ATTENTION_ID 4 | ||
895 | 1015 | ||
896 | 1016 | ||
1017 | /* stuff added to make the code fit 80Col */ | ||
1018 | |||
1019 | #define BNX2X_PMF_LINK_ASSERT \ | ||
1020 | GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) | ||
1021 | |||
897 | #define BNX2X_MC_ASSERT_BITS \ | 1022 | #define BNX2X_MC_ASSERT_BITS \ |
898 | (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ | 1023 | (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ |
899 | GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ | 1024 | GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ |
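BNX2X_RX_SUM_OK() above checks the CQE flags that say the chip validated both the IP and L4 checksums; the receive path uses it to decide whether the skb can skip software checksumming. A simplified sketch of that decision (the real completion handler in bnx2x_main.c does considerably more):

static void bnx2x_example_rx_csum(struct bnx2x *bp, struct sk_buff *skb,
                                  union eth_rx_cqe *cqe)
{
        skb->ip_summed = CHECKSUM_NONE;
        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}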
@@ -906,12 +1031,20 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
906 | #define BNX2X_DOORQ_ASSERT \ | 1031 | #define BNX2X_DOORQ_ASSERT \ |
907 | AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT | 1032 | AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT |
908 | 1033 | ||
1034 | #define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) | ||
1035 | #define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ | ||
1036 | GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ | ||
1037 | GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ | ||
1038 | GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ | ||
1039 | GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ | ||
1040 | GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) | ||
1041 | |||
909 | #define HW_INTERRUT_ASSERT_SET_0 \ | 1042 | #define HW_INTERRUT_ASSERT_SET_0 \ |
910 | (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ | 1043 | (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ |
911 | AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ | 1044 | AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ |
912 | AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ | 1045 | AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ |
913 | AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT) | 1046 | AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT) |
914 | #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ | 1047 | #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ |
915 | AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ | 1048 | AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ |
916 | AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ | 1049 | AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ |
917 | AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ | 1050 | AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ |
@@ -928,7 +1061,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
928 | AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ | 1061 | AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ |
929 | AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ | 1062 | AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ |
930 | AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) | 1063 | AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) |
931 | #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\ | 1064 | #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\ |
932 | AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ | 1065 | AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ |
933 | AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ | 1066 | AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ |
934 | AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ | 1067 | AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ |
@@ -945,7 +1078,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
945 | AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ | 1078 | AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ |
946 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ | 1079 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ |
947 | AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) | 1080 | AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) |
948 | #define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ | 1081 | #define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ |
949 | AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ | 1082 | AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ |
950 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ | 1083 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ |
951 | AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ | 1084 | AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ |
@@ -954,42 +1087,44 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
954 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) | 1087 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) |
955 | 1088 | ||
956 | 1089 | ||
957 | #define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \ | ||
958 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \ | ||
959 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG) | ||
960 | |||
961 | |||
962 | #define MULTI_FLAGS \ | 1090 | #define MULTI_FLAGS \ |
963 | (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ | 1091 | (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ |
964 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ | 1092 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ |
965 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ | 1093 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ |
966 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ | 1094 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ |
967 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE) | 1095 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE) |
968 | 1096 | ||
969 | #define MULTI_MASK 0x7f | 1097 | #define MULTI_MASK 0x7f |
970 | 1098 | ||
971 | 1099 | ||
972 | #define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS | 1100 | #define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES) |
973 | #define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS | 1101 | #define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES) |
974 | #define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH | 1102 | #define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES) |
975 | 1103 | #define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES) | |
976 | #define BNX2X_RX_SB_INDEX \ | ||
977 | &fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX] | ||
978 | 1104 | ||
979 | #define BNX2X_TX_SB_INDEX \ | 1105 | #define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH |
980 | &fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX] | ||
981 | 1106 | ||
982 | #define BNX2X_SP_DSB_INDEX \ | 1107 | #define BNX2X_SP_DSB_INDEX \ |
983 | &bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX] | 1108 | (&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]) |
984 | 1109 | ||
985 | 1110 | ||
986 | #define CAM_IS_INVALID(x) \ | 1111 | #define CAM_IS_INVALID(x) \ |
987 | (x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) | 1112 | (x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) |
988 | 1113 | ||
989 | #define CAM_INVALIDATE(x) \ | 1114 | #define CAM_INVALIDATE(x) \ |
990 | x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE | 1115 | (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) |
1116 | |||
1117 | |||
1118 | /* Number of u32 elements in MC hash array */ | ||
1119 | #define MC_HASH_SIZE 8 | ||
1120 | #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ | ||
1121 | TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) | ||
991 | 1122 | ||
992 | 1123 | ||
1124 | #ifndef PXP2_REG_PXP2_INT_STS | ||
1125 | #define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 | ||
1126 | #endif | ||
1127 | |||
993 | /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ | 1128 | /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ |
994 | 1129 | ||
995 | #endif /* bnx2x.h */ | 1130 | #endif /* bnx2x.h */ |
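The new MC_HASH_SIZE/MC_HASH_OFFSET() definitions above (together with "select LIBCRC32C" in Kconfig and T_ETH_CRC32_HASH_SEED in bnx2x_fw_defs.h below) support an approximate-match multicast filter: a CRC32C of the MAC address selects one of MC_HASH_SIZE * 32 = 256 bins. A hedged sketch of the bin computation; taking bits 24..31 of the CRC as the bin index is an assumption for illustration, the authoritative mapping is whatever bnx2x_main.c programs through MC_HASH_OFFSET():

#include <linux/crc32c.h>
#include <linux/if_ether.h>

static inline u8 bnx2x_example_mc_bin(const u8 *mac)
{
        /* seed 0 matches T_ETH_CRC32_HASH_SEED */
        u32 crc = crc32c(0, mac, ETH_ALEN);

        return (crc >> 24) & 0xff;      /* bin 0..255 (assumed bit-slice) */
}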
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 3b968904ca65..e3da7f69d27b 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -8,191 +8,390 @@
8 | */ | 8 | */ |
9 | 9 | ||
10 | 10 | ||
11 | #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\ | 11 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET \ |
12 | (0x1922 + (port * 0x40) + (index * 0x4)) | 12 | (IS_E1H_OFFSET? 0x7000 : 0x1000) |
13 | #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\ | 13 | #define CSTORM_ASSERT_LIST_OFFSET(idx) \ |
14 | (0x1900 + (port * 0x40)) | 14 | (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
15 | #define CSTORM_HC_BTR_OFFSET(port)\ | 15 | #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
16 | (0x1984 + (port * 0xc0)) | 16 | (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ |
17 | #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index)\ | 17 | * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ |
18 | (0x141a + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4)) | 18 | * 0x4))) |
19 | #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index)\ | 19 | #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
20 | (0x1418 + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4)) | 20 | (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ |
21 | #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id)\ | 21 | * 0x100)) : (0x1900 + (function * 0x40))) |
22 | (0x1400 + (port * 0x280) + (cpu_id * 0x28)) | 22 | #define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
23 | #define CSTORM_STATS_FLAGS_OFFSET(port) (0x5108 + (port * 0x8)) | 23 | (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ |
24 | #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id)\ | 24 | * 0x100)) : (0x1908 + (function * 0x40))) |
25 | (0x1510 + (port * 0x240) + (client_id * 0x20)) | 25 | #define CSTORM_FUNCTION_MODE_OFFSET \ |
26 | #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\ | 26 | (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) |
27 | (0x138a + (port * 0x28) + (index * 0x4)) | 27 | #define CSTORM_HC_BTR_OFFSET(port) \ |
28 | #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\ | 28 | (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) |
29 | (0x1370 + (port * 0x28)) | 29 | #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ |
30 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port)\ | 30 | (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ |
31 | (0x4b70 + (port * 0x8)) | 31 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ |
32 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function)\ | 32 | (index * 0x4))) |
33 | (0x1418 + (function * 0x30)) | 33 | #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ |
34 | #define TSTORM_HC_BTR_OFFSET(port)\ | 34 | (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ |
35 | (0x13c4 + (port * 0x18)) | 35 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ |
36 | #define TSTORM_INDIRECTION_TABLE_OFFSET(port)\ | 36 | (index * 0x4))) |
37 | (0x22c8 + (port * 0x80)) | 37 | #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ |
38 | #define TSTORM_INDIRECTION_TABLE_SIZE 0x80 | 38 | (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ |
39 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(port)\ | 39 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) |
40 | (0x1420 + (port * 0x30)) | 40 | #define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ |
41 | #define TSTORM_RCQ_PROD_OFFSET(port, client_id)\ | 41 | (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ |
42 | (0x1508 + (port * 0x240) + (client_id * 0x20)) | 42 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) |
43 | #define TSTORM_STATS_FLAGS_OFFSET(port) (0x4b90 + (port * 0x8)) | 43 | #define CSTORM_STATS_FLAGS_OFFSET(function) \ |
44 | #define USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\ | 44 | (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ |
45 | (0x191a + (port * 0x28) + (index * 0x4)) | 45 | (function * 0x8))) |
46 | #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\ | 46 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ |
47 | (0x1900 + (port * 0x28)) | 47 | (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) |
48 | #define USTORM_HC_BTR_OFFSET(port)\ | 48 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET \ |
49 | (0x1954 + (port * 0xb8)) | 49 | (IS_E1H_OFFSET? 0xa000 : 0x1000) |
50 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port)\ | 50 | #define TSTORM_ASSERT_LIST_OFFSET(idx) \ |
51 | (0x5408 + (port * 0x8)) | 51 | (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
52 | #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index)\ | 52 | #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ |
53 | (0x141a + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4)) | 53 | (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ |
54 | #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index)\ | 54 | (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) |
55 | (0x1418 + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4)) | 55 | #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
56 | #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id)\ | 56 | (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ |
57 | (0x1400 + (port * 0x280) + (cpu_id * 0x28)) | 57 | * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ |
58 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET 0x1000 | 58 | 0x4))) |
59 | #define XSTORM_ASSERT_LIST_OFFSET(idx) (0x1020 + (idx * 0x10)) | 59 | #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
60 | #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\ | 60 | (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ |
61 | (0x141a + (port * 0x28) + (index * 0x4)) | 61 | * 0xa0)) : (0x1400 + (function * 0x28))) |
62 | #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\ | 62 | #define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
63 | (0x1400 + (port * 0x28)) | 63 | (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ |
64 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port)\ | 64 | * 0xa0)) : (0x1408 + (function * 0x28))) |
65 | (0x5408 + (port * 0x8)) | 65 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ |
66 | #define XSTORM_HC_BTR_OFFSET(port)\ | 66 | (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ |
67 | (0x1454 + (port * 0x18)) | 67 | (function * 0x8))) |
68 | #define XSTORM_SPQ_PAGE_BASE_OFFSET(port)\ | 68 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ |
69 | (0x5328 + (port * 0x18)) | 69 | (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ |
70 | #define XSTORM_SPQ_PROD_OFFSET(port)\ | 70 | (function * 0x38))) |
71 | (0x5330 + (port * 0x18)) | 71 | #define TSTORM_FUNCTION_MODE_OFFSET \ |
72 | #define XSTORM_STATS_FLAGS_OFFSET(port) (0x53f8 + (port * 0x8)) | 72 | (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) |
73 | #define TSTORM_HC_BTR_OFFSET(port) \ | ||
74 | (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) | ||
75 | #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ | ||
76 | (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ | ||
77 | (function * 0x80))) | ||
78 | #define TSTORM_INDIRECTION_TABLE_SIZE 0x80 | ||
79 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ | ||
80 | (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ | ||
81 | (function * 0x38))) | ||
82 | #define TSTORM_RX_PRODS_OFFSET(port, client_id) \ | ||
83 | (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ | ||
84 | (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) | ||
85 | #define TSTORM_STATS_FLAGS_OFFSET(function) \ | ||
86 | (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ | ||
87 | (function * 0x8))) | ||
88 | #define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) | ||
89 | #define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) | ||
90 | #define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) | ||
91 | #define USTORM_ASSERT_LIST_INDEX_OFFSET \ | ||
92 | (IS_E1H_OFFSET? 0x8000 : 0x1000) | ||
93 | #define USTORM_ASSERT_LIST_OFFSET(idx) \ | ||
94 | (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | ||
95 | #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ | ||
96 | (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ | ||
97 | (0x5450 + (port * 0x1c8) + (clientId * 0x18))) | ||
98 | #define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | ||
99 | (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ | ||
100 | * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ | ||
101 | 0x4))) | ||
102 | #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | ||
103 | (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ | ||
104 | * 0xa0)) : (0x1900 + (function * 0x28))) | ||
105 | #define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | ||
106 | (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ | ||
107 | * 0xa0)) : (0x1908 + (function * 0x28))) | ||
108 | #define USTORM_FUNCTION_MODE_OFFSET \ | ||
109 | (IS_E1H_OFFSET? 0x2448 : 0xffffffff) | ||
110 | #define USTORM_HC_BTR_OFFSET(port) \ | ||
111 | (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) | ||
112 | #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ | ||
113 | (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ | ||
114 | (0x5448 + (port * 0x1c8) + (clientId * 0x18))) | ||
115 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ | ||
116 | (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \ | ||
117 | (function * 0x8))) | ||
118 | #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ | ||
119 | (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ | ||
120 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ | ||
121 | (index * 0x4))) | ||
122 | #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ | ||
123 | (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ | ||
124 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ | ||
125 | (index * 0x4))) | ||
126 | #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ | ||
127 | (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ | ||
128 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) | ||
129 | #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ | ||
130 | (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ | ||
131 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) | ||
132 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET \ | ||
133 | (IS_E1H_OFFSET? 0x9000 : 0x1000) | ||
134 | #define XSTORM_ASSERT_LIST_OFFSET(idx) \ | ||
135 | (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | ||
136 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ | ||
137 | (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) | ||
138 | #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | ||
139 | (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \ | ||
140 | * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ | ||
141 | 0x4))) | ||
142 | #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | ||
143 | (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \ | ||
144 | * 0xa0)) : (0x1400 + (function * 0x28))) | ||
145 | #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | ||
146 | (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \ | ||
147 | * 0xa0)) : (0x1408 + (function * 0x28))) | ||
148 | #define XSTORM_E1HOV_OFFSET(function) \ | ||
149 | (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff) | ||
150 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ | ||
151 | (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \ | ||
152 | (function * 0x8))) | ||
153 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ | ||
154 | (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \ | ||
155 | (function * 0x70))) | ||
156 | #define XSTORM_FUNCTION_MODE_OFFSET \ | ||
157 | (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff) | ||
158 | #define XSTORM_HC_BTR_OFFSET(port) \ | ||
159 | (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) | ||
160 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ | ||
161 | (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \ | ||
162 | (function * 0x70))) | ||
163 | #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ | ||
164 | (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \ | ||
165 | (function * 0x10))) | ||
166 | #define XSTORM_SPQ_PROD_OFFSET(function) \ | ||
167 | (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \ | ||
168 | (function * 0x10))) | ||
169 | #define XSTORM_STATS_FLAGS_OFFSET(function) \ | ||
170 | (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \ | ||
171 | (function * 0x8))) | ||
73 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 | 172 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 |
74 | 173 | ||
75 | /** | 174 | /** |
76 | * This file defines HSI constants for the ETH flow | 175 | * This file defines HSI constants for the ETH flow |
77 | */ | 176 | */ |
78 | 177 | #ifdef _EVEREST_MICROCODE | |
79 | /* hash types */ | 178 | #include "microcode_constants.h" |
80 | #define DEFAULT_HASH_TYPE 0 | 179 | #include "eth_rx_bd.h" |
81 | #define IPV4_HASH_TYPE 1 | 180 | #include "eth_tx_bd.h" |
82 | #define TCP_IPV4_HASH_TYPE 2 | 181 | #include "eth_rx_cqe.h" |
83 | #define IPV6_HASH_TYPE 3 | 182 | #include "eth_rx_sge.h" |
84 | #define TCP_IPV6_HASH_TYPE 4 | 183 | #include "eth_rx_cqe_next_page.h" |
184 | #endif | ||
185 | |||
186 | /* RSS hash types */ | ||
187 | #define DEFAULT_HASH_TYPE 0 | ||
188 | #define IPV4_HASH_TYPE 1 | ||
189 | #define TCP_IPV4_HASH_TYPE 2 | ||
190 | #define IPV6_HASH_TYPE 3 | ||
191 | #define TCP_IPV6_HASH_TYPE 4 | ||
192 | |||
193 | /* Ethernet Ring parameters */ | ||
194 | #define X_ETH_LOCAL_RING_SIZE 13 | ||
195 | #define FIRST_BD_IN_PKT 0 | ||
196 | #define PARSE_BD_INDEX 1 | ||
197 | #define NUM_OF_ETH_BDS_IN_PAGE \ | ||
198 | ((PAGE_SIZE) / (STRUCT_SIZE(eth_tx_bd)/8)) | ||
199 | |||
200 | |||
201 | /* Rx ring params */ | ||
202 | #define U_ETH_LOCAL_BD_RING_SIZE (16) | ||
203 | #define U_ETH_LOCAL_SGE_RING_SIZE (12) | ||
204 | #define U_ETH_SGL_SIZE (8) | ||
205 | |||
206 | |||
207 | #define U_ETH_BDS_PER_PAGE_MASK \ | ||
208 | ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))-1) | ||
209 | #define U_ETH_CQE_PER_PAGE_MASK \ | ||
210 | ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))-1) | ||
211 | #define U_ETH_SGES_PER_PAGE_MASK \ | ||
212 | ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))-1) | ||
213 | |||
214 | #define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ | ||
215 | (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) | ||
216 | |||
217 | |||
218 | #define TU_ETH_CQES_PER_PAGE \ | ||
219 | (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe_next_page)/8)) | ||
220 | #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) | ||
221 | #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) | ||
222 | |||
223 | #define U_ETH_UNDEFINED_Q 0xFF | ||
85 | 224 | ||
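The per-page counts and masks above are all derived the same way: PAGE_SIZE divided by the element size, where STRUCT_SIZE() appears to report sizes in bits (hence the division by 8) — an assumption this sketch makes explicit. With the 8-byte eth_rx_bd defined later in this patch, a 4K page holds 512 BDs and the mask locates an index within its page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                 /* assuming 4K pages, as on x86 */

/* Matches the eth_rx_bd layout defined later in this patch: two u32s. */
struct eth_rx_bd {
        uint32_t addr_lo;
        uint32_t addr_hi;
};

/* The header's PAGE_SIZE / (STRUCT_SIZE(eth_rx_bd) / 8), with STRUCT_SIZE()
 * assumed to yield the size in bits. */
#define U_ETH_BDS_PER_PAGE      (PAGE_SIZE / sizeof(struct eth_rx_bd))
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE - 1)

int main(void)
{
        unsigned int prod = 1023;

        printf("%zu BDs per page, mask 0x%zx\n",
               (size_t)U_ETH_BDS_PER_PAGE, (size_t)U_ETH_BDS_PER_PAGE_MASK);

        /* where a running producer index falls within its current page */
        printf("index %u sits in slot %zu of its page\n",
               prod, (size_t)(prod & U_ETH_BDS_PER_PAGE_MASK));
        return 0;
}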
86 | /* values of command IDs in the ramrod message */ | 225 | /* values of command IDs in the ramrod message */ |
87 | #define RAMROD_CMD_ID_ETH_PORT_SETUP (80) | 226 | #define RAMROD_CMD_ID_ETH_PORT_SETUP (80) |
88 | #define RAMROD_CMD_ID_ETH_CLIENT_SETUP (85) | 227 | #define RAMROD_CMD_ID_ETH_CLIENT_SETUP (85) |
89 | #define RAMROD_CMD_ID_ETH_STAT_QUERY (90) | 228 | #define RAMROD_CMD_ID_ETH_STAT_QUERY (90) |
90 | #define RAMROD_CMD_ID_ETH_UPDATE (100) | 229 | #define RAMROD_CMD_ID_ETH_UPDATE (100) |
91 | #define RAMROD_CMD_ID_ETH_HALT (105) | 230 | #define RAMROD_CMD_ID_ETH_HALT (105) |
92 | #define RAMROD_CMD_ID_ETH_SET_MAC (110) | 231 | #define RAMROD_CMD_ID_ETH_SET_MAC (110) |
93 | #define RAMROD_CMD_ID_ETH_CFC_DEL (115) | 232 | #define RAMROD_CMD_ID_ETH_CFC_DEL (115) |
94 | #define RAMROD_CMD_ID_ETH_PORT_DEL (120) | 233 | #define RAMROD_CMD_ID_ETH_PORT_DEL (120) |
95 | #define RAMROD_CMD_ID_ETH_FORWARD_SETUP (125) | 234 | #define RAMROD_CMD_ID_ETH_FORWARD_SETUP (125) |
96 | 235 | ||
97 | 236 | ||
98 | /* command values for set mac command */ | 237 | /* command values for set mac command */ |
99 | #define T_ETH_MAC_COMMAND_SET 0 | 238 | #define T_ETH_MAC_COMMAND_SET 0 |
100 | #define T_ETH_MAC_COMMAND_INVALIDATE 1 | 239 | #define T_ETH_MAC_COMMAND_INVALIDATE 1 |
240 | |||
241 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 | ||
101 | 242 | ||
102 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 | 243 | /* The CRC32 seed used for the multicast address hash (reduction) */ |
244 | #define T_ETH_CRC32_HASH_SEED 0x00000000 | ||
103 | 245 | ||
104 | /* Maximal L2 clients supported */ | 246 | /* Maximal L2 clients supported */ |
105 | #define ETH_MAX_RX_CLIENTS (18) | 247 | #define ETH_MAX_RX_CLIENTS_E1 19 |
248 | #define ETH_MAX_RX_CLIENTS_E1H 25 | ||
249 | |||
250 | /* Maximal aggregation queues supported */ | ||
251 | #define ETH_MAX_AGGREGATION_QUEUES_E1 (32) | ||
252 | #define ETH_MAX_AGGREGATION_QUEUES_E1H (64) | ||
253 | |||
106 | 254 | ||
107 | /** | 255 | /** |
108 | * This file defines HSI constants common to all microcode flows | 256 | * This file defines HSI constants common to all microcode flows |
109 | */ | 257 | */ |
110 | 258 | ||
111 | /* Connection types */ | 259 | /* Connection types */ |
112 | #define ETH_CONNECTION_TYPE 0 | 260 | #define ETH_CONNECTION_TYPE 0 |
261 | #define TOE_CONNECTION_TYPE 1 | ||
262 | #define RDMA_CONNECTION_TYPE 2 | ||
263 | #define ISCSI_CONNECTION_TYPE 3 | ||
264 | #define FCOE_CONNECTION_TYPE 4 | ||
265 | #define RESERVED_CONNECTION_TYPE_0 5 | ||
266 | #define RESERVED_CONNECTION_TYPE_1 6 | ||
267 | #define RESERVED_CONNECTION_TYPE_2 7 | ||
268 | |||
113 | 269 | ||
114 | #define PROTOCOL_STATE_BIT_OFFSET 6 | 270 | #define PROTOCOL_STATE_BIT_OFFSET 6 |
115 | 271 | ||
116 | #define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | 272 | #define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) |
273 | #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | ||
274 | #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | ||
275 | #define ISCSI_STATE \ | ||
276 | (ISCSI_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | ||
277 | #define FCOE_STATE (FCOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | ||
117 | 278 | ||
118 | /* microcode fixed page size 4K (chains and ring segments) */ | 279 | /* microcode fixed page size 4K (chains and ring segments) */ |
119 | #define MC_PAGE_SIZE (4096) | 280 | #define MC_PAGE_SIZE (4096) |
120 | 281 | ||
121 | /* Host coalescing constants */ | ||
122 | 282 | ||
123 | /* IGU constants */ | 283 | /* Host coalescing constants */ |
124 | #define IGU_PORT_BASE 0x0400 | ||
125 | |||
126 | #define IGU_ADDR_MSIX 0x0000 | ||
127 | #define IGU_ADDR_INT_ACK 0x0200 | ||
128 | #define IGU_ADDR_PROD_UPD 0x0201 | ||
129 | #define IGU_ADDR_ATTN_BITS_UPD 0x0202 | ||
130 | #define IGU_ADDR_ATTN_BITS_SET 0x0203 | ||
131 | #define IGU_ADDR_ATTN_BITS_CLR 0x0204 | ||
132 | #define IGU_ADDR_COALESCE_NOW 0x0205 | ||
133 | #define IGU_ADDR_SIMD_MASK 0x0206 | ||
134 | #define IGU_ADDR_SIMD_NOMASK 0x0207 | ||
135 | #define IGU_ADDR_MSI_CTL 0x0210 | ||
136 | #define IGU_ADDR_MSI_ADDR_LO 0x0211 | ||
137 | #define IGU_ADDR_MSI_ADDR_HI 0x0212 | ||
138 | #define IGU_ADDR_MSI_DATA 0x0213 | ||
139 | |||
140 | #define IGU_INT_ENABLE 0 | ||
141 | #define IGU_INT_DISABLE 1 | ||
142 | #define IGU_INT_NOP 2 | ||
143 | #define IGU_INT_NOP2 3 | ||
144 | 284 | ||
145 | /* index numbers */ | 285 | /* index numbers */ |
146 | #define HC_USTORM_DEF_SB_NUM_INDICES 4 | 286 | #define HC_USTORM_DEF_SB_NUM_INDICES 4 |
147 | #define HC_CSTORM_DEF_SB_NUM_INDICES 8 | 287 | #define HC_CSTORM_DEF_SB_NUM_INDICES 8 |
148 | #define HC_XSTORM_DEF_SB_NUM_INDICES 4 | 288 | #define HC_XSTORM_DEF_SB_NUM_INDICES 4 |
149 | #define HC_TSTORM_DEF_SB_NUM_INDICES 4 | 289 | #define HC_TSTORM_DEF_SB_NUM_INDICES 4 |
150 | #define HC_USTORM_SB_NUM_INDICES 4 | 290 | #define HC_USTORM_SB_NUM_INDICES 4 |
151 | #define HC_CSTORM_SB_NUM_INDICES 4 | 291 | #define HC_CSTORM_SB_NUM_INDICES 4 |
152 | 292 | ||
153 | /* index values - which counter to update */ | 293 | /* index values - which counter to update */ |
154 | 294 | ||
155 | #define HC_INDEX_U_ETH_RX_CQ_CONS 1 | 295 | #define HC_INDEX_U_TOE_RX_CQ_CONS 0 |
296 | #define HC_INDEX_U_ETH_RX_CQ_CONS 1 | ||
297 | #define HC_INDEX_U_ETH_RX_BD_CONS 2 | ||
298 | #define HC_INDEX_U_FCOE_EQ_CONS 3 | ||
299 | |||
300 | #define HC_INDEX_C_TOE_TX_CQ_CONS 0 | ||
301 | #define HC_INDEX_C_ETH_TX_CQ_CONS 1 | ||
302 | #define HC_INDEX_C_ISCSI_EQ_CONS 2 | ||
303 | |||
304 | #define HC_INDEX_DEF_X_SPQ_CONS 0 | ||
156 | 305 | ||
157 | #define HC_INDEX_C_ETH_TX_CQ_CONS 1 | 306 | #define HC_INDEX_DEF_C_RDMA_EQ_CONS 0 |
307 | #define HC_INDEX_DEF_C_RDMA_NAL_PROD 1 | ||
308 | #define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2 | ||
309 | #define HC_INDEX_DEF_C_ETH_SLOW_PATH 3 | ||
310 | #define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4 | ||
311 | #define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5 | ||
158 | 312 | ||
159 | #define HC_INDEX_DEF_X_SPQ_CONS 0 | 313 | #define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0 |
314 | #define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1 | ||
315 | #define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2 | ||
316 | #define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3 | ||
160 | 317 | ||
161 | #define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2 | ||
162 | #define HC_INDEX_DEF_C_ETH_SLOW_PATH 3 | ||
163 | 318 | ||
164 | /* used by the driver to get the SB offset */ | 319 | /* used by the driver to get the SB offset */ |
165 | #define USTORM_ID 0 | 320 | #define USTORM_ID 0 |
166 | #define CSTORM_ID 1 | 321 | #define CSTORM_ID 1 |
167 | #define XSTORM_ID 2 | 322 | #define XSTORM_ID 2 |
168 | #define TSTORM_ID 3 | 323 | #define TSTORM_ID 3 |
169 | #define ATTENTION_ID 4 | 324 | #define ATTENTION_ID 4 |
170 | 325 | ||
171 | /* max number of slow path commands per port */ | 326 | /* max number of slow path commands per port */ |
172 | #define MAX_RAMRODS_PER_PORT (8) | 327 | #define MAX_RAMRODS_PER_PORT (8) |
173 | 328 | ||
174 | /* values for RX ETH CQE type field */ | 329 | /* values for RX ETH CQE type field */ |
175 | #define RX_ETH_CQE_TYPE_ETH_FASTPATH (0) | 330 | #define RX_ETH_CQE_TYPE_ETH_FASTPATH (0) |
176 | #define RX_ETH_CQE_TYPE_ETH_RAMROD (1) | 331 | #define RX_ETH_CQE_TYPE_ETH_RAMROD (1) |
177 | 332 | ||
178 | /* MAC address list size */ | 333 | |
179 | #define T_MAC_ADDRESS_LIST_SIZE (96) | 334 | /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ |
180 | 335 | #define EMULATION_FREQUENCY_FACTOR (1600) | |
336 | #define FPGA_FREQUENCY_FACTOR (100) | ||
337 | |||
338 | #define TIMERS_TICK_SIZE_CHIP (1e-3) | ||
339 | #define TIMERS_TICK_SIZE_EMUL \ | ||
340 | ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR))) | ||
341 | #define TIMERS_TICK_SIZE_FPGA \ | ||
342 | ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR))) | ||
343 | |||
344 | #define TSEMI_CLK1_RESUL_CHIP (1e-3) | ||
345 | #define TSEMI_CLK1_RESUL_EMUL \ | ||
346 | ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR)) | ||
347 | #define TSEMI_CLK1_RESUL_FPGA \ | ||
348 | ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR)) | ||
349 | |||
350 | #define USEMI_CLK1_RESUL_CHIP \ | ||
351 | (TIMERS_TICK_SIZE_CHIP) | ||
352 | #define USEMI_CLK1_RESUL_EMUL \ | ||
353 | (TIMERS_TICK_SIZE_EMUL) | ||
354 | #define USEMI_CLK1_RESUL_FPGA \ | ||
355 | (TIMERS_TICK_SIZE_FPGA) | ||
356 | |||
357 | #define XSEMI_CLK1_RESUL_CHIP (1e-3) | ||
358 | #define XSEMI_CLK1_RESUL_EMUL \ | ||
359 | ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR)) | ||
360 | #define XSEMI_CLK1_RESUL_FPGA \ | ||
361 | ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR)) | ||
362 | |||
363 | #define XSEMI_CLK2_RESUL_CHIP (1e-6) | ||
364 | #define XSEMI_CLK2_RESUL_EMUL \ | ||
365 | ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR)) | ||
366 | #define XSEMI_CLK2_RESUL_FPGA \ | ||
367 | ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR)) | ||
368 | |||
369 | #define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) | ||
370 | #define SDM_TIMER_TICK_RESUL_EMUL \ | ||
371 | ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR)) | ||
372 | #define SDM_TIMER_TICK_RESUL_FPGA \ | ||
373 | ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR)) | ||
374 | |||
375 | |||
376 | /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ | ||
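These resolution constants give the tick length of each timer block in seconds, with the emulation and FPGA variants derived by dividing through the corresponding frequency factor. A quick sketch of turning a wall-clock interval into ticks on real silicon:

#include <stdio.h>

/* Tick lengths in seconds on real silicon, from the block above. */
#define TIMERS_TICK_SIZE_CHIP      (1e-3)
#define SDM_TIMER_TICK_RESUL_CHIP  (4 * (1e-6))

int main(void)
{
        double timeout_us = 200.0;      /* made-up example interval */

        /* one SDM tick is 4 us, so 200 us comes out as 50 ticks */
        printf("%.0f SDM ticks\n",
               (timeout_us * 1e-6) / SDM_TIMER_TICK_RESUL_CHIP);

        /* the coarse timers block ticks once per millisecond */
        printf("%.0f timer ticks per second\n", 1.0 / TIMERS_TICK_SIZE_CHIP);
        return 0;
}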
181 | #define XSTORM_IP_ID_ROLL_HALF 0x8000 | 377 | #define XSTORM_IP_ID_ROLL_HALF 0x8000 |
182 | #define XSTORM_IP_ID_ROLL_ALL 0 | 378 | #define XSTORM_IP_ID_ROLL_ALL 0 |
183 | 379 | ||
184 | #define FW_LOG_LIST_SIZE (50) | 380 | #define FW_LOG_LIST_SIZE (50) |
381 | |||
382 | #define NUM_OF_PROTOCOLS 4 | ||
383 | #define MAX_COS_NUMBER 16 | ||
384 | #define MAX_T_STAT_COUNTER_ID 18 | ||
385 | #define MAX_X_STAT_COUNTER_ID 18 | ||
185 | 386 | ||
186 | #define NUM_OF_PROTOCOLS 4 | 387 | #define UNKNOWN_ADDRESS 0 |
187 | #define MAX_COS_NUMBER 16 | 388 | #define UNICAST_ADDRESS 1 |
188 | #define MAX_T_STAT_COUNTER_ID 18 | 389 | #define MULTICAST_ADDRESS 2 |
390 | #define BROADCAST_ADDRESS 3 | ||
189 | 391 | ||
190 | #define T_FAIR 1 | 392 | #define SINGLE_FUNCTION 0 |
191 | #define FAIR_MEM 2 | 393 | #define MULTI_FUNCTION 1 |
192 | #define RS_PERIODIC_TIMEOUT_IN_SDM_TICS 25 | ||
193 | 394 | ||
194 | #define UNKNOWN_ADDRESS 0 | 395 | #define IP_V4 0 |
195 | #define UNICAST_ADDRESS 1 | 396 | #define IP_V6 1 |
196 | #define MULTICAST_ADDRESS 2 | ||
197 | #define BROADCAST_ADDRESS 3 | ||
198 | 397 | ||
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h index 96208ace1466..e515d68ea20f 100644 --- a/drivers/net/bnx2x_hsi.h +++ b/drivers/net/bnx2x_hsi.h | |||
@@ -132,6 +132,12 @@ struct shared_hw_cfg { /* NVRAM Offset */ | |||
132 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008 | 132 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008 |
133 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G 0x00000009 | 133 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G 0x00000009 |
134 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G 0x0000000a | 134 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G 0x0000000a |
135 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1023G 0x0000000b | ||
136 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1033G 0x0000000c | ||
137 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957711T1101 0x0000000d | ||
138 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957711ET1201 0x0000000e | ||
139 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957711A1133G 0x0000000f | ||
140 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957711EA1233G 0x00000010 | ||
135 | 141 | ||
136 | #define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000 | 142 | #define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000 |
137 | #define SHARED_HW_CFG_BOARD_VER_SHIFT 16 | 143 | #define SHARED_HW_CFG_BOARD_VER_SHIFT 16 |
@@ -313,6 +319,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ | |||
313 | 319 | ||
314 | u32 config; /* 0x450 */ | 320 | u32 config; /* 0x450 */ |
315 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | 321 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 |
322 | #define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100 | ||
316 | 323 | ||
317 | }; | 324 | }; |
318 | 325 | ||
@@ -502,20 +509,20 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ | |||
502 | }; | 509 | }; |
503 | 510 | ||
504 | 511 | ||
505 | /***************************************************************************** | 512 | /**************************************************************************** |
506 | * Device Information * | 513 | * Device Information * |
507 | *****************************************************************************/ | 514 | ****************************************************************************/ |
508 | struct dev_info { /* size */ | 515 | struct dev_info { /* size */ |
509 | 516 | ||
510 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ | 517 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ |
511 | 518 | ||
512 | struct shared_hw_cfg shared_hw_config; /* 40 */ | 519 | struct shared_hw_cfg shared_hw_config; /* 40 */ |
513 | 520 | ||
514 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ | 521 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ |
515 | 522 | ||
516 | struct shared_feat_cfg shared_feature_config; /* 4 */ | 523 | struct shared_feat_cfg shared_feature_config; /* 4 */ |
517 | 524 | ||
518 | struct port_feat_cfg port_feature_config[PORT_MAX]; /* 116*2=232 */ | 525 | struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ |
519 | 526 | ||
520 | }; | 527 | }; |
521 | 528 | ||
@@ -632,7 +639,9 @@ struct drv_port_mb { | |||
632 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 | 639 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 |
633 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 | 640 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 |
634 | 641 | ||
635 | u32 reserved[3]; | 642 | u32 port_stx; |
643 | |||
644 | u32 reserved[2]; | ||
636 | 645 | ||
637 | }; | 646 | }; |
638 | 647 | ||
@@ -655,6 +664,11 @@ struct drv_func_mb { | |||
655 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 | 664 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 |
656 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 | 665 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 |
657 | 666 | ||
667 | #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
668 | #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
669 | #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
670 | #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
671 | |||
658 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | 672 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff |
659 | 673 | ||
660 | u32 drv_mb_param; | 674 | u32 drv_mb_param; |
@@ -684,6 +698,11 @@ struct drv_func_mb { | |||
684 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 | 698 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 |
685 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 | 699 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 |
686 | 700 | ||
701 | #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
702 | #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
703 | #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
704 | #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
705 | |||
687 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | 706 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff |
688 | 707 | ||
689 | u32 fw_mb_param; | 708 | u32 fw_mb_param; |
@@ -709,7 +728,13 @@ struct drv_func_mb { | |||
709 | u32 iscsi_boot_signature; | 728 | u32 iscsi_boot_signature; |
710 | u32 iscsi_boot_block_offset; | 729 | u32 iscsi_boot_block_offset; |
711 | 730 | ||
712 | u32 reserved[3]; | 731 | u32 drv_status; |
732 | #define DRV_STATUS_PMF 0x00000001 | ||
733 | |||
734 | u32 virt_mac_upper; | ||
735 | #define VIRT_MAC_SIGN_MASK 0xffff0000 | ||
736 | #define VIRT_MAC_SIGNATURE 0x564d0000 | ||
737 | u32 virt_mac_lower; | ||
713 | 738 | ||
714 | }; | 739 | }; |
715 | 740 | ||
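The new virt_mac_upper/virt_mac_lower pair lets management firmware hand the driver a virtual MAC address, with VIRT_MAC_SIGNATURE ("VM" in the top bytes) marking the fields as valid. How the six MAC octets are spread across the two words is not spelled out here, so the split below (two high octets in the low half of virt_mac_upper, the rest in virt_mac_lower) is an assumption for illustration only:

#include <stdint.h>
#include <stdio.h>

#define VIRT_MAC_SIGN_MASK   0xffff0000
#define VIRT_MAC_SIGNATURE   0x564d0000   /* "VM" in the upper bytes */

/* Hypothetical helper: returns 0 and fills mac[6] when the signature checks
 * out.  The octet ordering is assumed, not taken from the header. */
static int virt_mac_extract(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
        if ((upper & VIRT_MAC_SIGN_MASK) != VIRT_MAC_SIGNATURE)
                return -1;

        mac[0] = (upper >> 8) & 0xff;
        mac[1] = upper & 0xff;
        mac[2] = (lower >> 24) & 0xff;
        mac[3] = (lower >> 16) & 0xff;
        mac[4] = (lower >> 8) & 0xff;
        mac[5] = lower & 0xff;
        return 0;
}

int main(void)
{
        uint8_t mac[6];

        /* made-up example values */
        if (!virt_mac_extract(0x564d0050, 0xc22c1234, mac))
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}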
@@ -726,6 +751,92 @@ struct mgmtfw_state { | |||
726 | 751 | ||
727 | 752 | ||
728 | /**************************************************************************** | 753 | /**************************************************************************** |
754 | * Multi-Function configuration * | ||
755 | ****************************************************************************/ | ||
756 | struct shared_mf_cfg { | ||
757 | |||
758 | u32 clp_mb; | ||
759 | #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 | ||
760 | /* set by CLP */ | ||
761 | #define SHARED_MF_CLP_EXIT 0x00000001 | ||
762 | /* set by MCP */ | ||
763 | #define SHARED_MF_CLP_EXIT_DONE 0x00010000 | ||
764 | |||
765 | }; | ||
766 | |||
767 | struct port_mf_cfg { | ||
768 | |||
769 | u32 dynamic_cfg; /* device control channel */ | ||
770 | #define PORT_MF_CFG_OUTER_VLAN_TAG_MASK 0x0000ffff | ||
771 | #define PORT_MF_CFG_OUTER_VLAN_TAG_SHIFT 0 | ||
772 | #define PORT_MF_CFG_DYNAMIC_CFG_ENABLED 0x00010000 | ||
773 | #define PORT_MF_CFG_DYNAMIC_CFG_DEFAULT 0x00000000 | ||
774 | |||
775 | u32 reserved[3]; | ||
776 | |||
777 | }; | ||
778 | |||
779 | struct func_mf_cfg { | ||
780 | |||
781 | u32 config; | ||
782 | /* E/R/I/D */ | ||
783 | /* function 0 of each port cannot be hidden */ | ||
784 | #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 | ||
785 | |||
786 | #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007 | ||
787 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 | ||
788 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 | ||
789 | #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 | ||
790 | #define FUNC_MF_CFG_PROTOCOL_DEFAULT\ | ||
791 | FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA | ||
792 | |||
793 | #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 | ||
794 | |||
795 | /* PRI */ | ||
796 | /* 0 - low priority, 3 - high priority */ | ||
797 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 | ||
798 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 | ||
799 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 | ||
800 | |||
801 | /* MINBW, MAXBW */ | ||
802 | /* value range - 0..100, increments of 100 Mbps */ | ||
803 | #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 | ||
804 | #define FUNC_MF_CFG_MIN_BW_SHIFT 16 | ||
805 | #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 | ||
806 | #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 | ||
807 | #define FUNC_MF_CFG_MAX_BW_SHIFT 24 | ||
808 | #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 | ||
809 | |||
810 | u32 mac_upper; /* MAC */ | ||
811 | #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff | ||
812 | #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 | ||
813 | #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK | ||
814 | u32 mac_lower; | ||
815 | #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff | ||
816 | |||
817 | u32 e1hov_tag; /* VNI */ | ||
818 | #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff | ||
819 | #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 | ||
820 | #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK | ||
821 | |||
822 | u32 reserved[2]; | ||
823 | |||
824 | }; | ||
825 | |||
826 | struct mf_cfg { | ||
827 | |||
828 | struct shared_mf_cfg shared_mf_config; | ||
829 | struct port_mf_cfg port_mf_config[PORT_MAX]; | ||
830 | #if defined(b710) | ||
831 | struct func_mf_cfg func_mf_config[E1_FUNC_MAX]; | ||
832 | #else | ||
833 | struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; | ||
834 | #endif | ||
835 | |||
836 | }; | ||
837 | |||
838 | |||
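struct func_mf_cfg packs the per-function multi-function policy into bit fields: protocol selection in the low bits, transmit priority, and minimum/maximum bandwidth expressed in steps of 100 Mbps. A sketch of decoding one config word with the masks above (the example value is made up):

#include <stdint.h>
#include <stdio.h>

/* Masks copied from struct func_mf_cfg above. */
#define FUNC_MF_CFG_MIN_BW_MASK     0x00ff0000
#define FUNC_MF_CFG_MIN_BW_SHIFT    16
#define FUNC_MF_CFG_MAX_BW_MASK     0xff000000
#define FUNC_MF_CFG_MAX_BW_SHIFT    24
#define FUNC_MF_CFG_MAX_BW_DEFAULT  0x64000000  /* 0x64 = 100, i.e. 10 Gbps */

int main(void)
{
        uint32_t config = FUNC_MF_CFG_MAX_BW_DEFAULT |
                          (25 << FUNC_MF_CFG_MIN_BW_SHIFT);

        uint32_t min_bw = (config & FUNC_MF_CFG_MIN_BW_MASK) >>
                          FUNC_MF_CFG_MIN_BW_SHIFT;
        uint32_t max_bw = (config & FUNC_MF_CFG_MAX_BW_MASK) >>
                          FUNC_MF_CFG_MAX_BW_SHIFT;

        /* values range 0..100 in units of 100 Mbps, per the comment above */
        printf("min %u Mbps, max %u Mbps\n", min_bw * 100, max_bw * 100);
        return 0;
}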
839 | /**************************************************************************** | ||
729 | * Shared Memory Region * | 840 | * Shared Memory Region * |
730 | ****************************************************************************/ | 841 | ****************************************************************************/ |
731 | struct shmem_region { /* SharedMem Offset (size) */ | 842 | struct shmem_region { /* SharedMem Offset (size) */ |
@@ -760,18 +871,18 @@ struct shmem_region { /* SharedMem Offset (size) */ | |||
760 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ | 871 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ |
761 | 872 | ||
762 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | 873 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ |
763 | #if defined(b710) | ||
764 | struct drv_func_mb func_mb[E1_FUNC_MAX]; /* 0x684 (44*2=0x58) */ | ||
765 | #else | ||
766 | struct drv_func_mb func_mb[E1H_FUNC_MAX]; | 874 | struct drv_func_mb func_mb[E1H_FUNC_MAX]; |
767 | #endif | 875 | |
876 | struct mf_cfg mf_cfg; | ||
768 | 877 | ||
769 | }; /* 0x6dc */ | 878 | }; /* 0x6dc */ |
770 | 879 | ||
771 | 880 | ||
881 | |||
882 | |||
772 | #define BCM_5710_FW_MAJOR_VERSION 4 | 883 | #define BCM_5710_FW_MAJOR_VERSION 4 |
773 | #define BCM_5710_FW_MINOR_VERSION 0 | 884 | #define BCM_5710_FW_MINOR_VERSION 5 |
774 | #define BCM_5710_FW_REVISION_VERSION 14 | 885 | #define BCM_5710_FW_REVISION_VERSION 1 |
775 | #define BCM_5710_FW_COMPILE_FLAGS 1 | 886 | #define BCM_5710_FW_COMPILE_FLAGS 1 |
776 | 887 | ||
777 | 888 | ||
@@ -810,7 +921,7 @@ struct doorbell_hdr { | |||
810 | }; | 921 | }; |
811 | 922 | ||
812 | /* | 923 | /* |
813 | * doorbell message send to the chip | 924 | * doorbell message sent to the chip |
814 | */ | 925 | */ |
815 | struct doorbell { | 926 | struct doorbell { |
816 | #if defined(__BIG_ENDIAN) | 927 | #if defined(__BIG_ENDIAN) |
@@ -866,8 +977,10 @@ struct parsing_flags { | |||
866 | u16 flags; | 977 | u16 flags; |
867 | #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) | 978 | #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) |
868 | #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 | 979 | #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 |
869 | #define PARSING_FLAGS_NUMBER_OF_NESTED_VLANS (0x3<<1) | 980 | #define PARSING_FLAGS_VLAN (0x1<<1) |
870 | #define PARSING_FLAGS_NUMBER_OF_NESTED_VLANS_SHIFT 1 | 981 | #define PARSING_FLAGS_VLAN_SHIFT 1 |
982 | #define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) | ||
983 | #define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 | ||
871 | #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) | 984 | #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) |
872 | #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 | 985 | #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 |
873 | #define PARSING_FLAGS_IP_OPTIONS (0x1<<5) | 986 | #define PARSING_FLAGS_IP_OPTIONS (0x1<<5) |
@@ -891,6 +1004,12 @@ struct parsing_flags { | |||
891 | }; | 1004 | }; |
892 | 1005 | ||
893 | 1006 | ||
1007 | struct regpair { | ||
1008 | u32 lo; | ||
1009 | u32 hi; | ||
1010 | }; | ||
1011 | |||
1012 | |||
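struct regpair (moved here from further down in the file) is the header's generic carrier for 64-bit quantities such as DMA addresses, split into low and high 32-bit words. A small sketch of filling one from a 64-bit host address:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct regpair above (u32 lo; u32 hi;). */
struct regpair {
        uint32_t lo;
        uint32_t hi;
};

static struct regpair regpair_from_u64(uint64_t val)
{
        struct regpair rp;

        rp.lo = (uint32_t)(val & 0xffffffffu);
        rp.hi = (uint32_t)(val >> 32);
        return rp;
}

int main(void)
{
        /* e.g. a DMA-mapped ring base address (made-up value) */
        struct regpair rp = regpair_from_u64(0x0000000123456000ull);

        printf("hi 0x%08x lo 0x%08x\n", rp.hi, rp.lo);
        return 0;
}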
894 | /* | 1013 | /* |
895 | * dmae command structure | 1014 | * dmae command structure |
896 | */ | 1015 | */ |
@@ -971,72 +1090,107 @@ struct double_regpair { | |||
971 | 1090 | ||
972 | 1091 | ||
973 | /* | 1092 | /* |
974 | * The eth Rx Buffer Descriptor | 1093 | * The eth storm context of Ustorm (configuration part) |
975 | */ | 1094 | */ |
976 | struct eth_rx_bd { | 1095 | struct ustorm_eth_st_context_config { |
977 | u32 addr_lo; | ||
978 | u32 addr_hi; | ||
979 | }; | ||
980 | |||
981 | /* | ||
982 | * The eth storm context of Ustorm | ||
983 | */ | ||
984 | struct ustorm_eth_st_context { | ||
985 | #if defined(__BIG_ENDIAN) | 1096 | #if defined(__BIG_ENDIAN) |
986 | u8 sb_index_number; | 1097 | u8 flags; |
1098 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0) | ||
1099 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0 | ||
1100 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1) | ||
1101 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1 | ||
1102 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2) | ||
1103 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2 | ||
1104 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3) | ||
1105 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3 | ||
1106 | #define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4) | ||
1107 | #define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4 | ||
987 | u8 status_block_id; | 1108 | u8 status_block_id; |
988 | u8 __local_rx_bd_cons; | 1109 | u8 clientId; |
989 | u8 __local_rx_bd_prod; | 1110 | u8 sb_index_numbers; |
1111 | #define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0) | ||
1112 | #define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0 | ||
1113 | #define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4) | ||
1114 | #define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4 | ||
990 | #elif defined(__LITTLE_ENDIAN) | 1115 | #elif defined(__LITTLE_ENDIAN) |
991 | u8 __local_rx_bd_prod; | 1116 | u8 sb_index_numbers; |
992 | u8 __local_rx_bd_cons; | 1117 | #define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0) |
1118 | #define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0 | ||
1119 | #define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4) | ||
1120 | #define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4 | ||
1121 | u8 clientId; | ||
993 | u8 status_block_id; | 1122 | u8 status_block_id; |
994 | u8 sb_index_number; | 1123 | u8 flags; |
1124 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0) | ||
1125 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0 | ||
1126 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1) | ||
1127 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1 | ||
1128 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2) | ||
1129 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2 | ||
1130 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3) | ||
1131 | #define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3 | ||
1132 | #define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4) | ||
1133 | #define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4 | ||
995 | #endif | 1134 | #endif |
996 | #if defined(__BIG_ENDIAN) | 1135 | #if defined(__BIG_ENDIAN) |
997 | u16 rcq_cons; | 1136 | u16 bd_buff_size; |
998 | u16 rx_bd_cons; | 1137 | u16 mc_alignment_size; |
999 | #elif defined(__LITTLE_ENDIAN) | 1138 | #elif defined(__LITTLE_ENDIAN) |
1000 | u16 rx_bd_cons; | 1139 | u16 mc_alignment_size; |
1001 | u16 rcq_cons; | 1140 | u16 bd_buff_size; |
1002 | #endif | 1141 | #endif |
1003 | u32 rx_bd_page_base_lo; | ||
1004 | u32 rx_bd_page_base_hi; | ||
1005 | u32 rcq_base_address_lo; | ||
1006 | u32 rcq_base_address_hi; | ||
1007 | #if defined(__BIG_ENDIAN) | 1142 | #if defined(__BIG_ENDIAN) |
1008 | u16 __num_of_returned_cqes; | 1143 | u8 __local_sge_prod; |
1009 | u8 num_rss; | 1144 | u8 __local_bd_prod; |
1010 | u8 flags; | 1145 | u16 sge_buff_size; |
1011 | #define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT (0x1<<0) | ||
1012 | #define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT_SHIFT 0 | ||
1013 | #define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC (0x1<<1) | ||
1014 | #define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC_SHIFT 1 | ||
1015 | #define USTORM_ETH_ST_CONTEXT_ENABLE_TPA (0x1<<2) | ||
1016 | #define USTORM_ETH_ST_CONTEXT_ENABLE_TPA_SHIFT 2 | ||
1017 | #define __USTORM_ETH_ST_CONTEXT_RESERVED0 (0x1F<<3) | ||
1018 | #define __USTORM_ETH_ST_CONTEXT_RESERVED0_SHIFT 3 | ||
1019 | #elif defined(__LITTLE_ENDIAN) | 1146 | #elif defined(__LITTLE_ENDIAN) |
1020 | u8 flags; | 1147 | u16 sge_buff_size; |
1021 | #define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT (0x1<<0) | 1148 | u8 __local_bd_prod; |
1022 | #define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT_SHIFT 0 | 1149 | u8 __local_sge_prod; |
1023 | #define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC (0x1<<1) | ||
1024 | #define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC_SHIFT 1 | ||
1025 | #define USTORM_ETH_ST_CONTEXT_ENABLE_TPA (0x1<<2) | ||
1026 | #define USTORM_ETH_ST_CONTEXT_ENABLE_TPA_SHIFT 2 | ||
1027 | #define __USTORM_ETH_ST_CONTEXT_RESERVED0 (0x1F<<3) | ||
1028 | #define __USTORM_ETH_ST_CONTEXT_RESERVED0_SHIFT 3 | ||
1029 | u8 num_rss; | ||
1030 | u16 __num_of_returned_cqes; | ||
1031 | #endif | 1150 | #endif |
1032 | #if defined(__BIG_ENDIAN) | 1151 | #if defined(__BIG_ENDIAN) |
1033 | u16 mc_alignment_size; | 1152 | u16 __bd_cons; |
1034 | u16 agg_threshold; | 1153 | u16 __sge_cons; |
1035 | #elif defined(__LITTLE_ENDIAN) | 1154 | #elif defined(__LITTLE_ENDIAN) |
1036 | u16 agg_threshold; | 1155 | u16 __sge_cons; |
1037 | u16 mc_alignment_size; | 1156 | u16 __bd_cons; |
1038 | #endif | 1157 | #endif |
1158 | u32 bd_page_base_lo; | ||
1159 | u32 bd_page_base_hi; | ||
1160 | u32 sge_page_base_lo; | ||
1161 | u32 sge_page_base_hi; | ||
1162 | }; | ||
1163 | |||
1164 | /* | ||
1165 | * The eth Rx Buffer Descriptor | ||
1166 | */ | ||
1167 | struct eth_rx_bd { | ||
1168 | u32 addr_lo; | ||
1169 | u32 addr_hi; | ||
1170 | }; | ||
1171 | |||
1172 | /* | ||
1173 | * The eth Rx SGE Descriptor | ||
1174 | */ | ||
1175 | struct eth_rx_sge { | ||
1176 | u32 addr_lo; | ||
1177 | u32 addr_hi; | ||
1178 | }; | ||
1179 | |||
1180 | /* | ||
1181 | * Local BDs and SGEs rings (in ETH) | ||
1182 | */ | ||
1183 | struct eth_local_rx_rings { | ||
1039 | struct eth_rx_bd __local_bd_ring[16]; | 1184 | struct eth_rx_bd __local_bd_ring[16]; |
1185 | struct eth_rx_sge __local_sge_ring[12]; | ||
1186 | }; | ||
1187 | |||
1188 | /* | ||
1189 | * The eth storm context of Ustorm | ||
1190 | */ | ||
1191 | struct ustorm_eth_st_context { | ||
1192 | struct ustorm_eth_st_context_config common; | ||
1193 | struct eth_local_rx_rings __rings; | ||
1040 | }; | 1194 | }; |
1041 | 1195 | ||
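The reworked Ustorm context splits into a configuration part and the local BD/SGE rings, and sb_index_numbers now packs two 4-bit selectors: which status-block index tracks CQE completions and which tracks BD consumption. A sketch of composing that byte, pairing it with the HC_INDEX_U_* values from bnx2x_fw_defs.h above (the pairing shown is the natural one, not something this header mandates):

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from ustorm_eth_st_context_config above. */
#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT  0
#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT   4

/* HC index numbers from bnx2x_fw_defs.h above. */
#define HC_INDEX_U_ETH_RX_CQ_CONS  1
#define HC_INDEX_U_ETH_RX_BD_CONS  2

int main(void)
{
        uint8_t sb_index_numbers =
                (HC_INDEX_U_ETH_RX_CQ_CONS <<
                 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) |
                (HC_INDEX_U_ETH_RX_BD_CONS <<
                 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT);

        printf("sb_index_numbers = 0x%02x\n", sb_index_numbers);
        return 0;
}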
1042 | /* | 1196 | /* |
@@ -1107,9 +1261,9 @@ struct xstorm_eth_extra_ag_context_section { | |||
1107 | #if defined(__BIG_ENDIAN) | 1261 | #if defined(__BIG_ENDIAN) |
1108 | u16 __reserved3; | 1262 | u16 __reserved3; |
1109 | u8 __reserved2; | 1263 | u8 __reserved2; |
1110 | u8 __agg_misc7; | 1264 | u8 __da_only_cnt; |
1111 | #elif defined(__LITTLE_ENDIAN) | 1265 | #elif defined(__LITTLE_ENDIAN) |
1112 | u8 __agg_misc7; | 1266 | u8 __da_only_cnt; |
1113 | u8 __reserved2; | 1267 | u8 __reserved2; |
1114 | u16 __reserved3; | 1268 | u16 __reserved3; |
1115 | #endif | 1269 | #endif |
@@ -1387,7 +1541,13 @@ struct timers_block_context { | |||
1387 | u32 __reserved_0; | 1541 | u32 __reserved_0; |
1388 | u32 __reserved_1; | 1542 | u32 __reserved_1; |
1389 | u32 __reserved_2; | 1543 | u32 __reserved_2; |
1390 | u32 __reserved_flags; | 1544 | u32 flags; |
1545 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) | ||
1546 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 | ||
1547 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) | ||
1548 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 | ||
1549 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) | ||
1550 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 | ||
1391 | }; | 1551 | }; |
1392 | 1552 | ||
1393 | /* | 1553 | /* |
@@ -1497,11 +1657,19 @@ struct xstorm_eth_st_context { | |||
1497 | u32 tx_bd_page_base_hi; | 1657 | u32 tx_bd_page_base_hi; |
1498 | #if defined(__BIG_ENDIAN) | 1658 | #if defined(__BIG_ENDIAN) |
1499 | u16 tx_bd_cons; | 1659 | u16 tx_bd_cons; |
1500 | u8 __reserved0; | 1660 | u8 statistics_data; |
1661 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0) | ||
1662 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0 | ||
1663 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7) | ||
1664 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7 | ||
1501 | u8 __local_tx_bd_prod; | 1665 | u8 __local_tx_bd_prod; |
1502 | #elif defined(__LITTLE_ENDIAN) | 1666 | #elif defined(__LITTLE_ENDIAN) |
1503 | u8 __local_tx_bd_prod; | 1667 | u8 __local_tx_bd_prod; |
1504 | u8 __reserved0; | 1668 | u8 statistics_data; |
1669 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0) | ||
1670 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0 | ||
1671 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7) | ||
1672 | #define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7 | ||
1505 | u16 tx_bd_cons; | 1673 | u16 tx_bd_cons; |
1506 | #endif | 1674 | #endif |
1507 | u32 db_data_addr_lo; | 1675 | u32 db_data_addr_lo; |
@@ -1578,7 +1746,7 @@ struct eth_tx_doorbell { | |||
1578 | struct ustorm_def_status_block { | 1746 | struct ustorm_def_status_block { |
1579 | u16 index_values[HC_USTORM_DEF_SB_NUM_INDICES]; | 1747 | u16 index_values[HC_USTORM_DEF_SB_NUM_INDICES]; |
1580 | u16 status_block_index; | 1748 | u16 status_block_index; |
1581 | u8 reserved0; | 1749 | u8 func; |
1582 | u8 status_block_id; | 1750 | u8 status_block_id; |
1583 | u32 __flags; | 1751 | u32 __flags; |
1584 | }; | 1752 | }; |
@@ -1589,7 +1757,7 @@ struct ustorm_def_status_block { | |||
1589 | struct cstorm_def_status_block { | 1757 | struct cstorm_def_status_block { |
1590 | u16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES]; | 1758 | u16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES]; |
1591 | u16 status_block_index; | 1759 | u16 status_block_index; |
1592 | u8 reserved0; | 1760 | u8 func; |
1593 | u8 status_block_id; | 1761 | u8 status_block_id; |
1594 | u32 __flags; | 1762 | u32 __flags; |
1595 | }; | 1763 | }; |
@@ -1600,7 +1768,7 @@ struct cstorm_def_status_block { | |||
1600 | struct xstorm_def_status_block { | 1768 | struct xstorm_def_status_block { |
1601 | u16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES]; | 1769 | u16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES]; |
1602 | u16 status_block_index; | 1770 | u16 status_block_index; |
1603 | u8 reserved0; | 1771 | u8 func; |
1604 | u8 status_block_id; | 1772 | u8 status_block_id; |
1605 | u32 __flags; | 1773 | u32 __flags; |
1606 | }; | 1774 | }; |
@@ -1611,7 +1779,7 @@ struct xstorm_def_status_block { | |||
1611 | struct tstorm_def_status_block { | 1779 | struct tstorm_def_status_block { |
1612 | u16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES]; | 1780 | u16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES]; |
1613 | u16 status_block_index; | 1781 | u16 status_block_index; |
1614 | u8 reserved0; | 1782 | u8 func; |
1615 | u8 status_block_id; | 1783 | u8 status_block_id; |
1616 | u32 __flags; | 1784 | u32 __flags; |
1617 | }; | 1785 | }; |
@@ -1634,7 +1802,7 @@ struct host_def_status_block { | |||
1634 | struct ustorm_status_block { | 1802 | struct ustorm_status_block { |
1635 | u16 index_values[HC_USTORM_SB_NUM_INDICES]; | 1803 | u16 index_values[HC_USTORM_SB_NUM_INDICES]; |
1636 | u16 status_block_index; | 1804 | u16 status_block_index; |
1637 | u8 reserved0; | 1805 | u8 func; |
1638 | u8 status_block_id; | 1806 | u8 status_block_id; |
1639 | u32 __flags; | 1807 | u32 __flags; |
1640 | }; | 1808 | }; |
@@ -1645,7 +1813,7 @@ struct ustorm_status_block { | |||
1645 | struct cstorm_status_block { | 1813 | struct cstorm_status_block { |
1646 | u16 index_values[HC_CSTORM_SB_NUM_INDICES]; | 1814 | u16 index_values[HC_CSTORM_SB_NUM_INDICES]; |
1647 | u16 status_block_index; | 1815 | u16 status_block_index; |
1648 | u8 reserved0; | 1816 | u8 func; |
1649 | u8 status_block_id; | 1817 | u8 status_block_id; |
1650 | u32 __flags; | 1818 | u32 __flags; |
1651 | }; | 1819 | }; |
@@ -1683,20 +1851,21 @@ struct eth_dynamic_hc_config { | |||
1683 | * regular eth FP CQE parameters struct | 1851 | * regular eth FP CQE parameters struct |
1684 | */ | 1852 | */ |
1685 | struct eth_fast_path_rx_cqe { | 1853 | struct eth_fast_path_rx_cqe { |
1686 | u8 type; | 1854 | u8 type_error_flags; |
1687 | u8 error_type_flags; | 1855 | #define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0) |
1688 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<0) | 1856 | #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 |
1689 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 0 | 1857 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1) |
1690 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<1) | 1858 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1 |
1691 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 1 | 1859 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2) |
1692 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<2) | 1860 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2 |
1693 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 2 | 1861 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3) |
1694 | #define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<3) | 1862 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3 |
1695 | #define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 3 | 1863 | #define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4) |
1696 | #define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<4) | 1864 | #define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 |
1697 | #define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 4 | 1865 | #define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) |
1698 | #define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x7<<5) | 1866 | #define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 |
1699 | #define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 5 | 1867 | #define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) |
1868 | #define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 | ||
1700 | u8 status_flags; | 1869 | u8 status_flags; |
1701 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) | 1870 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) |
1702 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 | 1871 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 |
@@ -1711,11 +1880,13 @@ struct eth_fast_path_rx_cqe { | |||
1711 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) | 1880 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) |
1712 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 | 1881 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 |
1713 | u8 placement_offset; | 1882 | u8 placement_offset; |
1883 | u8 queue_index; | ||
1714 | u32 rss_hash_result; | 1884 | u32 rss_hash_result; |
1715 | u16 vlan_tag; | 1885 | u16 vlan_tag; |
1716 | u16 pkt_len; | 1886 | u16 pkt_len; |
1717 | u16 queue_index; | 1887 | u16 len_on_bd; |
1718 | struct parsing_flags pars_flags; | 1888 | struct parsing_flags pars_flags; |
1889 | u16 sgl[8]; | ||
1719 | }; | 1890 | }; |
1720 | 1891 | ||
1721 | 1892 | ||
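In the new CQE layout, bit 0 of type_error_flags tells a fast-path packet completion apart from a ramrod completion (mirroring RX_ETH_CQE_TYPE_ETH_FASTPATH/RAMROD earlier in the patch), and the error flags each moved up one bit. A sketch of classifying a CQE and checking its checksum error bits, with the bit polarity assumed from those type values:

#include <stdint.h>
#include <stdio.h>

/* Flag layout copied from eth_fast_path_rx_cqe above. */
#define ETH_FAST_PATH_RX_CQE_TYPE             (0x1 << 0)
#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG  (0x1 << 2)
#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG  (0x1 << 3)

static void classify_cqe(uint8_t type_error_flags)
{
        if (type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
                /* bit set -> ramrod completion (assumed polarity, matching
                 * RX_ETH_CQE_TYPE_ETH_RAMROD == 1) */
                printf("ramrod completion\n");
                return;
        }

        printf("fast-path packet%s%s\n",
               (type_error_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG) ?
               ", bad IP csum" : "",
               (type_error_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG) ?
               ", bad L4 csum" : "");
}

int main(void)
{
        classify_cqe(0x00);                               /* clean packet */
        classify_cqe(ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG);
        classify_cqe(ETH_FAST_PATH_RX_CQE_TYPE);
        return 0;
}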
@@ -1729,6 +1900,23 @@ struct eth_halt_ramrod_data { | |||
1729 | 1900 | ||
1730 | 1901 | ||
1731 | /* | 1902 | /* |
1903 | * The data for statistics query ramrod | ||
1904 | */ | ||
1905 | struct eth_query_ramrod_data { | ||
1906 | #if defined(__BIG_ENDIAN) | ||
1907 | u8 reserved0; | ||
1908 | u8 collect_port_1b; | ||
1909 | u16 drv_counter; | ||
1910 | #elif defined(__LITTLE_ENDIAN) | ||
1911 | u16 drv_counter; | ||
1912 | u8 collect_port_1b; | ||
1913 | u8 reserved0; | ||
1914 | #endif | ||
1915 | u32 ctr_id_vector; | ||
1916 | }; | ||
1917 | |||
1918 | |||
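The statistics query ramrod carries drv_counter, which lets the driver match a completion back to the request that produced it, plus ctr_id_vector, which by its name is a bitmap of statistics counter IDs to collect; treating it that way is an assumption in the sketch below:

#include <stdint.h>
#include <stdio.h>

/* From bnx2x_fw_defs.h above. */
#define MAX_T_STAT_COUNTER_ID  18

/* Assumption: ctr_id_vector is a bitmap with one bit per counter ID. */
static uint32_t build_ctr_id_vector(const int *ids, int n)
{
        uint32_t vec = 0;

        for (int i = 0; i < n; i++)
                if (ids[i] >= 0 && ids[i] < MAX_T_STAT_COUNTER_ID)
                        vec |= 1u << ids[i];
        return vec;
}

int main(void)
{
        int ids[] = { 0, 2, 17 };       /* made-up counter IDs */

        printf("ctr_id_vector = 0x%08x\n", build_ctr_id_vector(ids, 3));
        return 0;
}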
1919 | /* | ||
1732 | * Place holder for ramrods protocol specific data | 1920 | * Place holder for ramrods protocol specific data |
1733 | */ | 1921 | */ |
1734 | struct ramrod_data { | 1922 | struct ramrod_data { |
@@ -1758,15 +1946,20 @@ struct eth_rx_bd_next_page { | |||
1758 | * Eth Rx Cqe structure - general structure for ramrods | 1946 | * Eth Rx Cqe structure - general structure for ramrods |
1759 | */ | 1947 | */ |
1760 | struct common_ramrod_eth_rx_cqe { | 1948 | struct common_ramrod_eth_rx_cqe { |
1761 | u8 type; | 1949 | u8 ramrod_type; |
1950 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0) | ||
1951 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 | ||
1952 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x7F<<1) | ||
1953 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 1 | ||
1762 | u8 conn_type_3b; | 1954 | u8 conn_type_3b; |
1763 | u16 reserved; | 1955 | u16 reserved1; |
1764 | u32 conn_and_cmd_data; | 1956 | u32 conn_and_cmd_data; |
1765 | #define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) | 1957 | #define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) |
1766 | #define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 | 1958 | #define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 |
1767 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) | 1959 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) |
1768 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 | 1960 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 |
1769 | struct ramrod_data protocol_data; | 1961 | struct ramrod_data protocol_data; |
1962 | u32 reserved2[4]; | ||
1770 | }; | 1963 | }; |
1771 | 1964 | ||
1772 | /* | 1965 | /* |
@@ -1775,8 +1968,7 @@ struct common_ramrod_eth_rx_cqe { | |||
1775 | struct eth_rx_cqe_next_page { | 1968 | struct eth_rx_cqe_next_page { |
1776 | u32 addr_lo; | 1969 | u32 addr_lo; |
1777 | u32 addr_hi; | 1970 | u32 addr_hi; |
1778 | u32 reserved0; | 1971 | u32 reserved[6]; |
1779 | u32 reserved1; | ||
1780 | }; | 1972 | }; |
1781 | 1973 | ||
1782 | /* | 1974 | /* |
@@ -1806,11 +1998,6 @@ struct spe_hdr { | |||
1806 | u16 reserved; | 1998 | u16 reserved; |
1807 | }; | 1999 | }; |
1808 | 2000 | ||
1809 | struct regpair { | ||
1810 | u32 lo; | ||
1811 | u32 hi; | ||
1812 | }; | ||
1813 | |||
1814 | /* | 2001 | /* |
1815 | * ethernet slow path element | 2002 | * ethernet slow path element |
1816 | */ | 2003 | */ |
@@ -1821,6 +2008,7 @@ union eth_specific_data { | |||
1821 | struct eth_halt_ramrod_data halt_ramrod_data; | 2008 | struct eth_halt_ramrod_data halt_ramrod_data; |
1822 | struct regpair leading_cqe_addr; | 2009 | struct regpair leading_cqe_addr; |
1823 | struct regpair update_data_addr; | 2010 | struct regpair update_data_addr; |
2011 | struct eth_query_ramrod_data query_ramrod_data; | ||
1824 | }; | 2012 | }; |
1825 | 2013 | ||
1826 | /* | 2014 | /* |
@@ -1843,10 +2031,13 @@ struct eth_tx_db_data { | |||
1843 | 2031 | ||
1844 | 2032 | ||
1845 | /* | 2033 | /* |
1846 | * Common configuration parameters per port in Tstorm | 2034 | * Common configuration parameters per function in Tstorm |
1847 | */ | 2035 | */ |
1848 | struct tstorm_eth_function_common_config { | 2036 | struct tstorm_eth_function_common_config { |
1849 | u32 config_flags; | 2037 | #if defined(__BIG_ENDIAN) |
2038 | u8 leading_client_id; | ||
2039 | u8 rss_result_mask; | ||
2040 | u16 config_flags; | ||
1850 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | 2041 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) |
1851 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | 2042 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 |
1852 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | 2043 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) |
@@ -1859,17 +2050,32 @@ struct tstorm_eth_function_common_config { | |||
1859 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE_SHIFT 4 | 2050 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE_SHIFT 4 |
1860 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<5) | 2051 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<5) |
1861 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 5 | 2052 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 5 |
1862 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3FFFFFF<<6) | 2053 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<6) |
1863 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 6 | 2054 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 6 |
1864 | #if defined(__BIG_ENDIAN) | 2055 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1FF<<7) |
1865 | u16 __secondary_vlan_id; | 2056 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 7 |
1866 | u8 leading_client_id; | ||
1867 | u8 rss_result_mask; | ||
1868 | #elif defined(__LITTLE_ENDIAN) | 2057 | #elif defined(__LITTLE_ENDIAN) |
2058 | u16 config_flags; | ||
2059 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
2060 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
2061 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
2062 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
2063 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
2064 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
2065 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
2066 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
2067 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE (0x1<<4) | ||
2068 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE_SHIFT 4 | ||
2069 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<5) | ||
2070 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 5 | ||
2071 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<6) | ||
2072 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 6 | ||
2073 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1FF<<7) | ||
2074 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 7 | ||
1869 | u8 rss_result_mask; | 2075 | u8 rss_result_mask; |
1870 | u8 leading_client_id; | 2076 | u8 leading_client_id; |
1871 | u16 __secondary_vlan_id; | ||
1872 | #endif | 2077 | #endif |
2078 | u16 vlan_id[2]; | ||
1873 | }; | 2079 | }; |
1874 | 2080 | ||
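config_flags is now a 16-bit field with an added VLAN_IN_CAM bit, while the RSS capability bits keep their positions. A sketch of composing a typical RSS setup (IPv4/IPv6 with TCP hashing) together with an rss_result_mask sized to the 128-entry indirection table (that sizing is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Names shortened from the TSTORM_ETH_FUNCTION_COMMON_CONFIG_* defines above. */
#define RSS_IPV4_CAPABILITY      (0x1 << 0)
#define RSS_IPV4_TCP_CAPABILITY  (0x1 << 1)
#define RSS_IPV6_CAPABILITY      (0x1 << 2)
#define RSS_IPV6_TCP_CAPABILITY  (0x1 << 3)
#define RSS_ENABLE               (0x1 << 4)

#define T_ETH_INDIRECTION_TABLE_SIZE 128    /* from bnx2x_fw_defs.h above */

int main(void)
{
        uint16_t config_flags = RSS_ENABLE |
                                RSS_IPV4_CAPABILITY | RSS_IPV4_TCP_CAPABILITY |
                                RSS_IPV6_CAPABILITY | RSS_IPV6_TCP_CAPABILITY;

        /* Assumption: rss_result_mask keeps only the hash bits used to index
         * the indirection table, hence table size minus one. */
        uint8_t rss_result_mask = T_ETH_INDIRECTION_TABLE_SIZE - 1;

        printf("config_flags 0x%04x, rss_result_mask 0x%02x\n",
               config_flags, rss_result_mask);
        return 0;
}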
1875 | /* | 2081 | /* |
@@ -1887,7 +2093,7 @@ struct eth_update_ramrod_data { | |||
1887 | struct mac_configuration_hdr { | 2093 | struct mac_configuration_hdr { |
1888 | u8 length_6b; | 2094 | u8 length_6b; |
1889 | u8 offset; | 2095 | u8 offset; |
1890 | u16 reserved0; | 2096 | u16 client_id; |
1891 | u32 reserved1; | 2097 | u32 reserved1; |
1892 | }; | 2098 | }; |
1893 | 2099 | ||
@@ -1944,15 +2150,55 @@ struct mac_configuration_cmd { | |||
1944 | 2150 | ||
1945 | 2151 | ||
1946 | /* | 2152 | /* |
2153 | * MAC address in list for ramrod | ||
2154 | */ | ||
2155 | struct mac_configuration_entry_e1h { | ||
2156 | u16 lsb_mac_addr; | ||
2157 | u16 middle_mac_addr; | ||
2158 | u16 msb_mac_addr; | ||
2159 | u16 vlan_id; | ||
2160 | u16 e1hov_id; | ||
2161 | u8 client_id; | ||
2162 | u8 flags; | ||
2163 | #define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0) | ||
2164 | #define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0 | ||
2165 | #define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1) | ||
2166 | #define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1 | ||
2167 | #define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2) | ||
2168 | #define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2 | ||
2169 | #define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0 (0x1F<<3) | ||
2170 | #define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0_SHIFT 3 | ||
2171 | }; | ||
2172 | |||
2173 | /* | ||
2174 | * MAC filtering configuration command | ||
2175 | */ | ||
2176 | struct mac_configuration_cmd_e1h { | ||
2177 | struct mac_configuration_hdr hdr; | ||
2178 | struct mac_configuration_entry_e1h config_table[32]; | ||
2179 | }; | ||
2180 | |||
2181 | |||
2182 | /* | ||
2183 | * approximate-match multicast filtering for E1H per function in Tstorm | ||
2184 | */ | ||
2185 | struct tstorm_eth_approximate_match_multicast_filtering { | ||
2186 | u32 mcast_add_hash_bit_array[8]; | ||
2187 | }; | ||
2188 | |||
2189 | |||
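mcast_add_hash_bit_array[8] is a 256-bit approximate (Bloom-style) filter: each accepted multicast address sets one bit, and incoming multicast frames are matched by hashing their destination MAC against the array. The header does not define the reduction itself, so the sketch below assumes a CRC-32C over the six MAC octets seeded with T_ETH_CRC32_HASH_SEED, with the low 8 bits selecting the bucket:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define T_ETH_CRC32_HASH_SEED 0x00000000   /* from bnx2x_fw_defs.h above */

/* Bitwise CRC-32C (Castagnoli), reflected form.  Which CRC variant and which
 * result bits the firmware really uses is an assumption in this sketch. */
static uint32_t crc32c(uint32_t seed, const uint8_t *buf, size_t len)
{
        uint32_t crc = ~seed;

        while (len--) {
                crc ^= *buf++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ ((crc & 1u) ? 0x82F63B78u : 0u);
        }
        return ~crc;
}

static void mcast_hash_set(uint32_t bit_array[8], const uint8_t mac[6])
{
        uint8_t bit = crc32c(T_ETH_CRC32_HASH_SEED, mac, 6) & 0xff;

        bit_array[bit >> 5] |= 1u << (bit & 0x1f);
}

int main(void)
{
        uint32_t bit_array[8];
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

        memset(bit_array, 0, sizeof(bit_array));
        mcast_hash_set(bit_array, mac);

        for (int i = 0; i < 8; i++)
                printf("word[%d] = 0x%08x\n", i, bit_array[i]);
        return 0;
}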
2190 | /* | ||
1947 | * Configuration parameters per client in Tstorm | 2191 | * Configuration parameters per client in Tstorm |
1948 | */ | 2192 | */ |
1949 | struct tstorm_eth_client_config { | 2193 | struct tstorm_eth_client_config { |
1950 | #if defined(__BIG_ENDIAN) | 2194 | #if defined(__BIG_ENDIAN) |
1951 | u16 statistics_counter_id; | 2195 | u8 max_sges_for_packet; |
2196 | u8 statistics_counter_id; | ||
1952 | u16 mtu; | 2197 | u16 mtu; |
1953 | #elif defined(__LITTLE_ENDIAN) | 2198 | #elif defined(__LITTLE_ENDIAN) |
1954 | u16 mtu; | 2199 | u16 mtu; |
1955 | u16 statistics_counter_id; | 2200 | u8 statistics_counter_id; |
2201 | u8 max_sges_for_packet; | ||
1956 | #endif | 2202 | #endif |
1957 | #if defined(__BIG_ENDIAN) | 2203 | #if defined(__BIG_ENDIAN) |
1958 | u16 drop_flags; | 2204 | u16 drop_flags; |
@@ -1960,42 +2206,42 @@ struct tstorm_eth_client_config { | |||
1960 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0 | 2206 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0 |
1961 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1) | 2207 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1) |
1962 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1 | 2208 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1 |
1963 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR (0x1<<2) | 2209 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2) |
1964 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR_SHIFT 2 | 2210 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2 |
1965 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<3) | 2211 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3) |
1966 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 3 | 2212 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3 |
1967 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<4) | 2213 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4) |
1968 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 4 | 2214 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4 |
1969 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x7FF<<5) | ||
1970 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 5 | ||
1971 | u16 config_flags; | 2215 | u16 config_flags; |
1972 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0) | 2216 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0) |
1973 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0 | 2217 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0 |
1974 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1) | 2218 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1) |
1975 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1 | 2219 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1 |
1976 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x3FFF<<2) | 2220 | #define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<2) |
1977 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 2 | 2221 | #define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 2 |
2222 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x1FFF<<3) | ||
2223 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 3 | ||
1978 | #elif defined(__LITTLE_ENDIAN) | 2224 | #elif defined(__LITTLE_ENDIAN) |
1979 | u16 config_flags; | 2225 | u16 config_flags; |
1980 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0) | 2226 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0) |
1981 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0 | 2227 | #define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0 |
1982 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1) | 2228 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1) |
1983 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1 | 2229 | #define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1 |
1984 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x3FFF<<2) | 2230 | #define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<2) |
1985 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 2 | 2231 | #define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 2 |
2232 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x1FFF<<3) | ||
2233 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 3 | ||
1986 | u16 drop_flags; | 2234 | u16 drop_flags; |
1987 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0) | 2235 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0) |
1988 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0 | 2236 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0 |
1989 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1) | 2237 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1) |
1990 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1 | 2238 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1 |
1991 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR (0x1<<2) | 2239 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2) |
1992 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR_SHIFT 2 | 2240 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2 |
1993 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<3) | 2241 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3) |
1994 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 3 | 2242 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3 |
1995 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<4) | 2243 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4) |
1996 | #define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 4 | 2244 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4 |
1997 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x7FF<<5) | ||
1998 | #define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 5 | ||
1999 | #endif | 2245 | #endif |
2000 | }; | 2246 | }; |
2001 | 2247 | ||
@@ -2011,96 +2257,112 @@ struct tstorm_eth_mac_filter_config { | |||
2011 | u32 bcast_drop_all; | 2257 | u32 bcast_drop_all; |
2012 | u32 bcast_accept_all; | 2258 | u32 bcast_accept_all; |
2013 | u32 strict_vlan; | 2259 | u32 strict_vlan; |
2014 | u32 __secondary_vlan_clients; | 2260 | u32 vlan_filter[2]; |
2261 | u32 reserved; | ||
2015 | }; | 2262 | }; |
2016 | 2263 | ||
2017 | 2264 | ||
2018 | struct rate_shaping_per_protocol { | 2265 | /* |
2266 | * Three RX producers for ETH | ||
2267 | */ | ||
2268 | struct tstorm_eth_rx_producers { | ||
2019 | #if defined(__BIG_ENDIAN) | 2269 | #if defined(__BIG_ENDIAN) |
2020 | u16 reserved0; | 2270 | u16 bd_prod; |
2021 | u16 protocol_rate; | 2271 | u16 cqe_prod; |
2022 | #elif defined(__LITTLE_ENDIAN) | 2272 | #elif defined(__LITTLE_ENDIAN) |
2023 | u16 protocol_rate; | 2273 | u16 cqe_prod; |
2024 | u16 reserved0; | 2274 | u16 bd_prod; |
2025 | #endif | 2275 | #endif |
2026 | u32 protocol_quota; | ||
2027 | s32 current_credit; | ||
2028 | u32 reserved; | ||
2029 | }; | ||
2030 | |||
2031 | struct rate_shaping_vars { | ||
2032 | struct rate_shaping_per_protocol protocol_vars[NUM_OF_PROTOCOLS]; | ||
2033 | u32 pause_mask; | ||
2034 | u32 periodic_stop; | ||
2035 | u32 rs_periodic_timeout; | ||
2036 | u32 rs_threshold; | ||
2037 | u32 last_periodic_time; | ||
2038 | u32 reserved; | ||
2039 | }; | ||
2040 | |||
2041 | struct fairness_per_protocol { | ||
2042 | u32 credit_delta; | ||
2043 | s32 fair_credit; | ||
2044 | #if defined(__BIG_ENDIAN) | 2276 | #if defined(__BIG_ENDIAN) |
2045 | u16 reserved0; | 2277 | u16 reserved; |
2046 | u8 state; | 2278 | u16 sge_prod; |
2047 | u8 weight; | ||
2048 | #elif defined(__LITTLE_ENDIAN) | 2279 | #elif defined(__LITTLE_ENDIAN) |
2049 | u8 weight; | 2280 | u16 sge_prod; |
2050 | u8 state; | 2281 | u16 reserved; |
2051 | u16 reserved0; | ||
2052 | #endif | 2282 | #endif |
2053 | u32 reserved1; | ||
2054 | }; | 2283 | }; |
2055 | 2284 | ||
2056 | struct fairness_vars { | ||
2057 | struct fairness_per_protocol protocol_vars[NUM_OF_PROTOCOLS]; | ||
2058 | u32 upper_bound; | ||
2059 | u32 port_rate; | ||
2060 | u32 pause_mask; | ||
2061 | u32 fair_threshold; | ||
2062 | }; | ||
2063 | 2285 | ||
2064 | struct safc_struct { | 2286 | /* |
2065 | u32 cur_pause_mask; | 2287 | * common flag to indicate existence of TPA. |
2066 | u32 expire_time; | 2288 | */ |
2289 | struct tstorm_eth_tpa_exist { | ||
2067 | #if defined(__BIG_ENDIAN) | 2290 | #if defined(__BIG_ENDIAN) |
2068 | u16 reserved0; | 2291 | u16 reserved1; |
2069 | u8 cur_cos_types; | 2292 | u8 reserved0; |
2070 | u8 safc_timeout_usec; | 2293 | u8 tpa_exist; |
2071 | #elif defined(__LITTLE_ENDIAN) | 2294 | #elif defined(__LITTLE_ENDIAN) |
2072 | u8 safc_timeout_usec; | 2295 | u8 tpa_exist; |
2073 | u8 cur_cos_types; | 2296 | u8 reserved0; |
2074 | u16 reserved0; | 2297 | u16 reserved1; |
2075 | #endif | 2298 | #endif |
2076 | u32 reserved1; | 2299 | u32 reserved2; |
2077 | }; | 2300 | }; |
2078 | 2301 | ||
2079 | struct demo_struct { | 2302 | |
2303 | /* | ||
2304 | * per-port SAFC demo variables | ||
2305 | */ | ||
2306 | struct cmng_flags_per_port { | ||
2080 | u8 con_number[NUM_OF_PROTOCOLS]; | 2307 | u8 con_number[NUM_OF_PROTOCOLS]; |
2081 | #if defined(__BIG_ENDIAN) | 2308 | #if defined(__BIG_ENDIAN) |
2082 | u8 reserved1; | ||
2083 | u8 fairness_enable; | 2309 | u8 fairness_enable; |
2084 | u8 rate_shaping_enable; | 2310 | u8 rate_shaping_enable; |
2085 | u8 cmng_enable; | 2311 | u8 cmng_protocol_enable; |
2312 | u8 cmng_vn_enable; | ||
2086 | #elif defined(__LITTLE_ENDIAN) | 2313 | #elif defined(__LITTLE_ENDIAN) |
2087 | u8 cmng_enable; | 2314 | u8 cmng_vn_enable; |
2315 | u8 cmng_protocol_enable; | ||
2088 | u8 rate_shaping_enable; | 2316 | u8 rate_shaping_enable; |
2089 | u8 fairness_enable; | 2317 | u8 fairness_enable; |
2090 | u8 reserved1; | ||
2091 | #endif | 2318 | #endif |
2092 | }; | 2319 | }; |
2093 | 2320 | ||
2094 | struct cmng_struct { | 2321 | |
2095 | struct rate_shaping_vars rs_vars; | 2322 | /* |
2096 | struct fairness_vars fair_vars; | 2323 | * per-port rate shaping variables |
2097 | struct safc_struct safc_vars; | 2324 | */ |
2098 | struct demo_struct demo_vars; | 2325 | struct rate_shaping_vars_per_port { |
2326 | u32 rs_periodic_timeout; | ||
2327 | u32 rs_threshold; | ||
2328 | }; | ||
2329 | |||
2330 | |||
2331 | /* | ||
2332 | * per-port fairness variables | ||
2333 | */ | ||
2334 | struct fairness_vars_per_port { | ||
2335 | u32 upper_bound; | ||
2336 | u32 fair_threshold; | ||
2337 | u32 fairness_timeout; | ||
2338 | }; | ||
2339 | |||
2340 | |||
2341 | /* | ||
2342 | * per-port SAFC variables | ||
2343 | */ | ||
2344 | struct safc_struct_per_port { | ||
2345 | #if defined(__BIG_ENDIAN) | ||
2346 | u16 __reserved0; | ||
2347 | u8 cur_cos_types; | ||
2348 | u8 safc_timeout_usec; | ||
2349 | #elif defined(__LITTLE_ENDIAN) | ||
2350 | u8 safc_timeout_usec; | ||
2351 | u8 cur_cos_types; | ||
2352 | u16 __reserved0; | ||
2353 | #endif | ||
2354 | u8 cos_to_protocol[MAX_COS_NUMBER]; | ||
2099 | }; | 2355 | }; |
2100 | 2356 | ||
2101 | 2357 | ||
2102 | struct cos_to_protocol { | 2358 | /* |
2103 | u8 mask[MAX_COS_NUMBER]; | 2359 | * Per-port congestion management variables |
2360 | */ | ||
2361 | struct cmng_struct_per_port { | ||
2362 | struct rate_shaping_vars_per_port rs_vars; | ||
2363 | struct fairness_vars_per_port fair_vars; | ||
2364 | struct safc_struct_per_port safc_vars; | ||
2365 | struct cmng_flags_per_port flags; | ||
2104 | }; | 2366 | }; |
2105 | 2367 | ||
2106 | 2368 | ||
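The structures in this hunk (safc_struct_per_port here, tstorm_eth_rx_producers and rate_shaping_counter elsewhere in the file) declare their sub-word members in opposite order under __BIG_ENDIAN and __LITTLE_ENDIAN. The usual purpose of this idiom is to keep the integer value of every 32-bit word identical on either host type, so the little-endian firmware sees the same layout after the driver's 32-bit copies. A made-up sketch of the pattern (example_le_word is not a driver structure):

#include <linux/types.h>

struct example_le_word {
#if defined(__BIG_ENDIAN)
	u16 field_hi;	/* first member sits at the low address: bits 31..16 */
	u16 field_lo;	/* bits 15..0 of the containing 32-bit word */
#elif defined(__LITTLE_ENDIAN)
	u16 field_lo;	/* low address == low-order bits on little-endian */
	u16 field_hi;
#endif
};

Either way, field_lo ends up in the low 16 bits of the word as the firmware reads it, which is the assumption behind the swapped declarations above.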
@@ -2162,6 +2424,16 @@ struct eth_stats_query { | |||
2162 | 2424 | ||
2163 | 2425 | ||
2164 | /* | 2426 | /* |
2427 | * per-vnic fairness variables | ||
2428 | */ | ||
2429 | struct fairness_vars_per_vn { | ||
2430 | u32 protocol_credit_delta[NUM_OF_PROTOCOLS]; | ||
2431 | u32 vn_credit_delta; | ||
2432 | u32 __reserved0; | ||
2433 | }; | ||
2434 | |||
2435 | |||
2436 | /* | ||
2165 | * FW version stored in the Xstorm RAM | 2437 | * FW version stored in the Xstorm RAM |
2166 | */ | 2438 | */ |
2167 | struct fw_version { | 2439 | struct fw_version { |
@@ -2179,8 +2451,10 @@ struct fw_version { | |||
2179 | #define FW_VERSION_OPTIMIZED_SHIFT 0 | 2451 | #define FW_VERSION_OPTIMIZED_SHIFT 0 |
2180 | #define FW_VERSION_BIG_ENDIEN (0x1<<1) | 2452 | #define FW_VERSION_BIG_ENDIEN (0x1<<1) |
2181 | #define FW_VERSION_BIG_ENDIEN_SHIFT 1 | 2453 | #define FW_VERSION_BIG_ENDIEN_SHIFT 1 |
2182 | #define __FW_VERSION_RESERVED (0x3FFFFFFF<<2) | 2454 | #define FW_VERSION_CHIP_VERSION (0x3<<2) |
2183 | #define __FW_VERSION_RESERVED_SHIFT 2 | 2455 | #define FW_VERSION_CHIP_VERSION_SHIFT 2 |
2456 | #define __FW_VERSION_RESERVED (0xFFFFFFF<<4) | ||
2457 | #define __FW_VERSION_RESERVED_SHIFT 4 | ||
2184 | }; | 2458 | }; |
2185 | 2459 | ||
2186 | 2460 | ||
@@ -2188,15 +2462,9 @@ struct fw_version { | |||
2188 | * FW version stored in first line of pram | 2462 | * FW version stored in first line of pram |
2189 | */ | 2463 | */ |
2190 | struct pram_fw_version { | 2464 | struct pram_fw_version { |
2191 | #if defined(__BIG_ENDIAN) | ||
2192 | u16 patch; | ||
2193 | u8 primary; | ||
2194 | u8 client; | ||
2195 | #elif defined(__LITTLE_ENDIAN) | ||
2196 | u8 client; | 2465 | u8 client; |
2197 | u8 primary; | 2466 | u8 primary; |
2198 | u16 patch; | 2467 | u16 patch; |
2199 | #endif | ||
2200 | u8 flags; | 2468 | u8 flags; |
2201 | #define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) | 2469 | #define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) |
2202 | #define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 | 2470 | #define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 |
@@ -2204,8 +2472,34 @@ struct pram_fw_version { | |||
2204 | #define PRAM_FW_VERSION_STORM_ID_SHIFT 1 | 2472 | #define PRAM_FW_VERSION_STORM_ID_SHIFT 1 |
2205 | #define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) | 2473 | #define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) |
2206 | #define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 | 2474 | #define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 |
2207 | #define __PRAM_FW_VERSION_RESERVED0 (0xF<<4) | 2475 | #define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) |
2208 | #define __PRAM_FW_VERSION_RESERVED0_SHIFT 4 | 2476 | #define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 |
2477 | #define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) | ||
2478 | #define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 | ||
2479 | }; | ||
2480 | |||
2481 | |||
2482 | /* | ||
2483 | * a single rate shaping counter. can be used as protocol or vnic counter | ||
2484 | */ | ||
2485 | struct rate_shaping_counter { | ||
2486 | u32 quota; | ||
2487 | #if defined(__BIG_ENDIAN) | ||
2488 | u16 __reserved0; | ||
2489 | u16 rate; | ||
2490 | #elif defined(__LITTLE_ENDIAN) | ||
2491 | u16 rate; | ||
2492 | u16 __reserved0; | ||
2493 | #endif | ||
2494 | }; | ||
2495 | |||
2496 | |||
2497 | /* | ||
2498 | * per-vnic rate shaping variables | ||
2499 | */ | ||
2500 | struct rate_shaping_vars_per_vn { | ||
2501 | struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS]; | ||
2502 | struct rate_shaping_counter vn_counter; | ||
2209 | }; | 2503 | }; |
2210 | 2504 | ||
2211 | 2505 | ||
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h index 5a4e82b9e7bf..4c7750789b62 100644 --- a/drivers/net/bnx2x_init.h +++ b/drivers/net/bnx2x_init.h | |||
@@ -226,28 +226,28 @@ static const u32 *bnx2x_sel_blob(u32 addr, const u32 *data, int is_e1) | |||
226 | tsem_int_table_data_e1h; | 226 | tsem_int_table_data_e1h; |
227 | else | 227 | else |
228 | IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) | 228 | IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) |
229 | data = is_e1 ? csem_int_table_data_e1 : | 229 | data = is_e1 ? csem_int_table_data_e1 : |
230 | csem_int_table_data_e1h; | 230 | csem_int_table_data_e1h; |
231 | else | 231 | else |
232 | IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) | 232 | IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) |
233 | data = is_e1 ? usem_int_table_data_e1 : | 233 | data = is_e1 ? usem_int_table_data_e1 : |
234 | usem_int_table_data_e1h; | 234 | usem_int_table_data_e1h; |
235 | else | 235 | else |
236 | IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) | 236 | IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) |
237 | data = is_e1 ? xsem_int_table_data_e1 : | 237 | data = is_e1 ? xsem_int_table_data_e1 : |
238 | xsem_int_table_data_e1h; | 238 | xsem_int_table_data_e1h; |
239 | else | 239 | else |
240 | IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) | 240 | IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) |
241 | data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h; | 241 | data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h; |
242 | else | 242 | else |
243 | IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) | 243 | IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) |
244 | data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h; | 244 | data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h; |
245 | else | 245 | else |
246 | IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) | 246 | IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) |
247 | data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h; | 247 | data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h; |
248 | else | 248 | else |
249 | IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) | 249 | IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) |
250 | data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h; | 250 | data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h; |
251 | 251 | ||
252 | return data; | 252 | return data; |
253 | } | 253 | } |
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c index 22586ebd7b1e..ff2743db10d9 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x_link.c | |||
@@ -3572,7 +3572,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, | |||
3572 | LED_BLINK_RATE_VAL); | 3572 | LED_BLINK_RATE_VAL); |
3573 | REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + | 3573 | REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + |
3574 | port*4, 1); | 3574 | port*4, 1); |
3575 | if (((speed == SPEED_2500) || | 3575 | if (!CHIP_IS_E1H(bp) && |
3576 | ((speed == SPEED_2500) || | ||
3576 | (speed == SPEED_1000) || | 3577 | (speed == SPEED_1000) || |
3577 | (speed == SPEED_100) || | 3578 | (speed == SPEED_100) || |
3578 | (speed == SPEED_10))) { | 3579 | (speed == SPEED_10))) { |
@@ -3753,6 +3754,14 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
3753 | vars->duplex = DUPLEX_FULL; | 3754 | vars->duplex = DUPLEX_FULL; |
3754 | vars->flow_ctrl = FLOW_CTRL_NONE; | 3755 | vars->flow_ctrl = FLOW_CTRL_NONE; |
3755 | vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); | 3756 | vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); |
3757 | /* enable on E1.5 FPGA */ | ||
3758 | if (CHIP_IS_E1H(bp)) { | ||
3759 | vars->flow_ctrl |= | ||
3760 | (FLOW_CTRL_TX | FLOW_CTRL_RX); | ||
3761 | vars->link_status |= | ||
3762 | (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | | ||
3763 | LINK_STATUS_RX_FLOW_CONTROL_ENABLED); | ||
3764 | } | ||
3756 | 3765 | ||
3757 | bnx2x_emac_enable(params, vars, 0); | 3766 | bnx2x_emac_enable(params, vars, 0); |
3758 | bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); | 3767 | bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index efa942688f84..90b54e4c5c3b 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* bnx2x.c: Broadcom Everest network driver. | 1 | /* bnx2x_main.c: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2008 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
@@ -15,12 +15,6 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* define this to make the driver freeze on error | ||
19 | * to allow getting debug info | ||
20 | * (you will need to reboot afterwards) | ||
21 | */ | ||
22 | /*#define BNX2X_STOP_ON_ERROR*/ | ||
23 | |||
24 | #include <linux/module.h> | 18 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
26 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
@@ -46,16 +40,17 @@ | |||
46 | #include <linux/mii.h> | 40 | #include <linux/mii.h> |
47 | #ifdef NETIF_F_HW_VLAN_TX | 41 | #ifdef NETIF_F_HW_VLAN_TX |
48 | #include <linux/if_vlan.h> | 42 | #include <linux/if_vlan.h> |
49 | #define BCM_VLAN 1 | ||
50 | #endif | 43 | #endif |
51 | #include <net/ip.h> | 44 | #include <net/ip.h> |
52 | #include <net/tcp.h> | 45 | #include <net/tcp.h> |
53 | #include <net/checksum.h> | 46 | #include <net/checksum.h> |
47 | #include <linux/version.h> | ||
48 | #include <net/ip6_checksum.h> | ||
54 | #include <linux/workqueue.h> | 49 | #include <linux/workqueue.h> |
55 | #include <linux/crc32.h> | 50 | #include <linux/crc32.h> |
51 | #include <linux/crc32c.h> | ||
56 | #include <linux/prefetch.h> | 52 | #include <linux/prefetch.h> |
57 | #include <linux/zlib.h> | 53 | #include <linux/zlib.h> |
58 | #include <linux/version.h> | ||
59 | #include <linux/io.h> | 54 | #include <linux/io.h> |
60 | 55 | ||
61 | #include "bnx2x_reg.h" | 56 | #include "bnx2x_reg.h" |
@@ -67,13 +62,13 @@ | |||
67 | 62 | ||
68 | #define DRV_MODULE_VERSION "1.42.4" | 63 | #define DRV_MODULE_VERSION "1.42.4" |
69 | #define DRV_MODULE_RELDATE "2008/4/9" | 64 | #define DRV_MODULE_RELDATE "2008/4/9" |
70 | #define BNX2X_BC_VER 0x040200 | 65 | #define BNX2X_BC_VER 0x040200 |
71 | 66 | ||
72 | /* Time in jiffies before concluding the transmitter is hung. */ | 67 | /* Time in jiffies before concluding the transmitter is hung */ |
73 | #define TX_TIMEOUT (5*HZ) | 68 | #define TX_TIMEOUT (5*HZ) |
74 | 69 | ||
75 | static char version[] __devinitdata = | 70 | static char version[] __devinitdata = |
76 | "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " | 71 | "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver " |
77 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 72 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
78 | 73 | ||
79 | MODULE_AUTHOR("Eliezer Tamir"); | 74 | MODULE_AUTHOR("Eliezer Tamir"); |
@@ -83,20 +78,19 @@ MODULE_VERSION(DRV_MODULE_VERSION); | |||
83 | 78 | ||
84 | static int use_inta; | 79 | static int use_inta; |
85 | static int poll; | 80 | static int poll; |
86 | static int onefunc; | ||
87 | static int nomcp; | ||
88 | static int debug; | 81 | static int debug; |
82 | static int nomcp; | ||
83 | static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ | ||
89 | static int use_multi; | 84 | static int use_multi; |
90 | 85 | ||
91 | module_param(use_inta, int, 0); | 86 | module_param(use_inta, int, 0); |
92 | module_param(poll, int, 0); | 87 | module_param(poll, int, 0); |
93 | module_param(onefunc, int, 0); | ||
94 | module_param(debug, int, 0); | 88 | module_param(debug, int, 0); |
89 | module_param(nomcp, int, 0); | ||
95 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); | 90 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); |
96 | MODULE_PARM_DESC(poll, "use polling (for debug)"); | 91 | MODULE_PARM_DESC(poll, "use polling (for debug)"); |
97 | MODULE_PARM_DESC(onefunc, "enable only first function"); | ||
98 | MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)"); | ||
99 | MODULE_PARM_DESC(debug, "default debug msglevel"); | 92 | MODULE_PARM_DESC(debug, "default debug msglevel"); |
93 | MODULE_PARM_DESC(nomcp, "ignore management CPU"); | ||
100 | 94 | ||
101 | #ifdef BNX2X_MULTI | 95 | #ifdef BNX2X_MULTI |
102 | module_param(use_multi, int, 0); | 96 | module_param(use_multi, int, 0); |
@@ -105,18 +99,27 @@ MODULE_PARM_DESC(use_multi, "use per-CPU queues"); | |||
105 | 99 | ||
106 | enum bnx2x_board_type { | 100 | enum bnx2x_board_type { |
107 | BCM57710 = 0, | 101 | BCM57710 = 0, |
102 | BCM57711 = 1, | ||
103 | BCM57711E = 2, | ||
108 | }; | 104 | }; |
109 | 105 | ||
110 | /* indexed by board_t, above */ | 106 | /* indexed by board_type, above */ |
111 | static struct { | 107 | static struct { |
112 | char *name; | 108 | char *name; |
113 | } board_info[] __devinitdata = { | 109 | } board_info[] __devinitdata = { |
114 | { "Broadcom NetXtreme II BCM57710 XGb" } | 110 | { "Broadcom NetXtreme II BCM57710 XGb" }, |
111 | { "Broadcom NetXtreme II BCM57711 XGb" }, | ||
112 | { "Broadcom NetXtreme II BCM57711E XGb" } | ||
115 | }; | 113 | }; |
116 | 114 | ||
115 | |||
117 | static const struct pci_device_id bnx2x_pci_tbl[] = { | 116 | static const struct pci_device_id bnx2x_pci_tbl[] = { |
118 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710, | 117 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710, |
119 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 }, | 118 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 }, |
119 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711, | ||
120 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 }, | ||
121 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E, | ||
122 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E }, | ||
120 | { 0 } | 123 | { 0 } |
121 | }; | 124 | }; |
122 | 125 | ||
@@ -201,7 +204,8 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
201 | #else | 204 | #else |
202 | DMAE_CMD_ENDIANITY_DW_SWAP | | 205 | DMAE_CMD_ENDIANITY_DW_SWAP | |
203 | #endif | 206 | #endif |
204 | (bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0)); | 207 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | |
208 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
205 | dmae->src_addr_lo = U64_LO(dma_addr); | 209 | dmae->src_addr_lo = U64_LO(dma_addr); |
206 | dmae->src_addr_hi = U64_HI(dma_addr); | 210 | dmae->src_addr_hi = U64_HI(dma_addr); |
207 | dmae->dst_addr_lo = dst_addr >> 2; | 211 | dmae->dst_addr_lo = dst_addr >> 2; |
@@ -224,7 +228,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
224 | 228 | ||
225 | *wb_comp = 0; | 229 | *wb_comp = 0; |
226 | 230 | ||
227 | bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT); | 231 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); |
228 | 232 | ||
229 | udelay(5); | 233 | udelay(5); |
230 | 234 | ||
@@ -277,7 +281,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
277 | #else | 281 | #else |
278 | DMAE_CMD_ENDIANITY_DW_SWAP | | 282 | DMAE_CMD_ENDIANITY_DW_SWAP | |
279 | #endif | 283 | #endif |
280 | (bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0)); | 284 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | |
285 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
281 | dmae->src_addr_lo = src_addr >> 2; | 286 | dmae->src_addr_lo = src_addr >> 2; |
282 | dmae->src_addr_hi = 0; | 287 | dmae->src_addr_hi = 0; |
283 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); | 288 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); |
@@ -297,7 +302,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
297 | 302 | ||
298 | *wb_comp = 0; | 303 | *wb_comp = 0; |
299 | 304 | ||
300 | bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT); | 305 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); |
301 | 306 | ||
302 | udelay(5); | 307 | udelay(5); |
303 | 308 | ||
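Both DMAE hunks above extend the command word the same way: the port select bit is now derived through BP_PORT(), and for the new 57711 (E1H) multi-function parts the virtual-function number is folded in via DMAE_CMD_E1HVN_SHIFT. A small sketch of that composition, assuming base_opcode already carries the source/destination and endianness flags that sit outside these hunks, and that BP_E1HVN() evaluates to zero on single-function devices:

/* Illustrative helper, not driver code: fold the per-function bits into a
 * DMAE opcode the way the two hunks above do inline. */
static u32 example_dmae_opcode(struct bnx2x *bp, u32 base_opcode)
{
	u32 opcode = base_opcode;

	/* physical port select */
	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	/* E1H virtual function (assumed 0 when not in multi-function mode) */
	opcode |= (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT);

	return opcode;
}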
@@ -345,47 +350,122 @@ static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) | |||
345 | 350 | ||
346 | static int bnx2x_mc_assert(struct bnx2x *bp) | 351 | static int bnx2x_mc_assert(struct bnx2x *bp) |
347 | { | 352 | { |
348 | int i, j, rc = 0; | ||
349 | char last_idx; | 353 | char last_idx; |
350 | const char storm[] = {"XTCU"}; | 354 | int i, rc = 0; |
351 | const u32 intmem_base[] = { | 355 | u32 row0, row1, row2, row3; |
352 | BAR_XSTRORM_INTMEM, | 356 | |
353 | BAR_TSTRORM_INTMEM, | 357 | /* XSTORM */ |
354 | BAR_CSTRORM_INTMEM, | 358 | last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + |
355 | BAR_USTRORM_INTMEM | 359 | XSTORM_ASSERT_LIST_INDEX_OFFSET); |
356 | }; | 360 | if (last_idx) |
357 | 361 | BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | |
358 | /* Go through all instances of all SEMIs */ | 362 | |
359 | for (i = 0; i < 4; i++) { | 363 | /* print the asserts */ |
360 | last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + | 364 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { |
361 | intmem_base[i]); | 365 | |
362 | if (last_idx) | 366 | row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
363 | BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", | 367 | XSTORM_ASSERT_LIST_OFFSET(i)); |
364 | storm[i], last_idx); | 368 | row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
365 | 369 | XSTORM_ASSERT_LIST_OFFSET(i) + 4); | |
366 | /* print the asserts */ | 370 | row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
367 | for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { | 371 | XSTORM_ASSERT_LIST_OFFSET(i) + 8); |
368 | u32 row0, row1, row2, row3; | 372 | row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
369 | 373 | XSTORM_ASSERT_LIST_OFFSET(i) + 12); | |
370 | row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + | 374 | |
371 | intmem_base[i]); | 375 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
372 | row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 + | 376 | BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" |
373 | intmem_base[i]); | 377 | " 0x%08x 0x%08x 0x%08x\n", |
374 | row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 + | 378 | i, row3, row2, row1, row0); |
375 | intmem_base[i]); | 379 | rc++; |
376 | row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 + | 380 | } else { |
377 | intmem_base[i]); | 381 | break; |
378 | 382 | } | |
379 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | 383 | } |
380 | BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x =" | 384 | |
381 | " 0x%08x 0x%08x 0x%08x 0x%08x\n", | 385 | /* TSTORM */ |
382 | storm[i], j, row3, row2, row1, row0); | 386 | last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + |
383 | rc++; | 387 | TSTORM_ASSERT_LIST_INDEX_OFFSET); |
384 | } else { | 388 | if (last_idx) |
385 | break; | 389 | BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
386 | } | 390 | |
391 | /* print the asserts */ | ||
392 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
393 | |||
394 | row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
395 | TSTORM_ASSERT_LIST_OFFSET(i)); | ||
396 | row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
397 | TSTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
398 | row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
399 | TSTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
400 | row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
401 | TSTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
402 | |||
403 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
404 | BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
405 | " 0x%08x 0x%08x 0x%08x\n", | ||
406 | i, row3, row2, row1, row0); | ||
407 | rc++; | ||
408 | } else { | ||
409 | break; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* CSTORM */ | ||
414 | last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + | ||
415 | CSTORM_ASSERT_LIST_INDEX_OFFSET); | ||
416 | if (last_idx) | ||
417 | BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | ||
418 | |||
419 | /* print the asserts */ | ||
420 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
421 | |||
422 | row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
423 | CSTORM_ASSERT_LIST_OFFSET(i)); | ||
424 | row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
425 | CSTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
426 | row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
427 | CSTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
428 | row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
429 | CSTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
430 | |||
431 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
432 | BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
433 | " 0x%08x 0x%08x 0x%08x\n", | ||
434 | i, row3, row2, row1, row0); | ||
435 | rc++; | ||
436 | } else { | ||
437 | break; | ||
387 | } | 438 | } |
388 | } | 439 | } |
440 | |||
441 | /* USTORM */ | ||
442 | last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + | ||
443 | USTORM_ASSERT_LIST_INDEX_OFFSET); | ||
444 | if (last_idx) | ||
445 | BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | ||
446 | |||
447 | /* print the asserts */ | ||
448 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
449 | |||
450 | row0 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
451 | USTORM_ASSERT_LIST_OFFSET(i)); | ||
452 | row1 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
453 | USTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
454 | row2 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
455 | USTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
456 | row3 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
457 | USTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
458 | |||
459 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
460 | BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
461 | " 0x%08x 0x%08x 0x%08x\n", | ||
462 | i, row3, row2, row1, row0); | ||
463 | rc++; | ||
464 | } else { | ||
465 | break; | ||
466 | } | ||
467 | } | ||
468 | |||
389 | return rc; | 469 | return rc; |
390 | } | 470 | } |
391 | 471 | ||
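The rewritten bnx2x_mc_assert() repeats the same dump loop four times, once per storm processor (X, T, C, U), changing only the BAR base and the offset macros. A purely editorial sketch of how that repetition could be folded into one helper; the function below is not part of the patch, and it assumes the assert-list entries are laid out 16 bytes apart, which matches the +4/+8/+12 reads above but is not guaranteed by the offset macros themselves.

static int bnx2x_storm_assert(struct bnx2x *bp, u32 bar, u32 index_offset,
			      u32 list_base, const char *name)
{
	char last_idx;
	u32 row0, row1, row2, row3;
	int i, rc = 0;

	last_idx = REG_RD8(bp, bar + index_offset);
	if (last_idx)
		BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n", name, last_idx);

	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
		/* assumed fixed 16-byte stride from list_base */
		row0 = REG_RD(bp, bar + list_base + 16*i);
		row1 = REG_RD(bp, bar + list_base + 16*i + 4);
		row2 = REG_RD(bp, bar + list_base + 16*i + 8);
		row3 = REG_RD(bp, bar + list_base + 16*i + 12);

		if (row0 == COMMON_ASM_INVALID_ASSERT_OPCODE)
			break;

		BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  name, i, row3, row2, row1, row0);
		rc++;
	}

	return rc;
}

/* e.g.:
 *	rc += bnx2x_storm_assert(bp, BAR_XSTRORM_INTMEM,
 *				 XSTORM_ASSERT_LIST_INDEX_OFFSET,
 *				 XSTORM_ASSERT_LIST_OFFSET(0), "XSTORM");
 */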
@@ -428,14 +508,16 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
428 | struct eth_tx_db_data *hw_prods = fp->hw_tx_prods; | 508 | struct eth_tx_db_data *hw_prods = fp->hw_tx_prods; |
429 | 509 | ||
430 | BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)" | 510 | BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)" |
431 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)" | 511 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", |
432 | " *rx_cons_sb(%x) rx_comp_prod(%x)" | ||
433 | " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)" | ||
434 | " bd data(%x,%x)\n", | ||
435 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 512 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
436 | fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb, | 513 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
437 | fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx, | 514 | BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" |
438 | fp->fp_u_idx, hw_prods->packets_prod, | 515 | " *rx_cons_sb(%x)\n", |
516 | fp->rx_comp_prod, fp->rx_comp_cons, | ||
517 | le16_to_cpu(*fp->rx_cons_sb)); | ||
518 | BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" | ||
519 | " bd data(%x,%x)\n", | ||
520 | fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, | ||
439 | hw_prods->bds_prod); | 521 | hw_prods->bds_prod); |
440 | 522 | ||
441 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); | 523 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); |
@@ -463,7 +545,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
463 | struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; | 545 | struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; |
464 | 546 | ||
465 | BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", | 547 | BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", |
466 | j, rx_bd[0], rx_bd[1], sw_bd->skb); | 548 | j, rx_bd[1], rx_bd[0], sw_bd->skb); |
467 | } | 549 | } |
468 | 550 | ||
469 | start = RCQ_BD(fp->rx_comp_cons - 10); | 551 | start = RCQ_BD(fp->rx_comp_cons - 10); |
@@ -482,7 +564,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
482 | bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, | 564 | bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, |
483 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); | 565 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); |
484 | 566 | ||
485 | 567 | bnx2x_fw_dump(bp); | |
486 | bnx2x_mc_assert(bp); | 568 | bnx2x_mc_assert(bp); |
487 | BNX2X_ERR("end crash dump -----------------\n"); | 569 | BNX2X_ERR("end crash dump -----------------\n"); |
488 | 570 | ||
@@ -492,7 +574,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
492 | 574 | ||
493 | static void bnx2x_int_enable(struct bnx2x *bp) | 575 | static void bnx2x_int_enable(struct bnx2x *bp) |
494 | { | 576 | { |
495 | int port = bp->port; | 577 | int port = BP_PORT(bp); |
496 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 578 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
497 | u32 val = REG_RD(bp, addr); | 579 | u32 val = REG_RD(bp, addr); |
498 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 580 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
@@ -507,7 +589,6 @@ static void bnx2x_int_enable(struct bnx2x *bp) | |||
507 | HC_CONFIG_0_REG_INT_LINE_EN_0 | | 589 | HC_CONFIG_0_REG_INT_LINE_EN_0 | |
508 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); | 590 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); |
509 | 591 | ||
510 | /* Errata A0.158 workaround */ | ||
511 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", | 592 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", |
512 | val, port, addr, msix); | 593 | val, port, addr, msix); |
513 | 594 | ||
@@ -520,11 +601,25 @@ static void bnx2x_int_enable(struct bnx2x *bp) | |||
520 | val, port, addr, msix); | 601 | val, port, addr, msix); |
521 | 602 | ||
522 | REG_WR(bp, addr, val); | 603 | REG_WR(bp, addr, val); |
604 | |||
605 | if (CHIP_IS_E1H(bp)) { | ||
606 | /* init leading/trailing edge */ | ||
607 | if (IS_E1HMF(bp)) { | ||
608 | val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4))); | ||
609 | if (bp->port.pmf) | ||
610 | /* enable nig attention */ | ||
611 | val |= 0x0100; | ||
612 | } else | ||
613 | val = 0xffff; | ||
614 | |||
615 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | ||
616 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | ||
617 | } | ||
523 | } | 618 | } |
524 | 619 | ||
525 | static void bnx2x_int_disable(struct bnx2x *bp) | 620 | static void bnx2x_int_disable(struct bnx2x *bp) |
526 | { | 621 | { |
527 | int port = bp->port; | 622 | int port = BP_PORT(bp); |
528 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 623 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
529 | u32 val = REG_RD(bp, addr); | 624 | u32 val = REG_RD(bp, addr); |
530 | 625 | ||
@@ -543,10 +638,10 @@ static void bnx2x_int_disable(struct bnx2x *bp) | |||
543 | 638 | ||
544 | static void bnx2x_int_disable_sync(struct bnx2x *bp) | 639 | static void bnx2x_int_disable_sync(struct bnx2x *bp) |
545 | { | 640 | { |
546 | |||
547 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 641 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
548 | int i; | 642 | int i; |
549 | 643 | ||
644 | /* disable interrupt handling */ | ||
550 | atomic_inc(&bp->intr_sem); | 645 | atomic_inc(&bp->intr_sem); |
551 | /* prevent the HW from sending interrupts */ | 646 | /* prevent the HW from sending interrupts */ |
552 | bnx2x_int_disable(bp); | 647 | bnx2x_int_disable(bp); |
@@ -563,30 +658,29 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp) | |||
563 | 658 | ||
564 | /* make sure sp_task is not running */ | 659 | /* make sure sp_task is not running */ |
565 | cancel_work_sync(&bp->sp_task); | 660 | cancel_work_sync(&bp->sp_task); |
566 | |||
567 | } | 661 | } |
568 | 662 | ||
569 | /* fast path code */ | 663 | /* fast path */ |
570 | 664 | ||
571 | /* | 665 | /* |
572 | * general service functions | 666 | * General service functions |
573 | */ | 667 | */ |
574 | 668 | ||
575 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id, | 669 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, |
576 | u8 storm, u16 index, u8 op, u8 update) | 670 | u8 storm, u16 index, u8 op, u8 update) |
577 | { | 671 | { |
578 | u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8; | 672 | u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
579 | struct igu_ack_register igu_ack; | 673 | struct igu_ack_register igu_ack; |
580 | 674 | ||
581 | igu_ack.status_block_index = index; | 675 | igu_ack.status_block_index = index; |
582 | igu_ack.sb_id_and_flags = | 676 | igu_ack.sb_id_and_flags = |
583 | ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | 677 | ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | |
584 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | 678 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | |
585 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | 679 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | |
586 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | 680 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); |
587 | 681 | ||
588 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", | 682 | DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", |
589 | (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */ | 683 | (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); |
590 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); | 684 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); |
591 | } | 685 | } |
592 | 686 | ||
@@ -614,8 +708,9 @@ static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) | |||
614 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | 708 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) |
615 | rx_cons_sb++; | 709 | rx_cons_sb++; |
616 | 710 | ||
617 | if ((rx_cons_sb != fp->rx_comp_cons) || | 711 | if ((fp->rx_comp_cons != rx_cons_sb) || |
618 | (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)) | 712 | (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || |
713 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
619 | return 1; | 714 | return 1; |
620 | 715 | ||
621 | return 0; | 716 | return 0; |
@@ -623,11 +718,11 @@ static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) | |||
623 | 718 | ||
624 | static u16 bnx2x_ack_int(struct bnx2x *bp) | 719 | static u16 bnx2x_ack_int(struct bnx2x *bp) |
625 | { | 720 | { |
626 | u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8; | 721 | u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
627 | u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); | 722 | u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); |
628 | 723 | ||
629 | /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n", | 724 | DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", |
630 | result, BAR_IGU_INTMEM + igu_addr); */ | 725 | result, BAR_IGU_INTMEM + igu_addr); |
631 | 726 | ||
632 | #ifdef IGU_DEBUG | 727 | #ifdef IGU_DEBUG |
633 | #warning IGU_DEBUG active | 728 | #warning IGU_DEBUG active |
@@ -653,7 +748,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
653 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; | 748 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; |
654 | struct eth_tx_bd *tx_bd; | 749 | struct eth_tx_bd *tx_bd; |
655 | struct sk_buff *skb = tx_buf->skb; | 750 | struct sk_buff *skb = tx_buf->skb; |
656 | u16 bd_idx = tx_buf->first_bd; | 751 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
657 | int nbd; | 752 | int nbd; |
658 | 753 | ||
659 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | 754 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", |
@@ -666,9 +761,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
666 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); | 761 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); |
667 | 762 | ||
668 | nbd = le16_to_cpu(tx_bd->nbd) - 1; | 763 | nbd = le16_to_cpu(tx_bd->nbd) - 1; |
764 | new_cons = nbd + tx_buf->first_bd; | ||
669 | #ifdef BNX2X_STOP_ON_ERROR | 765 | #ifdef BNX2X_STOP_ON_ERROR |
670 | if (nbd > (MAX_SKB_FRAGS + 2)) { | 766 | if (nbd > (MAX_SKB_FRAGS + 2)) { |
671 | BNX2X_ERR("bad nbd!\n"); | 767 | BNX2X_ERR("BAD nbd!\n"); |
672 | bnx2x_panic(); | 768 | bnx2x_panic(); |
673 | } | 769 | } |
674 | #endif | 770 | #endif |
@@ -708,32 +804,30 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
708 | tx_buf->first_bd = 0; | 804 | tx_buf->first_bd = 0; |
709 | tx_buf->skb = NULL; | 805 | tx_buf->skb = NULL; |
710 | 806 | ||
711 | return bd_idx; | 807 | return new_cons; |
712 | } | 808 | } |
713 | 809 | ||
714 | static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | 810 | static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) |
715 | { | 811 | { |
716 | u16 used; | 812 | s16 used; |
717 | u32 prod; | 813 | u16 prod; |
718 | u32 cons; | 814 | u16 cons; |
719 | 815 | ||
720 | /* Tell compiler that prod and cons can change */ | 816 | barrier(); /* Tell compiler that prod and cons can change */ |
721 | barrier(); | ||
722 | prod = fp->tx_bd_prod; | 817 | prod = fp->tx_bd_prod; |
723 | cons = fp->tx_bd_cons; | 818 | cons = fp->tx_bd_cons; |
724 | 819 | ||
725 | used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons + | 820 | /* NUM_TX_RINGS = number of "next-page" entries |
726 | (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT)); | 821 | It will be used as a threshold */ |
727 | 822 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | |
728 | if (prod >= cons) { | ||
729 | /* used = prod - cons - prod/size + cons/size */ | ||
730 | used -= NUM_TX_BD - NUM_TX_RINGS; | ||
731 | } | ||
732 | 823 | ||
824 | #ifdef BNX2X_STOP_ON_ERROR | ||
825 | BUG_TRAP(used >= 0); | ||
733 | BUG_TRAP(used <= fp->bp->tx_ring_size); | 826 | BUG_TRAP(used <= fp->bp->tx_ring_size); |
734 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); | 827 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); |
828 | #endif | ||
735 | 829 | ||
736 | return (fp->bp->tx_ring_size - used); | 830 | return (s16)(fp->bp->tx_ring_size) - used; |
737 | } | 831 | } |
738 | 832 | ||
739 | static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | 833 | static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) |
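The reworked bnx2x_tx_avail() above treats tx_bd_prod and tx_bd_cons as free-running 16-bit counters and measures ring occupancy with a signed 16-bit subtraction, adding NUM_TX_RINGS as a margin for the next-page descriptors (per the in-code comment). SUB_S16() is defined outside this hunk, so the sketch below uses an illustrative stand-in to show why the arithmetic survives counter wrap-around:

#include <linux/types.h>

/* assumed equivalent of SUB_S16(); illustrative, not the driver's macro */
#define EX_SUB_S16(a, b)	((s16)((s16)(a) - (s16)(b)))

static u16 example_tx_avail(u16 prod, u16 cons, u16 ring_size, u16 nr_pages)
{
	/* Free-running counters: the signed difference stays correct
	 * across the 0xffff -> 0x0000 wrap.  For prod = 0x0005 and
	 * cons = 0xfffb the result is 10 BDs in flight, not ~64k. */
	s16 used = EX_SUB_S16(prod, cons) + (s16)nr_pages;

	return (u16)((s16)ring_size - used);
}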
@@ -757,10 +851,10 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
757 | 851 | ||
758 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ | 852 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ |
759 | 853 | ||
760 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n", | 854 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", |
761 | hw_cons, sw_cons, pkt_cons); | 855 | hw_cons, sw_cons, pkt_cons); |
762 | 856 | ||
763 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { | 857 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { |
764 | rmb(); | 858 | rmb(); |
765 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); | 859 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); |
766 | } | 860 | } |
@@ -793,7 +887,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
793 | netif_wake_queue(bp->dev); | 887 | netif_wake_queue(bp->dev); |
794 | 888 | ||
795 | netif_tx_unlock(bp->dev); | 889 | netif_tx_unlock(bp->dev); |
796 | |||
797 | } | 890 | } |
798 | } | 891 | } |
799 | 892 | ||
@@ -804,13 +897,14 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
804 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 897 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
805 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 898 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
806 | 899 | ||
807 | DP(NETIF_MSG_RX_STATUS, | 900 | DP(BNX2X_MSG_SP, |
808 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", | 901 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", |
809 | fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type); | 902 | FP_IDX(fp), cid, command, bp->state, |
903 | rr_cqe->ramrod_cqe.ramrod_type); | ||
810 | 904 | ||
811 | bp->spq_left++; | 905 | bp->spq_left++; |
812 | 906 | ||
813 | if (fp->index) { | 907 | if (FP_IDX(fp)) { |
814 | switch (command | fp->state) { | 908 | switch (command | fp->state) { |
815 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | | 909 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | |
816 | BNX2X_FP_STATE_OPENING): | 910 | BNX2X_FP_STATE_OPENING): |
@@ -826,10 +920,11 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
826 | break; | 920 | break; |
827 | 921 | ||
828 | default: | 922 | default: |
829 | BNX2X_ERR("unexpected MC reply(%d) state is %x\n", | 923 | BNX2X_ERR("unexpected MC reply (%d) " |
830 | command, fp->state); | 924 | "fp->state is %x\n", command, fp->state); |
925 | break; | ||
831 | } | 926 | } |
832 | mb(); /* force bnx2x_wait_ramrod to see the change */ | 927 | mb(); /* force bnx2x_wait_ramrod() to see the change */ |
833 | return; | 928 | return; |
834 | } | 929 | } |
835 | 930 | ||
@@ -846,25 +941,25 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
846 | break; | 941 | break; |
847 | 942 | ||
848 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): | 943 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): |
849 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", | 944 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); |
850 | cid); | ||
851 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; | 945 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; |
852 | break; | 946 | break; |
853 | 947 | ||
854 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): | 948 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): |
949 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): | ||
855 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 950 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); |
856 | break; | 951 | break; |
857 | 952 | ||
858 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): | 953 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): |
859 | DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n"); | 954 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); |
860 | break; | 955 | break; |
861 | 956 | ||
862 | default: | 957 | default: |
863 | BNX2X_ERR("unexpected ramrod (%d) state is %x\n", | 958 | BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n", |
864 | command, bp->state); | 959 | command, bp->state); |
960 | break; | ||
865 | } | 961 | } |
866 | 962 | mb(); /* force bnx2x_wait_ramrod() to see the change */ | |
867 | mb(); /* force bnx2x_wait_ramrod to see the change */ | ||
868 | } | 963 | } |
869 | 964 | ||
870 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | 965 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, |
@@ -882,7 +977,6 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |||
882 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 977 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
883 | PCI_DMA_FROMDEVICE); | 978 | PCI_DMA_FROMDEVICE); |
884 | if (unlikely(dma_mapping_error(mapping))) { | 979 | if (unlikely(dma_mapping_error(mapping))) { |
885 | |||
886 | dev_kfree_skb(skb); | 980 | dev_kfree_skb(skb); |
887 | return -ENOMEM; | 981 | return -ENOMEM; |
888 | } | 982 | } |
@@ -924,7 +1018,7 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | |||
924 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | 1018 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
925 | { | 1019 | { |
926 | struct bnx2x *bp = fp->bp; | 1020 | struct bnx2x *bp = fp->bp; |
927 | u16 bd_cons, bd_prod, comp_ring_cons; | 1021 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; |
928 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | 1022 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; |
929 | int rx_pkt = 0; | 1023 | int rx_pkt = 0; |
930 | 1024 | ||
@@ -933,12 +1027,15 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
933 | return 0; | 1027 | return 0; |
934 | #endif | 1028 | #endif |
935 | 1029 | ||
1030 | /* CQ "next element" is of the size of the regular element, | ||
1031 | that's why it's ok here */ | ||
936 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); | 1032 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); |
937 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | 1033 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) |
938 | hw_comp_cons++; | 1034 | hw_comp_cons++; |
939 | 1035 | ||
940 | bd_cons = fp->rx_bd_cons; | 1036 | bd_cons = fp->rx_bd_cons; |
941 | bd_prod = fp->rx_bd_prod; | 1037 | bd_prod = fp->rx_bd_prod; |
1038 | bd_prod_fw = bd_prod; | ||
942 | sw_comp_cons = fp->rx_comp_cons; | 1039 | sw_comp_cons = fp->rx_comp_cons; |
943 | sw_comp_prod = fp->rx_comp_prod; | 1040 | sw_comp_prod = fp->rx_comp_prod; |
944 | 1041 | ||
@@ -949,34 +1046,31 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
949 | 1046 | ||
950 | DP(NETIF_MSG_RX_STATUS, | 1047 | DP(NETIF_MSG_RX_STATUS, |
951 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", | 1048 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", |
952 | fp->index, hw_comp_cons, sw_comp_cons); | 1049 | FP_IDX(fp), hw_comp_cons, sw_comp_cons); |
953 | 1050 | ||
954 | while (sw_comp_cons != hw_comp_cons) { | 1051 | while (sw_comp_cons != hw_comp_cons) { |
955 | unsigned int len, pad; | 1052 | struct sw_rx_bd *rx_buf = NULL; |
956 | struct sw_rx_bd *rx_buf; | ||
957 | struct sk_buff *skb; | 1053 | struct sk_buff *skb; |
958 | union eth_rx_cqe *cqe; | 1054 | union eth_rx_cqe *cqe; |
1055 | u8 cqe_fp_flags; | ||
1056 | u16 len, pad; | ||
959 | 1057 | ||
960 | comp_ring_cons = RCQ_BD(sw_comp_cons); | 1058 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
961 | bd_prod = RX_BD(bd_prod); | 1059 | bd_prod = RX_BD(bd_prod); |
962 | bd_cons = RX_BD(bd_cons); | 1060 | bd_cons = RX_BD(bd_cons); |
963 | 1061 | ||
964 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | 1062 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
1063 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
965 | 1064 | ||
966 | DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u" | ||
967 | " comp_ring (%u) bd_ring (%u,%u)\n", | ||
968 | hw_comp_cons, sw_comp_cons, | ||
969 | comp_ring_cons, bd_prod, bd_cons); | ||
970 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | 1065 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" |
971 | " queue %x vlan %x len %x\n", | 1066 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), |
972 | cqe->fast_path_cqe.type, | 1067 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, |
973 | cqe->fast_path_cqe.error_type_flags, | ||
974 | cqe->fast_path_cqe.status_flags, | ||
975 | cqe->fast_path_cqe.rss_hash_result, | 1068 | cqe->fast_path_cqe.rss_hash_result, |
976 | cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len); | 1069 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), |
1070 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
977 | 1071 | ||
978 | /* is this a slowpath msg? */ | 1072 | /* is this a slowpath msg? */ |
979 | if (unlikely(cqe->fast_path_cqe.type)) { | 1073 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { |
980 | bnx2x_sp_event(fp, cqe); | 1074 | bnx2x_sp_event(fp, cqe); |
981 | goto next_cqe; | 1075 | goto next_cqe; |
982 | 1076 | ||
@@ -984,7 +1078,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
984 | } else { | 1078 | } else { |
985 | rx_buf = &fp->rx_buf_ring[bd_cons]; | 1079 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
986 | skb = rx_buf->skb; | 1080 | skb = rx_buf->skb; |
987 | |||
988 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | 1081 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); |
989 | pad = cqe->fast_path_cqe.placement_offset; | 1082 | pad = cqe->fast_path_cqe.placement_offset; |
990 | 1083 | ||
@@ -996,13 +1089,11 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
996 | prefetch(((char *)(skb)) + 128); | 1089 | prefetch(((char *)(skb)) + 128); |
997 | 1090 | ||
998 | /* is this an error packet? */ | 1091 | /* is this an error packet? */ |
999 | if (unlikely(cqe->fast_path_cqe.error_type_flags & | 1092 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { |
1000 | ETH_RX_ERROR_FALGS)) { | ||
1001 | /* do we sometimes forward error packets anyway? */ | 1093 | /* do we sometimes forward error packets anyway? */ |
1002 | DP(NETIF_MSG_RX_ERR, | 1094 | DP(NETIF_MSG_RX_ERR, |
1003 | "ERROR flags(%u) Rx packet(%u)\n", | 1095 | "ERROR flags %x rx packet %u\n", |
1004 | cqe->fast_path_cqe.error_type_flags, | 1096 | cqe_fp_flags, sw_comp_cons); |
1005 | sw_comp_cons); | ||
1006 | /* TBD make sure MC counts this as a drop */ | 1097 | /* TBD make sure MC counts this as a drop */ |
1007 | goto reuse_rx; | 1098 | goto reuse_rx; |
1008 | } | 1099 | } |
@@ -1018,7 +1109,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1018 | len + pad); | 1109 | len + pad); |
1019 | if (new_skb == NULL) { | 1110 | if (new_skb == NULL) { |
1020 | DP(NETIF_MSG_RX_ERR, | 1111 | DP(NETIF_MSG_RX_ERR, |
1021 | "ERROR packet dropped " | 1112 | "ERROR packet dropped " |
1022 | "because of alloc failure\n"); | 1113 | "because of alloc failure\n"); |
1023 | /* TBD count this as a drop? */ | 1114 | /* TBD count this as a drop? */ |
1024 | goto reuse_rx; | 1115 | goto reuse_rx; |
@@ -1044,7 +1135,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1044 | 1135 | ||
1045 | } else { | 1136 | } else { |
1046 | DP(NETIF_MSG_RX_ERR, | 1137 | DP(NETIF_MSG_RX_ERR, |
1047 | "ERROR packet dropped because " | 1138 | "ERROR packet dropped because " |
1048 | "of alloc failure\n"); | 1139 | "of alloc failure\n"); |
1049 | reuse_rx: | 1140 | reuse_rx: |
1050 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | 1141 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); |
@@ -1061,14 +1152,14 @@ reuse_rx: | |||
1061 | } | 1152 | } |
1062 | 1153 | ||
1063 | #ifdef BCM_VLAN | 1154 | #ifdef BCM_VLAN |
1064 | if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) | 1155 | if ((bp->vlgrp != NULL) && |
1065 | & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS) | 1156 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & |
1066 | && (bp->vlgrp != NULL)) | 1157 | PARSING_FLAGS_VLAN)) |
1067 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 1158 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, |
1068 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); | 1159 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); |
1069 | else | 1160 | else |
1070 | #endif | 1161 | #endif |
1071 | netif_receive_skb(skb); | 1162 | netif_receive_skb(skb); |
1072 | 1163 | ||
1073 | bp->dev->last_rx = jiffies; | 1164 | bp->dev->last_rx = jiffies; |
1074 | 1165 | ||
@@ -1077,22 +1168,25 @@ next_rx: | |||
1077 | 1168 | ||
1078 | bd_cons = NEXT_RX_IDX(bd_cons); | 1169 | bd_cons = NEXT_RX_IDX(bd_cons); |
1079 | bd_prod = NEXT_RX_IDX(bd_prod); | 1170 | bd_prod = NEXT_RX_IDX(bd_prod); |
1171 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); | ||
1172 | rx_pkt++; | ||
1080 | next_cqe: | 1173 | next_cqe: |
1081 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); | 1174 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); |
1082 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); | 1175 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); |
1083 | rx_pkt++; | ||
1084 | 1176 | ||
1085 | if ((rx_pkt == budget)) | 1177 | if (rx_pkt == budget) |
1086 | break; | 1178 | break; |
1087 | } /* while */ | 1179 | } /* while */ |
1088 | 1180 | ||
1089 | fp->rx_bd_cons = bd_cons; | 1181 | fp->rx_bd_cons = bd_cons; |
1090 | fp->rx_bd_prod = bd_prod; | 1182 | fp->rx_bd_prod = bd_prod_fw; |
1091 | fp->rx_comp_cons = sw_comp_cons; | 1183 | fp->rx_comp_cons = sw_comp_cons; |
1092 | fp->rx_comp_prod = sw_comp_prod; | 1184 | fp->rx_comp_prod = sw_comp_prod; |
1093 | 1185 | ||
1094 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 1186 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
1095 | TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod); | 1187 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)), |
1188 | sw_comp_prod); | ||
1189 | |||
1096 | 1190 | ||
1097 | mmiowb(); /* keep prod updates ordered */ | 1191 | mmiowb(); /* keep prod updates ordered */ |
1098 | 1192 | ||
@@ -1107,10 +1201,11 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1107 | struct bnx2x_fastpath *fp = fp_cookie; | 1201 | struct bnx2x_fastpath *fp = fp_cookie; |
1108 | struct bnx2x *bp = fp->bp; | 1202 | struct bnx2x *bp = fp->bp; |
1109 | struct net_device *dev = bp->dev; | 1203 | struct net_device *dev = bp->dev; |
1110 | int index = fp->index; | 1204 | int index = FP_IDX(fp); |
1111 | 1205 | ||
1112 | DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index); | 1206 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", |
1113 | bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0); | 1207 | index, FP_SB_ID(fp)); |
1208 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); | ||
1114 | 1209 | ||
1115 | #ifdef BNX2X_STOP_ON_ERROR | 1210 | #ifdef BNX2X_STOP_ON_ERROR |
1116 | if (unlikely(bp->panic)) | 1211 | if (unlikely(bp->panic)) |
@@ -1123,6 +1218,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1123 | prefetch(&fp->status_blk->u_status_block.status_block_index); | 1218 | prefetch(&fp->status_blk->u_status_block.status_block_index); |
1124 | 1219 | ||
1125 | netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi)); | 1220 | netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi)); |
1221 | |||
1126 | return IRQ_HANDLED; | 1222 | return IRQ_HANDLED; |
1127 | } | 1223 | } |
1128 | 1224 | ||
@@ -1131,26 +1227,28 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1131 | struct net_device *dev = dev_instance; | 1227 | struct net_device *dev = dev_instance; |
1132 | struct bnx2x *bp = netdev_priv(dev); | 1228 | struct bnx2x *bp = netdev_priv(dev); |
1133 | u16 status = bnx2x_ack_int(bp); | 1229 | u16 status = bnx2x_ack_int(bp); |
1230 | u16 mask; | ||
1134 | 1231 | ||
1232 | /* Return here if interrupt is shared and it's not for us */ | ||
1135 | if (unlikely(status == 0)) { | 1233 | if (unlikely(status == 0)) { |
1136 | DP(NETIF_MSG_INTR, "not our interrupt!\n"); | 1234 | DP(NETIF_MSG_INTR, "not our interrupt!\n"); |
1137 | return IRQ_NONE; | 1235 | return IRQ_NONE; |
1138 | } | 1236 | } |
1139 | 1237 | DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); | |
1140 | DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status); | ||
1141 | 1238 | ||
1142 | #ifdef BNX2X_STOP_ON_ERROR | 1239 | #ifdef BNX2X_STOP_ON_ERROR |
1143 | if (unlikely(bp->panic)) | 1240 | if (unlikely(bp->panic)) |
1144 | return IRQ_HANDLED; | 1241 | return IRQ_HANDLED; |
1145 | #endif | 1242 | #endif |
1146 | 1243 | ||
1147 | /* Return here if interrupt is shared and is disabled */ | 1244 | /* Return here if interrupt is disabled */ |
1148 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 1245 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
1149 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | 1246 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); |
1150 | return IRQ_HANDLED; | 1247 | return IRQ_HANDLED; |
1151 | } | 1248 | } |
1152 | 1249 | ||
1153 | if (status & 0x2) { | 1250 | mask = 0x2 << bp->fp[0].sb_id; |
1251 | if (status & mask) { | ||
1154 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 1252 | struct bnx2x_fastpath *fp = &bp->fp[0]; |
1155 | 1253 | ||
1156 | prefetch(fp->rx_cons_sb); | 1254 | prefetch(fp->rx_cons_sb); |
@@ -1160,13 +1258,11 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1160 | 1258 | ||
1161 | netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi)); | 1259 | netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi)); |
1162 | 1260 | ||
1163 | status &= ~0x2; | 1261 | status &= ~mask; |
1164 | if (!status) | ||
1165 | return IRQ_HANDLED; | ||
1166 | } | 1262 | } |
1167 | 1263 | ||
1168 | if (unlikely(status & 0x1)) { | ||
1169 | 1264 | ||
1265 | if (unlikely(status & 0x1)) { | ||
1170 | schedule_work(&bp->sp_task); | 1266 | schedule_work(&bp->sp_task); |
1171 | 1267 | ||
1172 | status &= ~0x1; | 1268 | status &= ~0x1; |
@@ -1174,8 +1270,9 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1174 | return IRQ_HANDLED; | 1270 | return IRQ_HANDLED; |
1175 | } | 1271 | } |
1176 | 1272 | ||
1177 | DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n", | 1273 | if (status) |
1178 | status); | 1274 | DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n", |
1275 | status); | ||
1179 | 1276 | ||
1180 | return IRQ_HANDLED; | 1277 | return IRQ_HANDLED; |
1181 | } | 1278 | } |
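With the 57711 additions, the INT#A handler above no longer hard-codes bit 1 for the single fast path; it derives the bit from the status block id (0x2 << sb_id), while bit 0 still reports slow-path work for sp_task. A hypothetical sketch of the same decode generalized to several queues; the hunk itself only services fp[0], so the loop is illustrative:

/* Returns a bitmap of fastpath queues that need NAPI polling. */
static u32 example_decode_status(struct bnx2x *bp, u16 status, int num_queues)
{
	u32 napi_mask = 0;
	int i;

	for (i = 0; i < num_queues; i++) {
		u16 mask = 0x2 << bp->fp[i].sb_id;	/* one bit per status block */

		if (status & mask) {
			napi_mask |= (1 << i);
			status &= ~mask;
		}
	}

	if (status & 0x1)	/* bit 0: slow-path events for sp_task */
		status &= ~0x1;

	/* any bit still set at this point is an unexpected source */
	return napi_mask;
}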
@@ -1193,7 +1290,7 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | |||
1193 | { | 1290 | { |
1194 | u32 lock_status; | 1291 | u32 lock_status; |
1195 | u32 resource_bit = (1 << resource); | 1292 | u32 resource_bit = (1 << resource); |
1196 | u8 port = bp->port; | 1293 | u8 port = BP_PORT(bp); |
1197 | int cnt; | 1294 | int cnt; |
1198 | 1295 | ||
1199 | /* Validating that the resource is within range */ | 1296 | /* Validating that the resource is within range */ |
@@ -1231,7 +1328,7 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) | |||
1231 | { | 1328 | { |
1232 | u32 lock_status; | 1329 | u32 lock_status; |
1233 | u32 resource_bit = (1 << resource); | 1330 | u32 resource_bit = (1 << resource); |
1234 | u8 port = bp->port; | 1331 | u8 port = BP_PORT(bp); |
1235 | 1332 | ||
1236 | /* Validating that the resource is within range */ | 1333 | /* Validating that the resource is within range */ |
1237 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | 1334 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { |
@@ -1258,7 +1355,7 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp) | |||
1258 | { | 1355 | { |
1259 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 1356 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
1260 | 1357 | ||
1261 | mutex_lock(&bp->phy_mutex); | 1358 | mutex_lock(&bp->port.phy_mutex); |
1262 | 1359 | ||
1263 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || | 1360 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || |
1264 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1361 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
@@ -1273,14 +1370,14 @@ static void bnx2x_phy_hw_unlock(struct bnx2x *bp) | |||
1273 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1370 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
1274 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | 1371 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); |
1275 | 1372 | ||
1276 | mutex_unlock(&bp->phy_mutex); | 1373 | mutex_unlock(&bp->port.phy_mutex); |
1277 | } | 1374 | } |
1278 | 1375 | ||
1279 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | 1376 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) |
1280 | { | 1377 | { |
1281 | /* The GPIO should be swapped if swap register is set and active */ | 1378 | /* The GPIO should be swapped if swap register is set and active */ |
1282 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && | 1379 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && |
1283 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port; | 1380 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); |
1284 | int gpio_shift = gpio_num + | 1381 | int gpio_shift = gpio_num + |
1285 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); | 1382 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); |
1286 | u32 gpio_mask = (1 << gpio_shift); | 1383 | u32 gpio_mask = (1 << gpio_shift); |
@@ -1379,18 +1476,18 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp) | |||
1379 | { | 1476 | { |
1380 | switch (bp->link_vars.ieee_fc) { | 1477 | switch (bp->link_vars.ieee_fc) { |
1381 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: | 1478 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: |
1382 | bp->advertising &= ~(ADVERTISED_Asym_Pause | | 1479 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | |
1383 | ADVERTISED_Pause); | 1480 | ADVERTISED_Pause); |
1384 | break; | 1481 | break; |
1385 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: | 1482 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: |
1386 | bp->advertising |= (ADVERTISED_Asym_Pause | | 1483 | bp->port.advertising |= (ADVERTISED_Asym_Pause | |
1387 | ADVERTISED_Pause); | 1484 | ADVERTISED_Pause); |
1388 | break; | 1485 | break; |
1389 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: | 1486 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: |
1390 | bp->advertising |= ADVERTISED_Asym_Pause; | 1487 | bp->port.advertising |= ADVERTISED_Asym_Pause; |
1391 | break; | 1488 | break; |
1392 | default: | 1489 | default: |
1393 | bp->advertising &= ~(ADVERTISED_Asym_Pause | | 1490 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | |
1394 | ADVERTISED_Pause); | 1491 | ADVERTISED_Pause); |
1395 | break; | 1492 | break; |
1396 | } | 1493 | } |
@@ -1443,6 +1540,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp) | |||
1443 | bnx2x_link_report(bp); | 1540 | bnx2x_link_report(bp); |
1444 | 1541 | ||
1445 | bnx2x_calc_fc_adv(bp); | 1542 | bnx2x_calc_fc_adv(bp); |
1543 | |||
1446 | return rc; | 1544 | return rc; |
1447 | } | 1545 | } |
1448 | 1546 | ||
@@ -1473,15 +1571,261 @@ static u8 bnx2x_link_test(struct bnx2x *bp) | |||
1473 | return rc; | 1571 | return rc; |
1474 | } | 1572 | } |
1475 | 1573 | ||
1574 | /* Calculates the sum of vn_min_rates. | ||
1575 | It's needed for further normalizing of the min_rates. | ||
1576 | |||
1577 | Returns: | ||
1578 | sum of vn_min_rates | ||
1579 | or | ||
1580 | 0 - if all the min_rates are 0. | ||
1581 | In the latter case the fairness algorithm should be deactivated. | ||
1582 | If not all min_rates are zero then those that are zeroes will | ||
1583 | be set to 1. | ||
1584 | */ | ||
1585 | static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp) | ||
1586 | { | ||
1587 | int i, port = BP_PORT(bp); | ||
1588 | u32 wsum = 0; | ||
1589 | int all_zero = 1; | ||
1590 | |||
1591 | for (i = 0; i < E1HVN_MAX; i++) { | ||
1592 | u32 vn_cfg = | ||
1593 | SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config); | ||
1594 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | ||
1595 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | ||
1596 | if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) { | ||
1597 | /* If min rate is zero - set it to 1 */ | ||
1598 | if (!vn_min_rate) | ||
1599 | vn_min_rate = DEF_MIN_RATE; | ||
1600 | else | ||
1601 | all_zero = 0; | ||
1602 | |||
1603 | wsum += vn_min_rate; | ||
1604 | } | ||
1605 | } | ||
1606 | |||
1607 | /* ... only if all min rates are zeros - disable FAIRNESS */ | ||
1608 | if (all_zero) | ||
1609 | return 0; | ||
1610 | |||
1611 | return wsum; | ||
1612 | } | ||
1613 | |||
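As a rough standalone illustration of the weighted-sum step above (not part of the patch), the loop can be modeled as below; E1HVN_MAX = 4, DEF_MIN_RATE = 100 and the sample per-VN minimum rates are assumptions for this sketch, and the hidden-function check is omitted for brevity.

#include <stdio.h>

#define E1HVN_MAX    4
#define DEF_MIN_RATE 100	/* assumed default used for zero min rates */

int main(void)
{
	/* hypothetical per-VN min rates in Mbps, as decoded from mf_cfg */
	unsigned int vn_min_rate[E1HVN_MAX] = { 0, 2500, 0, 1000 };
	unsigned int wsum = 0;
	int all_zero = 1, i;

	for (i = 0; i < E1HVN_MAX; i++) {
		unsigned int rate = vn_min_rate[i];

		if (!rate)		/* a zero min rate is bumped to the default */
			rate = DEF_MIN_RATE;
		else
			all_zero = 0;
		wsum += rate;
	}

	/* if every VN asked for 0, fairness is simply left disabled */
	printf("wsum = %u\n", all_zero ? 0 : wsum);
	return 0;
}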
1614 | static void bnx2x_init_port_minmax(struct bnx2x *bp, | ||
1615 | int en_fness, | ||
1616 | u16 port_rate, | ||
1617 | struct cmng_struct_per_port *m_cmng_port) | ||
1618 | { | ||
1619 | u32 r_param = port_rate / 8; | ||
1620 | int port = BP_PORT(bp); | ||
1621 | int i; | ||
1622 | |||
1623 | memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port)); | ||
1624 | |||
1625 | /* Enable minmax only if we are in e1hmf mode */ | ||
1626 | if (IS_E1HMF(bp)) { | ||
1627 | u32 fair_periodic_timeout_usec; | ||
1628 | u32 t_fair; | ||
1629 | |||
1630 | /* Enable rate shaping and fairness */ | ||
1631 | m_cmng_port->flags.cmng_vn_enable = 1; | ||
1632 | m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0; | ||
1633 | m_cmng_port->flags.rate_shaping_enable = 1; | ||
1634 | |||
1635 | if (!en_fness) | ||
1636 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" | ||
1637 | " - fairness will be disabled\n"); | ||
1638 | |||
1639 | /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ | ||
1640 | m_cmng_port->rs_vars.rs_periodic_timeout = | ||
1641 | RS_PERIODIC_TIMEOUT_USEC / 4; | ||
1642 | |||
1643 | /* this is the threshold below which no timer arming will occur; | ||
1644 | the 1.25 coefficient makes the threshold a little bigger | ||
1645 | than the real time, to compensate for timer inaccuracy */ | ||
1646 | m_cmng_port->rs_vars.rs_threshold = | ||
1647 | (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; | ||
1648 | |||
1649 | /* resolution of fairness timer */ | ||
1650 | fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; | ||
1651 | /* for 10G it is 1000usec. for 1G it is 10000usec. */ | ||
1652 | t_fair = T_FAIR_COEF / port_rate; | ||
1653 | |||
1654 | /* this is the threshold below which we won't arm | ||
1655 | the timer anymore */ | ||
1656 | m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES; | ||
1657 | |||
1658 | /* we multiply by 1e3/8 to get bytes/msec. | ||
1659 | We don't want the accumulated credit to exceed | ||
1660 | T_FAIR*FAIR_MEM (the algorithm resolution) */ | ||
1661 | m_cmng_port->fair_vars.upper_bound = | ||
1662 | r_param * t_fair * FAIR_MEM; | ||
1663 | /* since each tick is 4 usec */ | ||
1664 | m_cmng_port->fair_vars.fairness_timeout = | ||
1665 | fair_periodic_timeout_usec / 4; | ||
1666 | |||
1667 | } else { | ||
1668 | /* Disable rate shaping and fairness */ | ||
1669 | m_cmng_port->flags.cmng_vn_enable = 0; | ||
1670 | m_cmng_port->flags.fairness_enable = 0; | ||
1671 | m_cmng_port->flags.rate_shaping_enable = 0; | ||
1672 | |||
1673 | DP(NETIF_MSG_IFUP, | ||
1674 | "Single function mode minmax will be disabled\n"); | ||
1675 | } | ||
1676 | |||
1677 | /* Store it to internal memory */ | ||
1678 | for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) | ||
1679 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1680 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4, | ||
1681 | ((u32 *)(m_cmng_port))[i]); | ||
1682 | } | ||
1683 | |||
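For a feel of the numbers the port init above produces, here is a back-of-the-envelope sketch for a 10 Gbps port. RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10000000 are inferred from the comments in the function ("100 usec in SDM ticks = 25", t_fair is 1000 usec for 10G); QM_ARB_BYTES is not shown in this hunk, so the fairness-timer line is left out.

#include <stdio.h>

int main(void)
{
	unsigned int port_rate = 10000;			/* Mbps */
	unsigned int r_param = port_rate / 8;		/* roughly bytes per usec */
	unsigned int rs_timeout = 100 / 4;		/* 100 usec in 4-usec SDM ticks */
	unsigned int rs_threshold = (100 * r_param * 5) / 4;
	unsigned int t_fair = 10000000 / port_rate;	/* 1000 usec for a 10G port */

	printf("rs_periodic_timeout=%u ticks rs_threshold=%u bytes t_fair=%u usec\n",
	       rs_timeout, rs_threshold, t_fair);
	return 0;
}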
1684 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func, | ||
1685 | u32 wsum, u16 port_rate, | ||
1686 | struct cmng_struct_per_port *m_cmng_port) | ||
1687 | { | ||
1688 | struct rate_shaping_vars_per_vn m_rs_vn; | ||
1689 | struct fairness_vars_per_vn m_fair_vn; | ||
1690 | u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
1691 | u16 vn_min_rate, vn_max_rate; | ||
1692 | int i; | ||
1693 | |||
1694 | /* If function is hidden - set min and max to zeroes */ | ||
1695 | if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { | ||
1696 | vn_min_rate = 0; | ||
1697 | vn_max_rate = 0; | ||
1698 | |||
1699 | } else { | ||
1700 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | ||
1701 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | ||
1702 | /* If FAIRNESS is enabled (not all min rates are zeroes) and | ||
1703 | if current min rate is zero - set it to 1. | ||
1704 | This is a requirement of the algorithm. */ | ||
1705 | if ((vn_min_rate == 0) && wsum) | ||
1706 | vn_min_rate = DEF_MIN_RATE; | ||
1707 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
1708 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
1709 | } | ||
1710 | |||
1711 | DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d " | ||
1712 | "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum); | ||
1713 | |||
1714 | memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); | ||
1715 | memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); | ||
1716 | |||
1717 | /* global vn counter - maximal Mbps for this vn */ | ||
1718 | m_rs_vn.vn_counter.rate = vn_max_rate; | ||
1719 | |||
1720 | /* quota - number of bytes transmitted in this period */ | ||
1721 | m_rs_vn.vn_counter.quota = | ||
1722 | (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; | ||
1723 | |||
1724 | #ifdef BNX2X_PER_PROT_QOS | ||
1725 | /* per protocol counter */ | ||
1726 | for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) { | ||
1727 | /* maximal Mbps for this protocol */ | ||
1728 | m_rs_vn.protocol_counters[protocol].rate = | ||
1729 | protocol_max_rate[protocol]; | ||
1730 | /* the quota in each timer period - | ||
1731 | number of bytes transmitted in this period */ | ||
1732 | m_rs_vn.protocol_counters[protocol].quota = | ||
1733 | (u32)(rs_periodic_timeout_usec * | ||
1734 | ((double)m_rs_vn. | ||
1735 | protocol_counters[protocol].rate/8)); | ||
1736 | } | ||
1737 | #endif | ||
1738 | |||
1739 | if (wsum) { | ||
1740 | /* credit for each period of the fairness algorithm: | ||
1741 | number of bytes in T_FAIR (the vn's share of the port rate). | ||
1742 | wsum should not be larger than 10000, thus | ||
1743 | T_FAIR_COEF / (8 * wsum) will always be greater than zero */ | ||
1744 | m_fair_vn.vn_credit_delta = | ||
1745 | max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))), | ||
1746 | (u64)(m_cmng_port->fair_vars.fair_threshold * 2)); | ||
1747 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n", | ||
1748 | m_fair_vn.vn_credit_delta); | ||
1749 | } | ||
1750 | |||
1751 | #ifdef BNX2X_PER_PROT_QOS | ||
1752 | do { | ||
1753 | u32 protocolWeightSum = 0; | ||
1754 | |||
1755 | for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) | ||
1756 | protocolWeightSum += | ||
1757 | drvInit.protocol_min_rate[protocol]; | ||
1758 | /* per protocol counter - | ||
1759 | NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */ | ||
1760 | if (protocolWeightSum > 0) { | ||
1761 | for (protocol = 0; | ||
1762 | protocol < NUM_OF_PROTOCOLS; protocol++) | ||
1763 | /* credit for each period of the | ||
1764 | fairness algorithm - number of bytes in | ||
1765 | T_FAIR (the protocol's share of the vn rate) */ | ||
1766 | m_fair_vn.protocol_credit_delta[protocol] = | ||
1767 | (u32)((vn_min_rate / 8) * t_fair * | ||
1768 | protocol_min_rate / protocolWeightSum); | ||
1769 | } | ||
1770 | } while (0); | ||
1771 | #endif | ||
1772 | |||
1773 | /* Store it to internal memory */ | ||
1774 | for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) | ||
1775 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1776 | XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, | ||
1777 | ((u32 *)(&m_rs_vn))[i]); | ||
1778 | |||
1779 | for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) | ||
1780 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1781 | XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, | ||
1782 | ((u32 *)(&m_fair_vn))[i]); | ||
1783 | } | ||
1784 | |||
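The key line in the fairness branch above is vn_credit_delta = max(vn_min_rate * (T_FAIR_COEF / (8 * wsum)), 2 * fair_threshold). The sketch below plugs in hypothetical numbers; the constants and rates are assumptions carried over from the earlier sketches, and fair_threshold merely stands in for QM_ARB_BYTES.

#include <stdio.h>

static unsigned long long max_u64(unsigned long long a, unsigned long long b)
{
	return (a > b) ? a : b;
}

int main(void)
{
	unsigned int t_fair_coef = 10000000;	/* assumed, matches the 10G comment */
	unsigned int wsum = 3700;		/* from the earlier wsum sketch */
	unsigned int vn_min_rate = 2500;	/* Mbps, assumed */
	unsigned int vn_max_rate = 5000;	/* Mbps, assumed */
	unsigned int fair_threshold = 131072;	/* placeholder for QM_ARB_BYTES */

	/* quota: bytes this VN may send per 100-usec rate-shaping period */
	unsigned int quota = (vn_max_rate * 100) / 8;

	unsigned long long credit_delta =
		max_u64((unsigned long long)vn_min_rate *
			(t_fair_coef / (8 * wsum)),
			(unsigned long long)fair_threshold * 2);

	printf("quota=%u bytes vn_credit_delta=%llu bytes\n", quota, credit_delta);
	return 0;
}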
1476 | /* This function is called upon link interrupt */ | 1785 | /* This function is called upon link interrupt */ |
1477 | static void bnx2x_link_attn(struct bnx2x *bp) | 1786 | static void bnx2x_link_attn(struct bnx2x *bp) |
1478 | { | 1787 | { |
1788 | int vn; | ||
1789 | |||
1479 | bnx2x_phy_hw_lock(bp); | 1790 | bnx2x_phy_hw_lock(bp); |
1480 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 1791 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
1481 | bnx2x_phy_hw_unlock(bp); | 1792 | bnx2x_phy_hw_unlock(bp); |
1482 | 1793 | ||
1483 | /* indicate link status */ | 1794 | /* indicate link status */ |
1484 | bnx2x_link_report(bp); | 1795 | bnx2x_link_report(bp); |
1796 | |||
1797 | if (IS_E1HMF(bp)) { | ||
1798 | int func; | ||
1799 | |||
1800 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | ||
1801 | if (vn == BP_E1HVN(bp)) | ||
1802 | continue; | ||
1803 | |||
1804 | func = ((vn << 1) | BP_PORT(bp)); | ||
1805 | |||
1806 | /* Set the attention towards other drivers | ||
1807 | on the same port */ | ||
1808 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + | ||
1809 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1813 | if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) { | ||
1814 | struct cmng_struct_per_port m_cmng_port; | ||
1815 | u32 wsum; | ||
1816 | int port = BP_PORT(bp); | ||
1817 | |||
1818 | /* Init RATE SHAPING and FAIRNESS contexts */ | ||
1819 | wsum = bnx2x_calc_vn_wsum(bp); | ||
1820 | bnx2x_init_port_minmax(bp, (int)wsum, | ||
1821 | bp->link_vars.line_speed, | ||
1822 | &m_cmng_port); | ||
1823 | if (IS_E1HMF(bp)) | ||
1824 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | ||
1825 | bnx2x_init_vn_minmax(bp, 2*vn + port, | ||
1826 | wsum, bp->link_vars.line_speed, | ||
1827 | &m_cmng_port); | ||
1828 | } | ||
1485 | } | 1829 | } |
1486 | 1830 | ||
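The loop that forwards the link attention uses func = (vn << 1) | port to address the other functions sharing this physical port. A tiny standalone sketch of that mapping, with E1HVN_MAX assumed to be 4:

#include <stdio.h>

int main(void)
{
	int port, vn;

	for (port = 0; port < 2; port++)
		for (vn = 0; vn < 4; vn++)	/* E1HVN_MAX assumed to be 4 */
			printf("port %d vn %d -> func %d\n",
			       port, vn, (vn << 1) | port);
	return 0;
}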
1487 | static void bnx2x__link_status_update(struct bnx2x *bp) | 1831 | static void bnx2x__link_status_update(struct bnx2x *bp) |
@@ -1495,6 +1839,20 @@ static void bnx2x__link_status_update(struct bnx2x *bp) | |||
1495 | bnx2x_link_report(bp); | 1839 | bnx2x_link_report(bp); |
1496 | } | 1840 | } |
1497 | 1841 | ||
1842 | static void bnx2x_pmf_update(struct bnx2x *bp) | ||
1843 | { | ||
1844 | int port = BP_PORT(bp); | ||
1845 | u32 val; | ||
1846 | |||
1847 | bp->port.pmf = 1; | ||
1848 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
1849 | |||
1850 | /* enable nig attention */ | ||
1851 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | ||
1852 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | ||
1853 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | ||
1854 | } | ||
1855 | |||
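The mask written to the leading/trailing edge registers in bnx2x_pmf_update is 0xff0f plus this function's NIG attention bit at position VN + 4. A quick sketch of the resulting values, again assuming E1HVN_MAX is 4:

#include <stdio.h>

int main(void)
{
	int vn;

	for (vn = 0; vn < 4; vn++)	/* E1HVN_MAX assumed to be 4 */
		printf("vn %d -> edge mask 0x%04x\n", vn, 0xff0f | (1 << (vn + 4)));
	return 0;
}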
1498 | /* end of Link */ | 1856 | /* end of Link */ |
1499 | 1857 | ||
1500 | /* slow path */ | 1858 | /* slow path */ |
@@ -1507,10 +1865,10 @@ static void bnx2x__link_status_update(struct bnx2x *bp) | |||
1507 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1865 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
1508 | u32 data_hi, u32 data_lo, int common) | 1866 | u32 data_hi, u32 data_lo, int common) |
1509 | { | 1867 | { |
1510 | int port = bp->port; | 1868 | int func = BP_FUNC(bp); |
1511 | 1869 | ||
1512 | DP(NETIF_MSG_TIMER, | 1870 | DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, |
1513 | "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", | 1871 | "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", |
1514 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + | 1872 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + |
1515 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, | 1873 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, |
1516 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); | 1874 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); |
@@ -1520,11 +1878,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
1520 | return -EIO; | 1878 | return -EIO; |
1521 | #endif | 1879 | #endif |
1522 | 1880 | ||
1523 | spin_lock(&bp->spq_lock); | 1881 | spin_lock_bh(&bp->spq_lock); |
1524 | 1882 | ||
1525 | if (!bp->spq_left) { | 1883 | if (!bp->spq_left) { |
1526 | BNX2X_ERR("BUG! SPQ ring full!\n"); | 1884 | BNX2X_ERR("BUG! SPQ ring full!\n"); |
1527 | spin_unlock(&bp->spq_lock); | 1885 | spin_unlock_bh(&bp->spq_lock); |
1528 | bnx2x_panic(); | 1886 | bnx2x_panic(); |
1529 | return -EBUSY; | 1887 | return -EBUSY; |
1530 | } | 1888 | } |
@@ -1553,18 +1911,18 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
1553 | bp->spq_prod_idx++; | 1911 | bp->spq_prod_idx++; |
1554 | } | 1912 | } |
1555 | 1913 | ||
1556 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port), | 1914 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), |
1557 | bp->spq_prod_idx); | 1915 | bp->spq_prod_idx); |
1558 | 1916 | ||
1559 | spin_unlock(&bp->spq_lock); | 1917 | spin_unlock_bh(&bp->spq_lock); |
1560 | return 0; | 1918 | return 0; |
1561 | } | 1919 | } |
1562 | 1920 | ||
1563 | /* acquire split MCP access lock register */ | 1921 | /* acquire split MCP access lock register */ |
1564 | static int bnx2x_lock_alr(struct bnx2x *bp) | 1922 | static int bnx2x_lock_alr(struct bnx2x *bp) |
1565 | { | 1923 | { |
1566 | int rc = 0; | ||
1567 | u32 i, j, val; | 1924 | u32 i, j, val; |
1925 | int rc = 0; | ||
1568 | 1926 | ||
1569 | might_sleep(); | 1927 | might_sleep(); |
1570 | i = 100; | 1928 | i = 100; |
@@ -1577,10 +1935,8 @@ static int bnx2x_lock_alr(struct bnx2x *bp) | |||
1577 | 1935 | ||
1578 | msleep(5); | 1936 | msleep(5); |
1579 | } | 1937 | } |
1580 | |||
1581 | if (!(val & (1L << 31))) { | 1938 | if (!(val & (1L << 31))) { |
1582 | BNX2X_ERR("Cannot acquire nvram interface\n"); | 1939 | BNX2X_ERR("Cannot acquire nvram interface\n"); |
1583 | |||
1584 | rc = -EBUSY; | 1940 | rc = -EBUSY; |
1585 | } | 1941 | } |
1586 | 1942 | ||
@@ -1631,8 +1987,9 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | |||
1631 | 1987 | ||
1632 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | 1988 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) |
1633 | { | 1989 | { |
1634 | int port = bp->port; | 1990 | int port = BP_PORT(bp); |
1635 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; | 1991 | int func = BP_FUNC(bp); |
1992 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; | ||
1636 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 1993 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
1637 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 1994 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
1638 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : | 1995 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : |
@@ -1716,14 +2073,14 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
1716 | 2073 | ||
1717 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | 2074 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) |
1718 | { | 2075 | { |
1719 | int port = bp->port; | 2076 | int port = BP_PORT(bp); |
1720 | int reg_offset; | 2077 | int reg_offset; |
1721 | u32 val; | 2078 | u32 val; |
1722 | 2079 | ||
1723 | if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { | 2080 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
2081 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
1724 | 2082 | ||
1725 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 2083 | if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { |
1726 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
1727 | 2084 | ||
1728 | val = REG_RD(bp, reg_offset); | 2085 | val = REG_RD(bp, reg_offset); |
1729 | val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; | 2086 | val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; |
@@ -1731,7 +2088,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
1731 | 2088 | ||
1732 | BNX2X_ERR("SPIO5 hw attention\n"); | 2089 | BNX2X_ERR("SPIO5 hw attention\n"); |
1733 | 2090 | ||
1734 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 2091 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
1735 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 2092 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
1736 | /* Fan failure attention */ | 2093 | /* Fan failure attention */ |
1737 | 2094 | ||
@@ -1762,6 +2119,17 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
1762 | break; | 2119 | break; |
1763 | } | 2120 | } |
1764 | } | 2121 | } |
2122 | |||
2123 | if (attn & HW_INTERRUT_ASSERT_SET_0) { | ||
2124 | |||
2125 | val = REG_RD(bp, reg_offset); | ||
2126 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); | ||
2127 | REG_WR(bp, reg_offset, val); | ||
2128 | |||
2129 | BNX2X_ERR("FATAL HW block attention set0 0x%x\n", | ||
2130 | (attn & HW_INTERRUT_ASSERT_SET_0)); | ||
2131 | bnx2x_panic(); | ||
2132 | } | ||
1765 | } | 2133 | } |
1766 | 2134 | ||
1767 | static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) | 2135 | static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) |
@@ -1776,6 +2144,23 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) | |||
1776 | if (val & 0x2) | 2144 | if (val & 0x2) |
1777 | BNX2X_ERR("FATAL error from DORQ\n"); | 2145 | BNX2X_ERR("FATAL error from DORQ\n"); |
1778 | } | 2146 | } |
2147 | |||
2148 | if (attn & HW_INTERRUT_ASSERT_SET_1) { | ||
2149 | |||
2150 | int port = BP_PORT(bp); | ||
2151 | int reg_offset; | ||
2152 | |||
2153 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : | ||
2154 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); | ||
2155 | |||
2156 | val = REG_RD(bp, reg_offset); | ||
2157 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); | ||
2158 | REG_WR(bp, reg_offset, val); | ||
2159 | |||
2160 | BNX2X_ERR("FATAL HW block attention set1 0x%x\n", | ||
2161 | (attn & HW_INTERRUT_ASSERT_SET_1)); | ||
2162 | bnx2x_panic(); | ||
2163 | } | ||
1779 | } | 2164 | } |
1780 | 2165 | ||
1781 | static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | 2166 | static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) |
@@ -1799,13 +2184,41 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | |||
1799 | if (val & 0x18000) | 2184 | if (val & 0x18000) |
1800 | BNX2X_ERR("FATAL error from PXP\n"); | 2185 | BNX2X_ERR("FATAL error from PXP\n"); |
1801 | } | 2186 | } |
2187 | |||
2188 | if (attn & HW_INTERRUT_ASSERT_SET_2) { | ||
2189 | |||
2190 | int port = BP_PORT(bp); | ||
2191 | int reg_offset; | ||
2192 | |||
2193 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : | ||
2194 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); | ||
2195 | |||
2196 | val = REG_RD(bp, reg_offset); | ||
2197 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); | ||
2198 | REG_WR(bp, reg_offset, val); | ||
2199 | |||
2200 | BNX2X_ERR("FATAL HW block attention set2 0x%x\n", | ||
2201 | (attn & HW_INTERRUT_ASSERT_SET_2)); | ||
2202 | bnx2x_panic(); | ||
2203 | } | ||
1802 | } | 2204 | } |
1803 | 2205 | ||
1804 | static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | 2206 | static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) |
1805 | { | 2207 | { |
2208 | u32 val; | ||
2209 | |||
1806 | if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { | 2210 | if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { |
1807 | 2211 | ||
1808 | if (attn & BNX2X_MC_ASSERT_BITS) { | 2212 | if (attn & BNX2X_PMF_LINK_ASSERT) { |
2213 | int func = BP_FUNC(bp); | ||
2214 | |||
2215 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | ||
2216 | bnx2x__link_status_update(bp); | ||
2217 | if (SHMEM_RD(bp, func_mb[func].drv_status) & | ||
2218 | DRV_STATUS_PMF) | ||
2219 | bnx2x_pmf_update(bp); | ||
2220 | |||
2221 | } else if (attn & BNX2X_MC_ASSERT_BITS) { | ||
1809 | 2222 | ||
1810 | BNX2X_ERR("MC assert!\n"); | 2223 | BNX2X_ERR("MC assert!\n"); |
1811 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); | 2224 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); |
@@ -1818,16 +2231,25 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
1818 | 2231 | ||
1819 | BNX2X_ERR("MCP assert!\n"); | 2232 | BNX2X_ERR("MCP assert!\n"); |
1820 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); | 2233 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); |
1821 | bnx2x_mc_assert(bp); | 2234 | bnx2x_fw_dump(bp); |
1822 | 2235 | ||
1823 | } else | 2236 | } else |
1824 | BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); | 2237 | BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); |
1825 | } | 2238 | } |
1826 | 2239 | ||
1827 | if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { | 2240 | if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { |
1828 | 2241 | BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); | |
2242 | if (attn & BNX2X_GRC_TIMEOUT) { | ||
2243 | val = CHIP_IS_E1H(bp) ? | ||
2244 | REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; | ||
2245 | BNX2X_ERR("GRC time-out 0x%08x\n", val); | ||
2246 | } | ||
2247 | if (attn & BNX2X_GRC_RSV) { | ||
2248 | val = CHIP_IS_E1H(bp) ? | ||
2249 | REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; | ||
2250 | BNX2X_ERR("GRC reserved 0x%08x\n", val); | ||
2251 | } | ||
1829 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); | 2252 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); |
1830 | BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn); | ||
1831 | } | 2253 | } |
1832 | } | 2254 | } |
1833 | 2255 | ||
@@ -1835,7 +2257,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1835 | { | 2257 | { |
1836 | struct attn_route attn; | 2258 | struct attn_route attn; |
1837 | struct attn_route group_mask; | 2259 | struct attn_route group_mask; |
1838 | int port = bp->port; | 2260 | int port = BP_PORT(bp); |
1839 | int index; | 2261 | int index; |
1840 | u32 reg_addr; | 2262 | u32 reg_addr; |
1841 | u32 val; | 2263 | u32 val; |
@@ -1848,14 +2270,16 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1848 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 2270 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
1849 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); | 2271 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
1850 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); | 2272 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); |
1851 | DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]); | 2273 | DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", |
2274 | attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); | ||
1852 | 2275 | ||
1853 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 2276 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
1854 | if (deasserted & (1 << index)) { | 2277 | if (deasserted & (1 << index)) { |
1855 | group_mask = bp->attn_group[index]; | 2278 | group_mask = bp->attn_group[index]; |
1856 | 2279 | ||
1857 | DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, | 2280 | DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", |
1858 | (unsigned long long)group_mask.sig[0]); | 2281 | index, group_mask.sig[0], group_mask.sig[1], |
2282 | group_mask.sig[2], group_mask.sig[3]); | ||
1859 | 2283 | ||
1860 | bnx2x_attn_int_deasserted3(bp, | 2284 | bnx2x_attn_int_deasserted3(bp, |
1861 | attn.sig[3] & group_mask.sig[3]); | 2285 | attn.sig[3] & group_mask.sig[3]); |
@@ -1867,22 +2291,6 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1867 | attn.sig[0] & group_mask.sig[0]); | 2291 | attn.sig[0] & group_mask.sig[0]); |
1868 | 2292 | ||
1869 | if ((attn.sig[0] & group_mask.sig[0] & | 2293 | if ((attn.sig[0] & group_mask.sig[0] & |
1870 | HW_INTERRUT_ASSERT_SET_0) || | ||
1871 | (attn.sig[1] & group_mask.sig[1] & | ||
1872 | HW_INTERRUT_ASSERT_SET_1) || | ||
1873 | (attn.sig[2] & group_mask.sig[2] & | ||
1874 | HW_INTERRUT_ASSERT_SET_2)) | ||
1875 | BNX2X_ERR("FATAL HW block attention" | ||
1876 | " set0 0x%x set1 0x%x" | ||
1877 | " set2 0x%x\n", | ||
1878 | (attn.sig[0] & group_mask.sig[0] & | ||
1879 | HW_INTERRUT_ASSERT_SET_0), | ||
1880 | (attn.sig[1] & group_mask.sig[1] & | ||
1881 | HW_INTERRUT_ASSERT_SET_1), | ||
1882 | (attn.sig[2] & group_mask.sig[2] & | ||
1883 | HW_INTERRUT_ASSERT_SET_2)); | ||
1884 | |||
1885 | if ((attn.sig[0] & group_mask.sig[0] & | ||
1886 | HW_PRTY_ASSERT_SET_0) || | 2294 | HW_PRTY_ASSERT_SET_0) || |
1887 | (attn.sig[1] & group_mask.sig[1] & | 2295 | (attn.sig[1] & group_mask.sig[1] & |
1888 | HW_PRTY_ASSERT_SET_1) || | 2296 | HW_PRTY_ASSERT_SET_1) || |
@@ -1894,17 +2302,17 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1894 | 2302 | ||
1895 | bnx2x_unlock_alr(bp); | 2303 | bnx2x_unlock_alr(bp); |
1896 | 2304 | ||
1897 | reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8; | 2305 | reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
1898 | 2306 | ||
1899 | val = ~deasserted; | 2307 | val = ~deasserted; |
1900 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", | 2308 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", |
1901 | val, BAR_IGU_INTMEM + reg_addr); */ | 2309 | val, BAR_IGU_INTMEM + reg_addr); */ |
1902 | REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); | 2310 | REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); |
1903 | 2311 | ||
1904 | if (bp->aeu_mask & (deasserted & 0xff)) | 2312 | if (bp->aeu_mask & (deasserted & 0xff)) |
1905 | BNX2X_ERR("IGU BUG\n"); | 2313 | BNX2X_ERR("IGU BUG!\n"); |
1906 | if (~bp->attn_state & deasserted) | 2314 | if (~bp->attn_state & deasserted) |
1907 | BNX2X_ERR("IGU BUG\n"); | 2315 | BNX2X_ERR("IGU BUG!\n"); |
1908 | 2316 | ||
1909 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 2317 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
1910 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 2318 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
@@ -1936,7 +2344,7 @@ static void bnx2x_attn_int(struct bnx2x *bp) | |||
1936 | attn_bits, attn_ack, asserted, deasserted); | 2344 | attn_bits, attn_ack, asserted, deasserted); |
1937 | 2345 | ||
1938 | if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) | 2346 | if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) |
1939 | BNX2X_ERR("bad attention state\n"); | 2347 | BNX2X_ERR("BAD attention state\n"); |
1940 | 2348 | ||
1941 | /* handle bits that were raised */ | 2349 | /* handle bits that were raised */ |
1942 | if (asserted) | 2350 | if (asserted) |
@@ -1951,6 +2359,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
1951 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); | 2359 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); |
1952 | u16 status; | 2360 | u16 status; |
1953 | 2361 | ||
2362 | |||
1954 | /* Return here if interrupt is disabled */ | 2363 | /* Return here if interrupt is disabled */ |
1955 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 2364 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
1956 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); | 2365 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); |
@@ -1958,19 +2367,15 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
1958 | } | 2367 | } |
1959 | 2368 | ||
1960 | status = bnx2x_update_dsb_idx(bp); | 2369 | status = bnx2x_update_dsb_idx(bp); |
1961 | if (status == 0) | 2370 | /* if (status == 0) */ |
1962 | BNX2X_ERR("spurious slowpath interrupt!\n"); | 2371 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ |
1963 | 2372 | ||
1964 | DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); | 2373 | DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); |
1965 | 2374 | ||
1966 | /* HW attentions */ | 2375 | /* HW attentions */ |
1967 | if (status & 0x1) | 2376 | if (status & 0x1) |
1968 | bnx2x_attn_int(bp); | 2377 | bnx2x_attn_int(bp); |
1969 | 2378 | ||
1970 | /* CStorm events: query_stats, port delete ramrod */ | ||
1971 | if (status & 0x2) | ||
1972 | bp->stat_pending = 0; | ||
1973 | |||
1974 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, | 2379 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, |
1975 | IGU_INT_NOP, 1); | 2380 | IGU_INT_NOP, 1); |
1976 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), | 2381 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), |
@@ -2109,13 +2514,13 @@ static inline long bnx2x_hilo(u32 *hiref) | |||
2109 | static void bnx2x_init_mac_stats(struct bnx2x *bp) | 2514 | static void bnx2x_init_mac_stats(struct bnx2x *bp) |
2110 | { | 2515 | { |
2111 | struct dmae_command *dmae; | 2516 | struct dmae_command *dmae; |
2112 | int port = bp->port; | 2517 | int port = BP_PORT(bp); |
2113 | int loader_idx = port * 8; | 2518 | int loader_idx = port * 8; |
2114 | u32 opcode; | 2519 | u32 opcode; |
2115 | u32 mac_addr; | 2520 | u32 mac_addr; |
2116 | 2521 | ||
2117 | bp->executer_idx = 0; | 2522 | bp->executer_idx = 0; |
2118 | if (bp->fw_mb) { | 2523 | if (bp->func_stx) { |
2119 | /* MCP */ | 2524 | /* MCP */ |
2120 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | 2525 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | |
2121 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | 2526 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | |
@@ -2135,7 +2540,7 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp) | |||
2135 | sizeof(u32)); | 2540 | sizeof(u32)); |
2136 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) + | 2541 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) + |
2137 | sizeof(u32)); | 2542 | sizeof(u32)); |
2138 | dmae->dst_addr_lo = bp->fw_mb >> 2; | 2543 | dmae->dst_addr_lo = bp->func_stx >> 2; |
2139 | dmae->dst_addr_hi = 0; | 2544 | dmae->dst_addr_hi = 0; |
2140 | dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) - | 2545 | dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) - |
2141 | sizeof(u32)) >> 2; | 2546 | sizeof(u32)) >> 2; |
@@ -2280,7 +2685,7 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp) | |||
2280 | 2685 | ||
2281 | static void bnx2x_init_stats(struct bnx2x *bp) | 2686 | static void bnx2x_init_stats(struct bnx2x *bp) |
2282 | { | 2687 | { |
2283 | int port = bp->port; | 2688 | int port = BP_PORT(bp); |
2284 | 2689 | ||
2285 | bp->stats_state = STATS_STATE_DISABLE; | 2690 | bp->stats_state = STATS_STATE_DISABLE; |
2286 | bp->executer_idx = 0; | 2691 | bp->executer_idx = 0; |
@@ -2641,8 +3046,6 @@ static void bnx2x_update_net_stats(struct bnx2x *bp) | |||
2641 | 3046 | ||
2642 | static void bnx2x_update_stats(struct bnx2x *bp) | 3047 | static void bnx2x_update_stats(struct bnx2x *bp) |
2643 | { | 3048 | { |
2644 | int i; | ||
2645 | |||
2646 | if (!bnx2x_update_storm_stats(bp)) { | 3049 | if (!bnx2x_update_storm_stats(bp)) { |
2647 | 3050 | ||
2648 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 3051 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { |
@@ -2662,6 +3065,7 @@ static void bnx2x_update_stats(struct bnx2x *bp) | |||
2662 | if (bp->msglevel & NETIF_MSG_TIMER) { | 3065 | if (bp->msglevel & NETIF_MSG_TIMER) { |
2663 | struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats); | 3066 | struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats); |
2664 | struct net_device_stats *nstats = &bp->dev->stats; | 3067 | struct net_device_stats *nstats = &bp->dev->stats; |
3068 | int i; | ||
2665 | 3069 | ||
2666 | printk(KERN_DEBUG "%s:\n", bp->dev->name); | 3070 | printk(KERN_DEBUG "%s:\n", bp->dev->name); |
2667 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" | 3071 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" |
@@ -2707,7 +3111,7 @@ static void bnx2x_update_stats(struct bnx2x *bp) | |||
2707 | /* loader */ | 3111 | /* loader */ |
2708 | if (bp->executer_idx) { | 3112 | if (bp->executer_idx) { |
2709 | struct dmae_command *dmae = &bp->dmae; | 3113 | struct dmae_command *dmae = &bp->dmae; |
2710 | int port = bp->port; | 3114 | int port = BP_PORT(bp); |
2711 | int loader_idx = port * 8; | 3115 | int loader_idx = port * 8; |
2712 | 3116 | ||
2713 | memset(dmae, 0, sizeof(struct dmae_command)); | 3117 | memset(dmae, 0, sizeof(struct dmae_command)); |
@@ -2766,8 +3170,8 @@ static void bnx2x_timer(unsigned long data) | |||
2766 | rc = bnx2x_rx_int(fp, 1000); | 3170 | rc = bnx2x_rx_int(fp, 1000); |
2767 | } | 3171 | } |
2768 | 3172 | ||
2769 | if (!nomcp) { | 3173 | if (!BP_NOMCP(bp)) { |
2770 | int port = bp->port; | 3174 | int func = BP_FUNC(bp); |
2771 | u32 drv_pulse; | 3175 | u32 drv_pulse; |
2772 | u32 mcp_pulse; | 3176 | u32 mcp_pulse; |
2773 | 3177 | ||
@@ -2775,9 +3179,9 @@ static void bnx2x_timer(unsigned long data) | |||
2775 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 3179 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
2776 | /* TBD - add SYSTEM_TIME */ | 3180 | /* TBD - add SYSTEM_TIME */ |
2777 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 3181 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
2778 | SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse); | 3182 | SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); |
2779 | 3183 | ||
2780 | mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) & | 3184 | mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & |
2781 | MCP_PULSE_SEQ_MASK); | 3185 | MCP_PULSE_SEQ_MASK); |
2782 | /* The delta between driver pulse and mcp response | 3186 | /* The delta between driver pulse and mcp response |
2783 | * should be 1 (before mcp response) or 0 (after mcp response) | 3187 | * should be 1 (before mcp response) or 0 (after mcp response) |
@@ -2807,58 +3211,89 @@ timer_restart: | |||
2807 | * nic init service functions | 3211 | * nic init service functions |
2808 | */ | 3212 | */ |
2809 | 3213 | ||
2810 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | 3214 | static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) |
2811 | dma_addr_t mapping, int id) | ||
2812 | { | 3215 | { |
2813 | int port = bp->port; | 3216 | int port = BP_PORT(bp); |
2814 | u64 section; | 3217 | |
3218 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + | ||
3219 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | ||
3220 | sizeof(struct ustorm_def_status_block)/4); | ||
3221 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + | ||
3222 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | ||
3223 | sizeof(struct cstorm_def_status_block)/4); | ||
3224 | } | ||
3225 | |||
3226 | static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, | ||
3227 | struct host_status_block *sb, dma_addr_t mapping) | ||
3228 | { | ||
3229 | int port = BP_PORT(bp); | ||
2815 | int index; | 3230 | int index; |
3231 | u64 section; | ||
2816 | 3232 | ||
2817 | /* USTORM */ | 3233 | /* USTORM */ |
2818 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3234 | section = ((u64)mapping) + offsetof(struct host_status_block, |
2819 | u_status_block); | 3235 | u_status_block); |
2820 | sb->u_status_block.status_block_id = id; | 3236 | sb->u_status_block.status_block_id = sb_id; |
2821 | 3237 | ||
2822 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3238 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2823 | USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section)); | 3239 | USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); |
2824 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3240 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2825 | ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4), | 3241 | ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), |
2826 | U64_HI(section)); | 3242 | U64_HI(section)); |
2827 | 3243 | ||
2828 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) | 3244 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) |
2829 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3245 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2830 | USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1); | 3246 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); |
2831 | 3247 | ||
2832 | /* CSTORM */ | 3248 | /* CSTORM */ |
2833 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3249 | section = ((u64)mapping) + offsetof(struct host_status_block, |
2834 | c_status_block); | 3250 | c_status_block); |
2835 | sb->c_status_block.status_block_id = id; | 3251 | sb->c_status_block.status_block_id = sb_id; |
2836 | 3252 | ||
2837 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3253 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2838 | CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section)); | 3254 | CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); |
2839 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3255 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2840 | ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4), | 3256 | ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), |
2841 | U64_HI(section)); | 3257 | U64_HI(section)); |
2842 | 3258 | ||
2843 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) | 3259 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) |
2844 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3260 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2845 | CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1); | 3261 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); |
2846 | 3262 | ||
2847 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 3263 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
3264 | } | ||
3265 | |||
3266 | static void bnx2x_zero_def_sb(struct bnx2x *bp) | ||
3267 | { | ||
3268 | int func = BP_FUNC(bp); | ||
3269 | |||
3270 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + | ||
3271 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3272 | sizeof(struct ustorm_def_status_block)/4); | ||
3273 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + | ||
3274 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3275 | sizeof(struct cstorm_def_status_block)/4); | ||
3276 | bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM + | ||
3277 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3278 | sizeof(struct xstorm_def_status_block)/4); | ||
3279 | bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM + | ||
3280 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3281 | sizeof(struct tstorm_def_status_block)/4); | ||
2848 | } | 3282 | } |
2849 | 3283 | ||
2850 | static void bnx2x_init_def_sb(struct bnx2x *bp, | 3284 | static void bnx2x_init_def_sb(struct bnx2x *bp, |
2851 | struct host_def_status_block *def_sb, | 3285 | struct host_def_status_block *def_sb, |
2852 | dma_addr_t mapping, int id) | 3286 | dma_addr_t mapping, int sb_id) |
2853 | { | 3287 | { |
2854 | int port = bp->port; | 3288 | int port = BP_PORT(bp); |
3289 | int func = BP_FUNC(bp); | ||
2855 | int index, val, reg_offset; | 3290 | int index, val, reg_offset; |
2856 | u64 section; | 3291 | u64 section; |
2857 | 3292 | ||
2858 | /* ATTN */ | 3293 | /* ATTN */ |
2859 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3294 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2860 | atten_status_block); | 3295 | atten_status_block); |
2861 | def_sb->atten_status_block.status_block_id = id; | 3296 | def_sb->atten_status_block.status_block_id = sb_id; |
2862 | 3297 | ||
2863 | bp->def_att_idx = 0; | 3298 | bp->def_att_idx = 0; |
2864 | bp->attn_state = 0; | 3299 | bp->attn_state = 0; |
@@ -2866,7 +3301,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
2866 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 3301 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
2867 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 3302 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
2868 | 3303 | ||
2869 | for (index = 0; index < 3; index++) { | 3304 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
2870 | bp->attn_group[index].sig[0] = REG_RD(bp, | 3305 | bp->attn_group[index].sig[0] = REG_RD(bp, |
2871 | reg_offset + 0x10*index); | 3306 | reg_offset + 0x10*index); |
2872 | bp->attn_group[index].sig[1] = REG_RD(bp, | 3307 | bp->attn_group[index].sig[1] = REG_RD(bp, |
@@ -2889,116 +3324,123 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
2889 | reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); | 3324 | reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); |
2890 | 3325 | ||
2891 | val = REG_RD(bp, reg_offset); | 3326 | val = REG_RD(bp, reg_offset); |
2892 | val |= id; | 3327 | val |= sb_id; |
2893 | REG_WR(bp, reg_offset, val); | 3328 | REG_WR(bp, reg_offset, val); |
2894 | 3329 | ||
2895 | /* USTORM */ | 3330 | /* USTORM */ |
2896 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3331 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2897 | u_def_status_block); | 3332 | u_def_status_block); |
2898 | def_sb->u_def_status_block.status_block_id = id; | 3333 | def_sb->u_def_status_block.status_block_id = sb_id; |
2899 | 3334 | ||
2900 | bp->def_u_idx = 0; | 3335 | bp->def_u_idx = 0; |
2901 | 3336 | ||
2902 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3337 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2903 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3338 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2904 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3339 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2905 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3340 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2906 | U64_HI(section)); | 3341 | U64_HI(section)); |
2907 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), | 3342 | REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + |
3343 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3344 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func), | ||
2908 | BNX2X_BTR); | 3345 | BNX2X_BTR); |
2909 | 3346 | ||
2910 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) | 3347 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) |
2911 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3348 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2912 | USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3349 | USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2913 | 3350 | ||
2914 | /* CSTORM */ | 3351 | /* CSTORM */ |
2915 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3352 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2916 | c_def_status_block); | 3353 | c_def_status_block); |
2917 | def_sb->c_def_status_block.status_block_id = id; | 3354 | def_sb->c_def_status_block.status_block_id = sb_id; |
2918 | 3355 | ||
2919 | bp->def_c_idx = 0; | 3356 | bp->def_c_idx = 0; |
2920 | 3357 | ||
2921 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3358 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2922 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3359 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2923 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3360 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2924 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3361 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2925 | U64_HI(section)); | 3362 | U64_HI(section)); |
2926 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), | 3363 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + |
3364 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3365 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func), | ||
2927 | BNX2X_BTR); | 3366 | BNX2X_BTR); |
2928 | 3367 | ||
2929 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) | 3368 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) |
2930 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3369 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2931 | CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3370 | CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2932 | 3371 | ||
2933 | /* TSTORM */ | 3372 | /* TSTORM */ |
2934 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3373 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2935 | t_def_status_block); | 3374 | t_def_status_block); |
2936 | def_sb->t_def_status_block.status_block_id = id; | 3375 | def_sb->t_def_status_block.status_block_id = sb_id; |
2937 | 3376 | ||
2938 | bp->def_t_idx = 0; | 3377 | bp->def_t_idx = 0; |
2939 | 3378 | ||
2940 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3379 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
2941 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3380 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2942 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3381 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
2943 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3382 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2944 | U64_HI(section)); | 3383 | U64_HI(section)); |
2945 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), | 3384 | REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + |
3385 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3386 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func), | ||
2946 | BNX2X_BTR); | 3387 | BNX2X_BTR); |
2947 | 3388 | ||
2948 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) | 3389 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) |
2949 | REG_WR16(bp, BAR_TSTRORM_INTMEM + | 3390 | REG_WR16(bp, BAR_TSTRORM_INTMEM + |
2950 | TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3391 | TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2951 | 3392 | ||
2952 | /* XSTORM */ | 3393 | /* XSTORM */ |
2953 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3394 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2954 | x_def_status_block); | 3395 | x_def_status_block); |
2955 | def_sb->x_def_status_block.status_block_id = id; | 3396 | def_sb->x_def_status_block.status_block_id = sb_id; |
2956 | 3397 | ||
2957 | bp->def_x_idx = 0; | 3398 | bp->def_x_idx = 0; |
2958 | 3399 | ||
2959 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 3400 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
2960 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3401 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2961 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 3402 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
2962 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3403 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2963 | U64_HI(section)); | 3404 | U64_HI(section)); |
2964 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), | 3405 | REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + |
3406 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3407 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func), | ||
2965 | BNX2X_BTR); | 3408 | BNX2X_BTR); |
2966 | 3409 | ||
2967 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) | 3410 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) |
2968 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | 3411 | REG_WR16(bp, BAR_XSTRORM_INTMEM + |
2969 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3412 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2970 | 3413 | ||
2971 | bp->stat_pending = 0; | 3414 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
2972 | |||
2973 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | ||
2974 | } | 3415 | } |
2975 | 3416 | ||
2976 | static void bnx2x_update_coalesce(struct bnx2x *bp) | 3417 | static void bnx2x_update_coalesce(struct bnx2x *bp) |
2977 | { | 3418 | { |
2978 | int port = bp->port; | 3419 | int port = BP_PORT(bp); |
2979 | int i; | 3420 | int i; |
2980 | 3421 | ||
2981 | for_each_queue(bp, i) { | 3422 | for_each_queue(bp, i) { |
3423 | int sb_id = bp->fp[i].sb_id; | ||
2982 | 3424 | ||
2983 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ | 3425 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ |
2984 | REG_WR8(bp, BAR_USTRORM_INTMEM + | 3426 | REG_WR8(bp, BAR_USTRORM_INTMEM + |
2985 | USTORM_SB_HC_TIMEOUT_OFFSET(port, i, | 3427 | USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
2986 | HC_INDEX_U_ETH_RX_CQ_CONS), | 3428 | HC_INDEX_U_ETH_RX_CQ_CONS), |
2987 | bp->rx_ticks_int/12); | 3429 | bp->rx_ticks/12); |
2988 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3430 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2989 | USTORM_SB_HC_DISABLE_OFFSET(port, i, | 3431 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
2990 | HC_INDEX_U_ETH_RX_CQ_CONS), | 3432 | HC_INDEX_U_ETH_RX_CQ_CONS), |
2991 | bp->rx_ticks_int ? 0 : 1); | 3433 | bp->rx_ticks ? 0 : 1); |
2992 | 3434 | ||
2993 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | 3435 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ |
2994 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 3436 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
2995 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, i, | 3437 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
2996 | HC_INDEX_C_ETH_TX_CQ_CONS), | 3438 | HC_INDEX_C_ETH_TX_CQ_CONS), |
2997 | bp->tx_ticks_int/12); | 3439 | bp->tx_ticks/12); |
2998 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3440 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2999 | CSTORM_SB_HC_DISABLE_OFFSET(port, i, | 3441 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
3000 | HC_INDEX_C_ETH_TX_CQ_CONS), | 3442 | HC_INDEX_C_ETH_TX_CQ_CONS), |
3001 | bp->tx_ticks_int ? 0 : 1); | 3443 | bp->tx_ticks ? 0 : 1); |
3002 | } | 3444 | } |
3003 | } | 3445 | } |
3004 | 3446 | ||
@@ -3006,7 +3448,6 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3006 | { | 3448 | { |
3007 | u16 ring_prod; | 3449 | u16 ring_prod; |
3008 | int i, j; | 3450 | int i, j; |
3009 | int port = bp->port; | ||
3010 | 3451 | ||
3011 | bp->rx_buf_use_size = bp->dev->mtu; | 3452 | bp->rx_buf_use_size = bp->dev->mtu; |
3012 | 3453 | ||
@@ -3025,13 +3466,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3025 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; | 3466 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; |
3026 | rx_bd->addr_hi = | 3467 | rx_bd->addr_hi = |
3027 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + | 3468 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + |
3028 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | 3469 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); |
3029 | rx_bd->addr_lo = | 3470 | rx_bd->addr_lo = |
3030 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + | 3471 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + |
3031 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | 3472 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); |
3032 | |||
3033 | } | 3473 | } |
3034 | 3474 | ||
3475 | /* CQ ring */ | ||
3035 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { | 3476 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { |
3036 | struct eth_rx_cqe_next_page *nextpg; | 3477 | struct eth_rx_cqe_next_page *nextpg; |
3037 | 3478 | ||
@@ -3039,10 +3480,10 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3039 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; | 3480 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; |
3040 | nextpg->addr_hi = | 3481 | nextpg->addr_hi = |
3041 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + | 3482 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + |
3042 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | 3483 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
3043 | nextpg->addr_lo = | 3484 | nextpg->addr_lo = |
3044 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + | 3485 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + |
3045 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | 3486 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
3046 | } | 3487 | } |
3047 | 3488 | ||
3048 | /* rx completion queue */ | 3489 | /* rx completion queue */ |
@@ -3064,15 +3505,16 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3064 | /* Warning! this will generate an interrupt (to the TSTORM) */ | 3505 | /* Warning! this will generate an interrupt (to the TSTORM) */ |
3065 | /* must only be done when chip is initialized */ | 3506 | /* must only be done when chip is initialized */ |
3066 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3507 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3067 | TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); | 3508 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)), |
3509 | ring_prod); | ||
3068 | if (j != 0) | 3510 | if (j != 0) |
3069 | continue; | 3511 | continue; |
3070 | 3512 | ||
3071 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3513 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3072 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port), | 3514 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)), |
3073 | U64_LO(fp->rx_comp_mapping)); | 3515 | U64_LO(fp->rx_comp_mapping)); |
3074 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3516 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3075 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4, | 3517 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)) + 4, |
3076 | U64_HI(fp->rx_comp_mapping)); | 3518 | U64_HI(fp->rx_comp_mapping)); |
3077 | } | 3519 | } |
3078 | } | 3520 | } |
@@ -3090,10 +3532,10 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
3090 | 3532 | ||
3091 | tx_bd->addr_hi = | 3533 | tx_bd->addr_hi = |
3092 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | 3534 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + |
3093 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 3535 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
3094 | tx_bd->addr_lo = | 3536 | tx_bd->addr_lo = |
3095 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | 3537 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + |
3096 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 3538 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
3097 | } | 3539 | } |
3098 | 3540 | ||
3099 | fp->tx_pkt_prod = 0; | 3541 | fp->tx_pkt_prod = 0; |
@@ -3107,7 +3549,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
3107 | 3549 | ||
3108 | static void bnx2x_init_sp_ring(struct bnx2x *bp) | 3550 | static void bnx2x_init_sp_ring(struct bnx2x *bp) |
3109 | { | 3551 | { |
3110 | int port = bp->port; | 3552 | int func = BP_FUNC(bp); |
3111 | 3553 | ||
3112 | spin_lock_init(&bp->spq_lock); | 3554 | spin_lock_init(&bp->spq_lock); |
3113 | 3555 | ||
@@ -3117,12 +3559,13 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp) | |||
3117 | bp->spq_prod_bd = bp->spq; | 3559 | bp->spq_prod_bd = bp->spq; |
3118 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; | 3560 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; |
3119 | 3561 | ||
3120 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port), | 3562 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func), |
3121 | U64_LO(bp->spq_mapping)); | 3563 | U64_LO(bp->spq_mapping)); |
3122 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4, | 3564 | REG_WR(bp, |
3565 | XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4, | ||
3123 | U64_HI(bp->spq_mapping)); | 3566 | U64_HI(bp->spq_mapping)); |
3124 | 3567 | ||
3125 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port), | 3568 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func), |
3126 | bp->spq_prod_idx); | 3569 | bp->spq_prod_idx); |
3127 | } | 3570 | } |
3128 | 3571 | ||
@@ -3133,6 +3576,7 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3133 | for_each_queue(bp, i) { | 3576 | for_each_queue(bp, i) { |
3134 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); | 3577 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); |
3135 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3578 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3579 | u8 sb_id = FP_SB_ID(fp); | ||
3136 | 3580 | ||
3137 | context->xstorm_st_context.tx_bd_page_base_hi = | 3581 | context->xstorm_st_context.tx_bd_page_base_hi = |
3138 | U64_HI(fp->tx_desc_mapping); | 3582 | U64_HI(fp->tx_desc_mapping); |
@@ -3142,26 +3586,25 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3142 | U64_HI(fp->tx_prods_mapping); | 3586 | U64_HI(fp->tx_prods_mapping); |
3143 | context->xstorm_st_context.db_data_addr_lo = | 3587 | context->xstorm_st_context.db_data_addr_lo = |
3144 | U64_LO(fp->tx_prods_mapping); | 3588 | U64_LO(fp->tx_prods_mapping); |
3145 | 3589 | context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) | | |
3146 | context->ustorm_st_context.rx_bd_page_base_hi = | 3590 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); |
3591 | |||
3592 | context->ustorm_st_context.common.sb_index_numbers = | ||
3593 | BNX2X_RX_SB_INDEX_NUM; | ||
3594 | context->ustorm_st_context.common.clientId = FP_CL_ID(fp); | ||
3595 | context->ustorm_st_context.common.status_block_id = sb_id; | ||
3596 | context->ustorm_st_context.common.flags = | ||
3597 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT; | ||
3598 | context->ustorm_st_context.common.mc_alignment_size = 64; | ||
3599 | context->ustorm_st_context.common.bd_buff_size = | ||
3600 | bp->rx_buf_use_size; | ||
3601 | context->ustorm_st_context.common.bd_page_base_hi = | ||
3147 | U64_HI(fp->rx_desc_mapping); | 3602 | U64_HI(fp->rx_desc_mapping); |
3148 | context->ustorm_st_context.rx_bd_page_base_lo = | 3603 | context->ustorm_st_context.common.bd_page_base_lo = |
3149 | U64_LO(fp->rx_desc_mapping); | 3604 | U64_LO(fp->rx_desc_mapping); |
3150 | context->ustorm_st_context.status_block_id = i; | ||
3151 | context->ustorm_st_context.sb_index_number = | ||
3152 | HC_INDEX_U_ETH_RX_CQ_CONS; | ||
3153 | context->ustorm_st_context.rcq_base_address_hi = | ||
3154 | U64_HI(fp->rx_comp_mapping); | ||
3155 | context->ustorm_st_context.rcq_base_address_lo = | ||
3156 | U64_LO(fp->rx_comp_mapping); | ||
3157 | context->ustorm_st_context.flags = | ||
3158 | USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT; | ||
3159 | context->ustorm_st_context.mc_alignment_size = 64; | ||
3160 | context->ustorm_st_context.num_rss = bp->num_queues; | ||
3161 | |||
3162 | context->cstorm_st_context.sb_index_number = | 3605 | context->cstorm_st_context.sb_index_number = |
3163 | HC_INDEX_C_ETH_TX_CQ_CONS; | 3606 | HC_INDEX_C_ETH_TX_CQ_CONS; |
3164 | context->cstorm_st_context.status_block_id = i; | 3607 | context->cstorm_st_context.status_block_id = sb_id; |
3165 | 3608 | ||
3166 | context->xstorm_ag_context.cdu_reserved = | 3609 | context->xstorm_ag_context.cdu_reserved = |
3167 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | 3610 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), |
@@ -3176,14 +3619,16 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3176 | 3619 | ||
3177 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 3620 | static void bnx2x_init_ind_table(struct bnx2x *bp) |
3178 | { | 3621 | { |
3179 | int port = bp->port; | 3622 | int port = BP_PORT(bp); |
3180 | int i; | 3623 | int i; |
3181 | 3624 | ||
3182 | if (!is_multi(bp)) | 3625 | if (!is_multi(bp)) |
3183 | return; | 3626 | return; |
3184 | 3627 | ||
3628 | DP(NETIF_MSG_IFUP, "Initializing indirection table\n"); | ||
3185 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 3629 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) |
3186 | REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, | 3630 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
3631 | TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, | ||
3187 | i % bp->num_queues); | 3632 | i % bp->num_queues); |
3188 | 3633 | ||
3189 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 3634 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
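For reference, the indirection-table loop above writes one byte per entry and spreads the entries round-robin across the active queues via i % bp->num_queues. A self-contained sketch of the same fill pattern; TABLE_SIZE and num_queues below are placeholders, not the driver's constants:

/* Round-robin RSS indirection fill, mirroring "i % bp->num_queues"
 * in the loop above.  TABLE_SIZE and num_queues are illustrative. */
#include <stdio.h>

#define TABLE_SIZE 128

int main(void)
{
	unsigned char ind_table[TABLE_SIZE];
	int num_queues = 4;
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = (unsigned char)(i % num_queues);

	/* entry 0 -> queue 0, entry 5 -> queue 1, entry 7 -> queue 3, ... */
	printf("entry 5 -> queue %d\n", ind_table[5]);
	return 0;
}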
@@ -3191,77 +3636,74 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
3191 | 3636 | ||
3192 | static void bnx2x_set_client_config(struct bnx2x *bp) | 3637 | static void bnx2x_set_client_config(struct bnx2x *bp) |
3193 | { | 3638 | { |
3194 | #ifdef BCM_VLAN | ||
3195 | int mode = bp->rx_mode; | ||
3196 | #endif | ||
3197 | int i, port = bp->port; | ||
3198 | struct tstorm_eth_client_config tstorm_client = {0}; | 3639 | struct tstorm_eth_client_config tstorm_client = {0}; |
3640 | int port = BP_PORT(bp); | ||
3641 | int i; | ||
3199 | 3642 | ||
3200 | tstorm_client.mtu = bp->dev->mtu; | 3643 | tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; |
3201 | tstorm_client.statistics_counter_id = 0; | 3644 | tstorm_client.statistics_counter_id = 0; |
3202 | tstorm_client.config_flags = | 3645 | tstorm_client.config_flags = |
3203 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | 3646 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; |
3204 | #ifdef BCM_VLAN | 3647 | #ifdef BCM_VLAN |
3205 | if (mode && bp->vlgrp) { | 3648 | if (bp->rx_mode && bp->vlgrp) { |
3206 | tstorm_client.config_flags |= | 3649 | tstorm_client.config_flags |= |
3207 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; | 3650 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; |
3208 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | 3651 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); |
3209 | } | 3652 | } |
3210 | #endif | 3653 | #endif |
3211 | if (mode != BNX2X_RX_MODE_PROMISC) | ||
3212 | tstorm_client.drop_flags = | ||
3213 | TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR; | ||
3214 | 3654 | ||
3215 | for_each_queue(bp, i) { | 3655 | for_each_queue(bp, i) { |
3216 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3656 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3217 | TSTORM_CLIENT_CONFIG_OFFSET(port, i), | 3657 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id), |
3218 | ((u32 *)&tstorm_client)[0]); | 3658 | ((u32 *)&tstorm_client)[0]); |
3219 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3659 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3220 | TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4, | 3660 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4, |
3221 | ((u32 *)&tstorm_client)[1]); | 3661 | ((u32 *)&tstorm_client)[1]); |
3222 | } | 3662 | } |
3223 | 3663 | ||
3224 | /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n", | 3664 | DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", |
3225 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */ | 3665 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); |
3226 | } | 3666 | } |
3227 | 3667 | ||
3228 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 3668 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
3229 | { | 3669 | { |
3230 | int mode = bp->rx_mode; | ||
3231 | int port = bp->port; | ||
3232 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; | 3670 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; |
3671 | int mode = bp->rx_mode; | ||
3672 | int mask = (1 << BP_L_ID(bp)); | ||
3673 | int func = BP_FUNC(bp); | ||
3233 | int i; | 3674 | int i; |
3234 | 3675 | ||
3235 | DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); | 3676 | DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); |
3236 | 3677 | ||
3237 | switch (mode) { | 3678 | switch (mode) { |
3238 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 3679 | case BNX2X_RX_MODE_NONE: /* no Rx */ |
3239 | tstorm_mac_filter.ucast_drop_all = 1; | 3680 | tstorm_mac_filter.ucast_drop_all = mask; |
3240 | tstorm_mac_filter.mcast_drop_all = 1; | 3681 | tstorm_mac_filter.mcast_drop_all = mask; |
3241 | tstorm_mac_filter.bcast_drop_all = 1; | 3682 | tstorm_mac_filter.bcast_drop_all = mask; |
3242 | break; | 3683 | break; |
3243 | case BNX2X_RX_MODE_NORMAL: | 3684 | case BNX2X_RX_MODE_NORMAL: |
3244 | tstorm_mac_filter.bcast_accept_all = 1; | 3685 | tstorm_mac_filter.bcast_accept_all = mask; |
3245 | break; | 3686 | break; |
3246 | case BNX2X_RX_MODE_ALLMULTI: | 3687 | case BNX2X_RX_MODE_ALLMULTI: |
3247 | tstorm_mac_filter.mcast_accept_all = 1; | 3688 | tstorm_mac_filter.mcast_accept_all = mask; |
3248 | tstorm_mac_filter.bcast_accept_all = 1; | 3689 | tstorm_mac_filter.bcast_accept_all = mask; |
3249 | break; | 3690 | break; |
3250 | case BNX2X_RX_MODE_PROMISC: | 3691 | case BNX2X_RX_MODE_PROMISC: |
3251 | tstorm_mac_filter.ucast_accept_all = 1; | 3692 | tstorm_mac_filter.ucast_accept_all = mask; |
3252 | tstorm_mac_filter.mcast_accept_all = 1; | 3693 | tstorm_mac_filter.mcast_accept_all = mask; |
3253 | tstorm_mac_filter.bcast_accept_all = 1; | 3694 | tstorm_mac_filter.bcast_accept_all = mask; |
3254 | break; | 3695 | break; |
3255 | default: | 3696 | default: |
3256 | BNX2X_ERR("bad rx mode (%d)\n", mode); | 3697 | BNX2X_ERR("BAD rx mode (%d)\n", mode); |
3698 | break; | ||
3257 | } | 3699 | } |
3258 | 3700 | ||
3259 | for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { | 3701 | for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { |
3260 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3702 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3261 | TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4, | 3703 | TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, |
3262 | ((u32 *)&tstorm_mac_filter)[i]); | 3704 | ((u32 *)&tstorm_mac_filter)[i]); |
3263 | 3705 | ||
3264 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, | 3706 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, |
3265 | ((u32 *)&tstorm_mac_filter)[i]); */ | 3707 | ((u32 *)&tstorm_mac_filter)[i]); */ |
3266 | } | 3708 | } |
3267 | 3709 | ||
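The rx-mode hunk above replaces the plain 0/1 flags with a per-function bit, mask = (1 << BP_L_ID(bp)), in the accept/drop fields. A condensed sketch of how the accept fields map to that mask for each mode (the drop_all fields and the storm register writes are omitted; the struct and enum names are illustrative stand-ins, not the driver's types):

/* Per-function accept bits, condensed from the switch above:
 * PROMISC accepts ucast+mcast+bcast, ALLMULTI accepts mcast+bcast,
 * NORMAL accepts bcast only, NONE accepts nothing. */
#include <stdio.h>

struct mac_filter {
	unsigned int ucast_accept_all;
	unsigned int mcast_accept_all;
	unsigned int bcast_accept_all;
};

enum rx_mode { RX_MODE_NONE, RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

static void set_rx_mode(struct mac_filter *f, enum rx_mode mode, int l_id)
{
	unsigned int mask = 1u << l_id;	/* this function's bit */

	switch (mode) {
	case RX_MODE_PROMISC:
		f->ucast_accept_all |= mask;
		/* fall through */
	case RX_MODE_ALLMULTI:
		f->mcast_accept_all |= mask;
		/* fall through */
	case RX_MODE_NORMAL:
		f->bcast_accept_all |= mask;
		break;
	case RX_MODE_NONE:
	default:
		break;
	}
}

int main(void)
{
	struct mac_filter f = {0};

	set_rx_mode(&f, RX_MODE_ALLMULTI, 2);
	printf("mcast 0x%x bcast 0x%x\n", f.mcast_accept_all, f.bcast_accept_all);
	return 0;
}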
@@ -3271,26 +3713,30 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
3271 | 3713 | ||
3272 | static void bnx2x_init_internal(struct bnx2x *bp) | 3714 | static void bnx2x_init_internal(struct bnx2x *bp) |
3273 | { | 3715 | { |
3274 | int port = bp->port; | ||
3275 | struct tstorm_eth_function_common_config tstorm_config = {0}; | 3716 | struct tstorm_eth_function_common_config tstorm_config = {0}; |
3276 | struct stats_indication_flags stats_flags = {0}; | 3717 | struct stats_indication_flags stats_flags = {0}; |
3718 | int port = BP_PORT(bp); | ||
3719 | int func = BP_FUNC(bp); | ||
3720 | int i; | ||
3277 | 3721 | ||
3278 | if (is_multi(bp)) { | 3722 | if (is_multi(bp)) { |
3279 | tstorm_config.config_flags = MULTI_FLAGS; | 3723 | tstorm_config.config_flags = MULTI_FLAGS; |
3280 | tstorm_config.rss_result_mask = MULTI_MASK; | 3724 | tstorm_config.rss_result_mask = MULTI_MASK; |
3281 | } | 3725 | } |
3282 | 3726 | ||
3727 | tstorm_config.leading_client_id = BP_L_ID(bp); | ||
3728 | |||
3283 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3729 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3284 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port), | 3730 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), |
3285 | (*(u32 *)&tstorm_config)); | 3731 | (*(u32 *)&tstorm_config)); |
3286 | 3732 | ||
3287 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", | 3733 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", |
3288 | (*(u32 *)&tstorm_config)); */ | 3734 | (*(u32 *)&tstorm_config)); */ |
3289 | 3735 | ||
3290 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ | 3736 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ |
3291 | bnx2x_set_storm_rx_mode(bp); | 3737 | bnx2x_set_storm_rx_mode(bp); |
3292 | 3738 | ||
3293 | stats_flags.collect_eth = cpu_to_le32(1); | 3739 | stats_flags.collect_eth = 1; |
3294 | 3740 | ||
3295 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), | 3741 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), |
3296 | ((u32 *)&stats_flags)[0]); | 3742 | ((u32 *)&stats_flags)[0]); |
@@ -3307,8 +3753,28 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
3307 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, | 3753 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, |
3308 | ((u32 *)&stats_flags)[1]); | 3754 | ((u32 *)&stats_flags)[1]); |
3309 | 3755 | ||
3310 | /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", | 3756 | /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", |
3311 | ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ | 3757 | ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ |
3758 | |||
3759 | if (CHIP_IS_E1H(bp)) { | ||
3760 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | ||
3761 | IS_E1HMF(bp)); | ||
3762 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, | ||
3763 | IS_E1HMF(bp)); | ||
3764 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, | ||
3765 | IS_E1HMF(bp)); | ||
3766 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, | ||
3767 | IS_E1HMF(bp)); | ||
3768 | |||
3769 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | ||
3770 | XSTORM_E1HOV_OFFSET(func), bp->e1hov); | ||
3771 | } | ||
3772 | |||
3773 | /* Zero this manually as its initialization is | ||
3774 | currently missing in the initTool */ | ||
3775 | for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) | ||
3776 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3777 | USTORM_AGG_DATA_OFFSET + 4*i, 0); | ||
3312 | } | 3778 | } |
3313 | 3779 | ||
3314 | static void bnx2x_nic_init(struct bnx2x *bp) | 3780 | static void bnx2x_nic_init(struct bnx2x *bp) |
@@ -3318,15 +3784,20 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
3318 | for_each_queue(bp, i) { | 3784 | for_each_queue(bp, i) { |
3319 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3785 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3320 | 3786 | ||
3787 | fp->bp = bp; | ||
3321 | fp->state = BNX2X_FP_STATE_CLOSED; | 3788 | fp->state = BNX2X_FP_STATE_CLOSED; |
3322 | DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n", | ||
3323 | bp, fp->status_blk, i); | ||
3324 | fp->index = i; | 3789 | fp->index = i; |
3325 | bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i); | 3790 | fp->cl_id = BP_L_ID(bp) + i; |
3791 | fp->sb_id = fp->cl_id; | ||
3792 | DP(NETIF_MSG_IFUP, | ||
3793 | "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", | ||
3794 | bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); | ||
3795 | bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, | ||
3796 | fp->status_blk_mapping); | ||
3326 | } | 3797 | } |
3327 | 3798 | ||
3328 | bnx2x_init_def_sb(bp, bp->def_status_blk, | 3799 | bnx2x_init_def_sb(bp, bp->def_status_blk, |
3329 | bp->def_status_blk_mapping, 0x10); | 3800 | bp->def_status_blk_mapping, DEF_SB_ID); |
3330 | bnx2x_update_coalesce(bp); | 3801 | bnx2x_update_coalesce(bp); |
3331 | bnx2x_init_rx_rings(bp); | 3802 | bnx2x_init_rx_rings(bp); |
3332 | bnx2x_init_tx_ring(bp); | 3803 | bnx2x_init_tx_ring(bp); |
@@ -3336,7 +3807,6 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
3336 | bnx2x_init_stats(bp); | 3807 | bnx2x_init_stats(bp); |
3337 | bnx2x_init_ind_table(bp); | 3808 | bnx2x_init_ind_table(bp); |
3338 | bnx2x_int_enable(bp); | 3809 | bnx2x_int_enable(bp); |
3339 | |||
3340 | } | 3810 | } |
3341 | 3811 | ||
3342 | /* end of nic init */ | 3812 | /* end of nic init */ |
@@ -3374,7 +3844,7 @@ gunzip_nomem2: | |||
3374 | 3844 | ||
3375 | gunzip_nomem1: | 3845 | gunzip_nomem1: |
3376 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for" | 3846 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for" |
3377 | " uncompression\n", bp->dev->name); | 3847 | " un-compression\n", bp->dev->name); |
3378 | return -ENOMEM; | 3848 | return -ENOMEM; |
3379 | } | 3849 | } |
3380 | 3850 | ||
@@ -3402,7 +3872,7 @@ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len) | |||
3402 | 3872 | ||
3403 | n = 10; | 3873 | n = 10; |
3404 | 3874 | ||
3405 | #define FNAME 0x8 | 3875 | #define FNAME 0x8 |
3406 | 3876 | ||
3407 | if (zbuf[3] & FNAME) | 3877 | if (zbuf[3] & FNAME) |
3408 | while ((zbuf[n++] != 0) && (n < len)); | 3878 | while ((zbuf[n++] != 0) && (n < len)); |
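The FNAME handling above matches the gzip header layout (RFC 1952): byte 3 is the FLG field, bit 3 (0x08) indicates an embedded original file name stored as a NUL-terminated string right after the fixed 10-byte header. A minimal sketch of locating the start of the deflate stream under that assumption (FEXTRA, FCOMMENT and FHCRC flags are deliberately ignored, so this is only a partial parser):

/* Find the offset of the deflate data inside a gzip buffer,
 * mirroring the FNAME skip above. */
#include <stdio.h>

#define GZ_HDR_LEN   10
#define GZ_FLG_FNAME 0x08

static int gzip_data_offset(const unsigned char *buf, int len)
{
	int n = GZ_HDR_LEN;

	if (len < GZ_HDR_LEN || buf[0] != 0x1f || buf[1] != 0x8b)
		return -1;			/* not a gzip header */

	if (buf[3] & GZ_FLG_FNAME)		/* skip NUL-terminated name */
		while ((n < len) && (buf[n++] != 0))
			;
	return n;
}

int main(void)
{
	const unsigned char gz[] = { 0x1f, 0x8b, 8, GZ_FLG_FNAME, 0, 0, 0, 0,
				     0, 3, 'f', 'w', 0, /* deflate data... */ 0x63 };

	printf("deflate stream starts at offset %d\n",
	       gzip_data_offset(gz, sizeof(gz)));	/* prints 13 */
	return 0;
}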
@@ -3439,41 +3909,25 @@ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len) | |||
3439 | /* nic load/unload */ | 3909 | /* nic load/unload */ |
3440 | 3910 | ||
3441 | /* | 3911 | /* |
3442 | * general service functions | 3912 | * General service functions |
3443 | */ | 3913 | */ |
3444 | 3914 | ||
3445 | /* send a NIG loopback debug packet */ | 3915 | /* send a NIG loopback debug packet */ |
3446 | static void bnx2x_lb_pckt(struct bnx2x *bp) | 3916 | static void bnx2x_lb_pckt(struct bnx2x *bp) |
3447 | { | 3917 | { |
3448 | #ifdef USE_DMAE | ||
3449 | u32 wb_write[3]; | 3918 | u32 wb_write[3]; |
3450 | #endif | ||
3451 | 3919 | ||
3452 | /* Ethernet source and destination addresses */ | 3920 | /* Ethernet source and destination addresses */ |
3453 | #ifdef USE_DMAE | ||
3454 | wb_write[0] = 0x55555555; | 3921 | wb_write[0] = 0x55555555; |
3455 | wb_write[1] = 0x55555555; | 3922 | wb_write[1] = 0x55555555; |
3456 | wb_write[2] = 0x20; /* SOP */ | 3923 | wb_write[2] = 0x20; /* SOP */ |
3457 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); | 3924 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); |
3458 | #else | ||
3459 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555); | ||
3460 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555); | ||
3461 | /* SOP */ | ||
3462 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20); | ||
3463 | #endif | ||
3464 | 3925 | ||
3465 | /* NON-IP protocol */ | 3926 | /* NON-IP protocol */ |
3466 | #ifdef USE_DMAE | ||
3467 | wb_write[0] = 0x09000000; | 3927 | wb_write[0] = 0x09000000; |
3468 | wb_write[1] = 0x55555555; | 3928 | wb_write[1] = 0x55555555; |
3469 | wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ | 3929 | wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ |
3470 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); | 3930 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); |
3471 | #else | ||
3472 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000); | ||
3473 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555); | ||
3474 | /* EOP, eop_bvalid = 0 */ | ||
3475 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10); | ||
3476 | #endif | ||
3477 | } | 3931 | } |
3478 | 3932 | ||
3479 | /* some of the internal memories | 3933 | /* some of the internal memories |
@@ -3511,13 +3965,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3511 | /* Wait until NIG register shows 1 packet of size 0x10 */ | 3965 | /* Wait until NIG register shows 1 packet of size 0x10 */ |
3512 | count = 1000 * factor; | 3966 | count = 1000 * factor; |
3513 | while (count) { | 3967 | while (count) { |
3514 | #ifdef BNX2X_DMAE_RD | 3968 | |
3515 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | 3969 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); |
3516 | val = *bnx2x_sp(bp, wb_data[0]); | 3970 | val = *bnx2x_sp(bp, wb_data[0]); |
3517 | #else | ||
3518 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3519 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3520 | #endif | ||
3521 | if (val == 0x10) | 3971 | if (val == 0x10) |
3522 | break; | 3972 | break; |
3523 | 3973 | ||
@@ -3533,7 +3983,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3533 | count = 1000 * factor; | 3983 | count = 1000 * factor; |
3534 | while (count) { | 3984 | while (count) { |
3535 | val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); | 3985 | val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); |
3536 | |||
3537 | if (val == 1) | 3986 | if (val == 1) |
3538 | break; | 3987 | break; |
3539 | 3988 | ||
@@ -3546,9 +3995,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3546 | } | 3995 | } |
3547 | 3996 | ||
3548 | /* Reset and init BRB, PRS */ | 3997 | /* Reset and init BRB, PRS */ |
3549 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3); | 3998 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); |
3550 | msleep(50); | 3999 | msleep(50); |
3551 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3); | 4000 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
3552 | msleep(50); | 4001 | msleep(50); |
3553 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); | 4002 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); |
3554 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 4003 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
@@ -3572,13 +4021,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3572 | packets of size 11*0x10 = 0xb0 */ | 4021 | packets of size 11*0x10 = 0xb0 */ |
3573 | count = 1000 * factor; | 4022 | count = 1000 * factor; |
3574 | while (count) { | 4023 | while (count) { |
3575 | #ifdef BNX2X_DMAE_RD | 4024 | |
3576 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | 4025 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); |
3577 | val = *bnx2x_sp(bp, wb_data[0]); | 4026 | val = *bnx2x_sp(bp, wb_data[0]); |
3578 | #else | ||
3579 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3580 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3581 | #endif | ||
3582 | if (val == 0xb0) | 4027 | if (val == 0xb0) |
3583 | break; | 4028 | break; |
3584 | 4029 | ||
@@ -3648,85 +4093,75 @@ static void enable_blocks_attention(struct bnx2x *bp) | |||
3648 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); | 4093 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); |
3649 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); | 4094 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); |
3650 | REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); | 4095 | REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); |
3651 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ | 4096 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ |
3652 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ | 4097 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ |
3653 | REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); | 4098 | REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); |
3654 | REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); | 4099 | REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); |
3655 | REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); | 4100 | REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); |
3656 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ | 4101 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ |
3657 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ | 4102 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ |
3658 | REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); | 4103 | REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); |
3659 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); | 4104 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); |
3660 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); | 4105 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); |
3661 | REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); | 4106 | REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); |
3662 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ | 4107 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ |
3663 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ | 4108 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ |
3664 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000); | 4109 | if (CHIP_REV_IS_FPGA(bp)) |
4110 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); | ||
4111 | else | ||
4112 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); | ||
3665 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); | 4113 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); |
3666 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); | 4114 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); |
3667 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); | 4115 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); |
3668 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ | 4116 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ |
3669 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ | 4117 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ |
3670 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); | 4118 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); |
3671 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); | 4119 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); |
3672 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ | 4120 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ |
3673 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ | 4121 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ |
3674 | } | 4122 | } |
3675 | 4123 | ||
3676 | static int bnx2x_function_init(struct bnx2x *bp, int mode) | 4124 | |
4125 | static int bnx2x_init_common(struct bnx2x *bp) | ||
3677 | { | 4126 | { |
3678 | int func = bp->port; | ||
3679 | int port = func ? PORT1 : PORT0; | ||
3680 | u32 val, i; | 4127 | u32 val, i; |
3681 | #ifdef USE_DMAE | ||
3682 | u32 wb_write[2]; | ||
3683 | #endif | ||
3684 | |||
3685 | DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode); | ||
3686 | if ((func != 0) && (func != 1)) { | ||
3687 | BNX2X_ERR("BAD function number (%d)\n", func); | ||
3688 | return -ENODEV; | ||
3689 | } | ||
3690 | 4128 | ||
3691 | bnx2x_gunzip_init(bp); | 4129 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); |
3692 | 4130 | ||
3693 | if (mode & 0x1) { /* init common */ | 4131 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
3694 | DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", | 4132 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); |
3695 | func, mode); | ||
3696 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
3697 | 0xffffffff); | ||
3698 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, | ||
3699 | 0xfffc); | ||
3700 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); | ||
3701 | 4133 | ||
3702 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); | 4134 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); |
3703 | msleep(30); | 4135 | if (CHIP_IS_E1H(bp)) |
3704 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); | 4136 | REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); |
3705 | 4137 | ||
3706 | bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END); | 4138 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); |
3707 | bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END); | 4139 | msleep(30); |
4140 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); | ||
3708 | 4141 | ||
3709 | bnx2x_init_pxp(bp); | 4142 | bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END); |
4143 | if (CHIP_IS_E1(bp)) { | ||
4144 | /* enable HW interrupt from PXP on USDM overflow | ||
4145 | bit 16 on INT_MASK_0 */ | ||
4146 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | ||
4147 | } | ||
3710 | 4148 | ||
3711 | if (CHIP_REV(bp) == CHIP_REV_Ax) { | 4149 | bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END); |
3712 | /* enable HW interrupt from PXP on USDM | 4150 | bnx2x_init_pxp(bp); |
3713 | overflow bit 16 on INT_MASK_0 */ | ||
3714 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | ||
3715 | } | ||
3716 | 4151 | ||
3717 | #ifdef __BIG_ENDIAN | 4152 | #ifdef __BIG_ENDIAN |
3718 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); | 4153 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); |
3719 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); | 4154 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); |
3720 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); | 4155 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); |
3721 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); | 4156 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); |
3722 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); | 4157 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); |
3723 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1); | 4158 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1); |
3724 | 4159 | ||
3725 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ | 4160 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ |
3726 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); | 4161 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); |
3727 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); | 4162 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); |
3728 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); | 4163 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); |
3729 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | 4164 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); |
3730 | #endif | 4165 | #endif |
3731 | 4166 | ||
3732 | #ifndef BCM_ISCSI | 4167 | #ifndef BCM_ISCSI |
@@ -3734,92 +4169,105 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
3734 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 4169 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
3735 | #endif | 4170 | #endif |
3736 | 4171 | ||
3737 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5); | 4172 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); |
3738 | #ifdef BCM_ISCSI | 4173 | #ifdef BCM_ISCSI |
3739 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); | 4174 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); |
3740 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); | 4175 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); |
3741 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); | 4176 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); |
3742 | #endif | 4177 | #endif |
3743 | 4178 | ||
3744 | bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END); | 4179 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) |
4180 | REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); | ||
3745 | 4181 | ||
3746 | /* let the HW do it's magic ... */ | 4182 | /* let the HW do it's magic ... */ |
3747 | msleep(100); | 4183 | msleep(100); |
3748 | /* finish PXP init | 4184 | /* finish PXP init */ |
3749 | (can be moved up if we want to use the DMAE) */ | 4185 | val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); |
3750 | val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); | 4186 | if (val != 1) { |
3751 | if (val != 1) { | 4187 | BNX2X_ERR("PXP2 CFG failed\n"); |
3752 | BNX2X_ERR("PXP2 CFG failed\n"); | 4188 | return -EBUSY; |
3753 | return -EBUSY; | 4189 | } |
3754 | } | 4190 | val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); |
4191 | if (val != 1) { | ||
4192 | BNX2X_ERR("PXP2 RD_INIT failed\n"); | ||
4193 | return -EBUSY; | ||
4194 | } | ||
3755 | 4195 | ||
3756 | val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); | 4196 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); |
3757 | if (val != 1) { | 4197 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); |
3758 | BNX2X_ERR("PXP2 RD_INIT failed\n"); | ||
3759 | return -EBUSY; | ||
3760 | } | ||
3761 | 4198 | ||
3762 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); | 4199 | bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END); |
3763 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); | ||
3764 | 4200 | ||
3765 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | 4201 | /* clean the DMAE memory */ |
4202 | bp->dmae_ready = 1; | ||
4203 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | ||
3766 | 4204 | ||
3767 | bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END); | 4205 | bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END); |
3768 | bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END); | 4206 | bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END); |
3769 | bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END); | 4207 | bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END); |
3770 | bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END); | 4208 | bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END); |
3771 | 4209 | ||
3772 | #ifdef BNX2X_DMAE_RD | 4210 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); |
3773 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); | 4211 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); |
3774 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); | 4212 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); |
3775 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); | 4213 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); |
3776 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); | 4214 | |
3777 | #else | 4215 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); |
3778 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER); | 4216 | /* soft reset pulse */ |
3779 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4); | 4217 | REG_WR(bp, QM_REG_SOFT_RESET, 1); |
3780 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8); | 4218 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
3781 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER); | ||
3782 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4); | ||
3783 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8); | ||
3784 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER); | ||
3785 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4); | ||
3786 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8); | ||
3787 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER); | ||
3788 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4); | ||
3789 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); | ||
3790 | #endif | ||
3791 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); | ||
3792 | /* soft reset pulse */ | ||
3793 | REG_WR(bp, QM_REG_SOFT_RESET, 1); | ||
3794 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | ||
3795 | 4219 | ||
3796 | #ifdef BCM_ISCSI | 4220 | #ifdef BCM_ISCSI |
3797 | bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END); | 4221 | bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END); |
3798 | #endif | 4222 | #endif |
3799 | bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END); | ||
3800 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS); | ||
3801 | if (CHIP_REV(bp) == CHIP_REV_Ax) { | ||
3802 | /* enable hw interrupt from doorbell Q */ | ||
3803 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | ||
3804 | } | ||
3805 | 4223 | ||
3806 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); | 4224 | bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END); |
4225 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); | ||
4226 | if (!CHIP_REV_IS_SLOW(bp)) { | ||
4227 | /* enable hw interrupt from doorbell Q */ | ||
4228 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | ||
4229 | } | ||
3807 | 4230 | ||
3808 | if (CHIP_REV_IS_SLOW(bp)) { | 4231 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); |
3809 | /* fix for emulation and FPGA for no pause */ | 4232 | if (CHIP_REV_IS_SLOW(bp)) { |
3810 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513); | 4233 | /* fix for emulation and FPGA for no pause */ |
3811 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513); | 4234 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513); |
3812 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0); | 4235 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513); |
3813 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0); | 4236 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0); |
3814 | } | 4237 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0); |
4238 | } | ||
3815 | 4239 | ||
3816 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 4240 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
4241 | if (CHIP_IS_E1H(bp)) | ||
4242 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); | ||
3817 | 4243 | ||
3818 | bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END); | 4244 | bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END); |
3819 | bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END); | 4245 | bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END); |
3820 | bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END); | 4246 | bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END); |
3821 | bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END); | 4247 | bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END); |
3822 | 4248 | ||
4249 | if (CHIP_IS_E1H(bp)) { | ||
4250 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, | ||
4251 | STORM_INTMEM_SIZE_E1H/2); | ||
4252 | bnx2x_init_fill(bp, | ||
4253 | TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4254 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4255 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, | ||
4256 | STORM_INTMEM_SIZE_E1H/2); | ||
4257 | bnx2x_init_fill(bp, | ||
4258 | CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4259 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4260 | bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, | ||
4261 | STORM_INTMEM_SIZE_E1H/2); | ||
4262 | bnx2x_init_fill(bp, | ||
4263 | XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4264 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4265 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, | ||
4266 | STORM_INTMEM_SIZE_E1H/2); | ||
4267 | bnx2x_init_fill(bp, | ||
4268 | USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4269 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4270 | } else { /* E1 */ | ||
3823 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, | 4271 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, |
3824 | STORM_INTMEM_SIZE_E1); | 4272 | STORM_INTMEM_SIZE_E1); |
3825 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, | 4273 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, |
@@ -3828,157 +4276,141 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
3828 | STORM_INTMEM_SIZE_E1); | 4276 | STORM_INTMEM_SIZE_E1); |
3829 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, | 4277 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, |
3830 | STORM_INTMEM_SIZE_E1); | 4278 | STORM_INTMEM_SIZE_E1); |
4279 | } | ||
3831 | 4280 | ||
3832 | bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END); | 4281 | bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END); |
3833 | bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END); | 4282 | bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END); |
3834 | bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END); | 4283 | bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END); |
3835 | bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END); | 4284 | bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END); |
3836 | |||
3837 | /* sync semi rtc */ | ||
3838 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
3839 | 0x80000000); | ||
3840 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
3841 | 0x80000000); | ||
3842 | |||
3843 | bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END); | ||
3844 | bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END); | ||
3845 | bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END); | ||
3846 | |||
3847 | REG_WR(bp, SRC_REG_SOFT_RST, 1); | ||
3848 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { | ||
3849 | REG_WR(bp, i, 0xc0cac01a); | ||
3850 | /* TODO: replace with something meaningful */ | ||
3851 | } | ||
3852 | /* SRCH COMMON comes here */ | ||
3853 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | ||
3854 | |||
3855 | if (sizeof(union cdu_context) != 1024) { | ||
3856 | /* we currently assume that a context is 1024 bytes */ | ||
3857 | printk(KERN_ALERT PFX "please adjust the size of" | ||
3858 | " cdu_context(%ld)\n", | ||
3859 | (long)sizeof(union cdu_context)); | ||
3860 | } | ||
3861 | val = (4 << 24) + (0 << 12) + 1024; | ||
3862 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | ||
3863 | bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END); | ||
3864 | |||
3865 | bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END); | ||
3866 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); | ||
3867 | |||
3868 | bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END); | ||
3869 | bnx2x_init_block(bp, MISC_AEU_COMMON_START, | ||
3870 | MISC_AEU_COMMON_END); | ||
3871 | /* RXPCS COMMON comes here */ | ||
3872 | /* EMAC0 COMMON comes here */ | ||
3873 | /* EMAC1 COMMON comes here */ | ||
3874 | /* DBU COMMON comes here */ | ||
3875 | /* DBG COMMON comes here */ | ||
3876 | bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END); | ||
3877 | 4285 | ||
3878 | if (CHIP_REV_IS_SLOW(bp)) | 4286 | /* sync semi rtc */ |
3879 | msleep(200); | 4287 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
4288 | 0x80000000); | ||
4289 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
4290 | 0x80000000); | ||
3880 | 4291 | ||
3881 | /* finish CFC init */ | 4292 | bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END); |
3882 | val = REG_RD(bp, CFC_REG_LL_INIT_DONE); | 4293 | bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END); |
3883 | if (val != 1) { | 4294 | bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END); |
3884 | BNX2X_ERR("CFC LL_INIT failed\n"); | ||
3885 | return -EBUSY; | ||
3886 | } | ||
3887 | 4295 | ||
3888 | val = REG_RD(bp, CFC_REG_AC_INIT_DONE); | 4296 | REG_WR(bp, SRC_REG_SOFT_RST, 1); |
3889 | if (val != 1) { | 4297 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { |
3890 | BNX2X_ERR("CFC AC_INIT failed\n"); | 4298 | REG_WR(bp, i, 0xc0cac01a); |
3891 | return -EBUSY; | 4299 | /* TODO: replace with something meaningful */ |
3892 | } | 4300 | } |
4301 | if (CHIP_IS_E1H(bp)) | ||
4302 | bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END); | ||
4303 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | ||
3893 | 4304 | ||
3894 | val = REG_RD(bp, CFC_REG_CAM_INIT_DONE); | 4305 | if (sizeof(union cdu_context) != 1024) |
3895 | if (val != 1) { | 4306 | /* we currently assume that a context is 1024 bytes */ |
3896 | BNX2X_ERR("CFC CAM_INIT failed\n"); | 4307 | printk(KERN_ALERT PFX "please adjust the size of" |
3897 | return -EBUSY; | 4308 | " cdu_context(%ld)\n", (long)sizeof(union cdu_context)); |
3898 | } | ||
3899 | 4309 | ||
3900 | REG_WR(bp, CFC_REG_DEBUG0, 0); | 4310 | bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END); |
4311 | val = (4 << 24) + (0 << 12) + 1024; | ||
4312 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | ||
4313 | if (CHIP_IS_E1(bp)) { | ||
4314 | /* !!! fix pxp client crdit until excel update */ | ||
4315 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264); | ||
4316 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0); | ||
4317 | } | ||
3901 | 4318 | ||
3902 | /* read NIG statistic | 4319 | bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END); |
3903 | to see if this is our first up since powerup */ | 4320 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); |
3904 | #ifdef BNX2X_DMAE_RD | ||
3905 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | ||
3906 | val = *bnx2x_sp(bp, wb_data[0]); | ||
3907 | #else | ||
3908 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3909 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3910 | #endif | ||
3911 | /* do internal memory self test */ | ||
3912 | if ((val == 0) && bnx2x_int_mem_test(bp)) { | ||
3913 | BNX2X_ERR("internal mem selftest failed\n"); | ||
3914 | return -EBUSY; | ||
3915 | } | ||
3916 | 4321 | ||
3917 | /* clear PXP2 attentions */ | 4322 | bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END); |
3918 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR); | 4323 | bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END); |
3919 | 4324 | ||
3920 | enable_blocks_attention(bp); | 4325 | /* PXPCS COMMON comes here */ |
3921 | /* enable_blocks_parity(bp); */ | 4326 | /* Reset PCIE errors for debug */ |
4327 | REG_WR(bp, 0x2814, 0xffffffff); | ||
4328 | REG_WR(bp, 0x3820, 0xffffffff); | ||
3922 | 4329 | ||
3923 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 4330 | /* EMAC0 COMMON comes here */ |
3924 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 4331 | /* EMAC1 COMMON comes here */ |
3925 | /* Fan failure is indicated by SPIO 5 */ | 4332 | /* DBU COMMON comes here */ |
3926 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | 4333 | /* DBG COMMON comes here */ |
3927 | MISC_REGISTERS_SPIO_INPUT_HI_Z); | ||
3928 | 4334 | ||
3929 | /* set to active low mode */ | 4335 | bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END); |
3930 | val = REG_RD(bp, MISC_REG_SPIO_INT); | 4336 | if (CHIP_IS_E1H(bp)) { |
3931 | val |= ((1 << MISC_REGISTERS_SPIO_5) << | 4337 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); |
4338 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); | ||
4339 | } | ||
4340 | |||
4341 | if (CHIP_REV_IS_SLOW(bp)) | ||
4342 | msleep(200); | ||
4343 | |||
4344 | /* finish CFC init */ | ||
4345 | val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); | ||
4346 | if (val != 1) { | ||
4347 | BNX2X_ERR("CFC LL_INIT failed\n"); | ||
4348 | return -EBUSY; | ||
4349 | } | ||
4350 | val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); | ||
4351 | if (val != 1) { | ||
4352 | BNX2X_ERR("CFC AC_INIT failed\n"); | ||
4353 | return -EBUSY; | ||
4354 | } | ||
4355 | val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); | ||
4356 | if (val != 1) { | ||
4357 | BNX2X_ERR("CFC CAM_INIT failed\n"); | ||
4358 | return -EBUSY; | ||
4359 | } | ||
4360 | REG_WR(bp, CFC_REG_DEBUG0, 0); | ||
4361 | |||
4362 | /* read NIG statistic | ||
4363 | to see if this is our first up since powerup */ | ||
4364 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | ||
4365 | val = *bnx2x_sp(bp, wb_data[0]); | ||
4366 | |||
4367 | /* do internal memory self test */ | ||
4368 | if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { | ||
4369 | BNX2X_ERR("internal mem self test failed\n"); | ||
4370 | return -EBUSY; | ||
4371 | } | ||
4372 | |||
4373 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | ||
4374 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | ||
4375 | /* Fan failure is indicated by SPIO 5 */ | ||
4376 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | ||
4377 | MISC_REGISTERS_SPIO_INPUT_HI_Z); | ||
4378 | |||
4379 | /* set to active low mode */ | ||
4380 | val = REG_RD(bp, MISC_REG_SPIO_INT); | ||
4381 | val |= ((1 << MISC_REGISTERS_SPIO_5) << | ||
3932 | MISC_REGISTERS_SPIO_INT_OLD_SET_POS); | 4382 | MISC_REGISTERS_SPIO_INT_OLD_SET_POS); |
3933 | REG_WR(bp, MISC_REG_SPIO_INT, val); | 4383 | REG_WR(bp, MISC_REG_SPIO_INT, val); |
3934 | 4384 | ||
3935 | /* enable interrupt to signal the IGU */ | 4385 | /* enable interrupt to signal the IGU */ |
3936 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); | 4386 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); |
3937 | val |= (1 << MISC_REGISTERS_SPIO_5); | 4387 | val |= (1 << MISC_REGISTERS_SPIO_5); |
3938 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); | 4388 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); |
3939 | break; | 4389 | break; |
3940 | 4390 | ||
3941 | default: | 4391 | default: |
3942 | break; | 4392 | break; |
3943 | } | 4393 | } |
3944 | 4394 | ||
3945 | } /* end of common init */ | 4395 | /* clear PXP2 attentions */ |
4396 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); | ||
3946 | 4397 | ||
3947 | /* per port init */ | 4398 | enable_blocks_attention(bp); |
3948 | 4399 | ||
3949 | /* the phys address is shifted right 12 bits and has an added | 4400 | return 0; |
3950 | 1=valid bit added to the 53rd bit | 4401 | } |
3951 | then since this is a wide register(TM) | ||
3952 | we split it into two 32 bit writes | ||
3953 | */ | ||
3954 | #define RQ_ONCHIP_AT_PORT_SIZE 384 | ||
3955 | #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) | ||
3956 | #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | ||
3957 | #define PXP_ONE_ILT(x) ((x << 10) | x) | ||
3958 | 4402 | ||
3959 | DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func); | 4403 | static int bnx2x_init_port(struct bnx2x *bp) |
4404 | { | ||
4405 | int port = BP_PORT(bp); | ||
4406 | u32 val; | ||
3960 | 4407 | ||
3961 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0); | 4408 | DP(BNX2X_MSG_MCP, "starting port init port %x\n", port); |
4409 | |||
4410 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | ||
3962 | 4411 | ||
3963 | /* Port PXP comes here */ | 4412 | /* Port PXP comes here */ |
3964 | /* Port PXP2 comes here */ | 4413 | /* Port PXP2 comes here */ |
3965 | |||
3966 | /* Offset is | ||
3967 | * Port0 0 | ||
3968 | * Port1 384 */ | ||
3969 | i = func * RQ_ONCHIP_AT_PORT_SIZE; | ||
3970 | #ifdef USE_DMAE | ||
3971 | wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)); | ||
3972 | wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)); | ||
3973 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
3974 | #else | ||
3975 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, | ||
3976 | ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context))); | ||
3977 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4, | ||
3978 | ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context))); | ||
3979 | #endif | ||
3980 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i)); | ||
3981 | |||
3982 | #ifdef BCM_ISCSI | 4414 | #ifdef BCM_ISCSI |
3983 | /* Port0 1 | 4415 | /* Port0 1 |
3984 | * Port1 385 */ | 4416 | * Port1 385 */ |
@@ -4004,30 +4436,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4004 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | 4436 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); |
4005 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | 4437 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); |
4006 | #endif | 4438 | #endif |
4007 | 4439 | /* Port CMs come here */ | |
4008 | /* Port TCM comes here */ | ||
4009 | /* Port UCM comes here */ | ||
4010 | /* Port CCM comes here */ | ||
4011 | bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START, | ||
4012 | func ? XCM_PORT1_END : XCM_PORT0_END); | ||
4013 | |||
4014 | #ifdef USE_DMAE | ||
4015 | wb_write[0] = 0; | ||
4016 | wb_write[1] = 0; | ||
4017 | #endif | ||
4018 | for (i = 0; i < 32; i++) { | ||
4019 | REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i); | ||
4020 | #ifdef USE_DMAE | ||
4021 | REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2); | ||
4022 | #else | ||
4023 | REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0); | ||
4024 | REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0); | ||
4025 | #endif | ||
4026 | } | ||
4027 | REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1); | ||
4028 | 4440 | ||
4029 | /* Port QM comes here */ | 4441 | /* Port QM comes here */ |
4030 | |||
4031 | #ifdef BCM_ISCSI | 4442 | #ifdef BCM_ISCSI |
4032 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); | 4443 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); |
4033 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); | 4444 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); |
@@ -4042,31 +4453,32 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4042 | /* Port CSDM comes here */ | 4453 | /* Port CSDM comes here */ |
4043 | /* Port USDM comes here */ | 4454 | /* Port USDM comes here */ |
4044 | /* Port XSDM comes here */ | 4455 | /* Port XSDM comes here */ |
4045 | bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START, | 4456 | bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START, |
4046 | func ? TSEM_PORT1_END : TSEM_PORT0_END); | 4457 | port ? TSEM_PORT1_END : TSEM_PORT0_END); |
4047 | bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START, | 4458 | bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START, |
4048 | func ? USEM_PORT1_END : USEM_PORT0_END); | 4459 | port ? USEM_PORT1_END : USEM_PORT0_END); |
4049 | bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START, | 4460 | bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START, |
4050 | func ? CSEM_PORT1_END : CSEM_PORT0_END); | 4461 | port ? CSEM_PORT1_END : CSEM_PORT0_END); |
4051 | bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START, | 4462 | bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START, |
4052 | func ? XSEM_PORT1_END : XSEM_PORT0_END); | 4463 | port ? XSEM_PORT1_END : XSEM_PORT0_END); |
4053 | /* Port UPB comes here */ | 4464 | /* Port UPB comes here */ |
4054 | /* Port XSDM comes here */ | 4465 | /* Port XPB comes here */ |
4055 | bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START, | 4466 | |
4056 | func ? PBF_PORT1_END : PBF_PORT0_END); | 4467 | bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START, |
4468 | port ? PBF_PORT1_END : PBF_PORT0_END); | ||
4057 | 4469 | ||
4058 | /* configure PBF to work without PAUSE mtu 9000 */ | 4470 | /* configure PBF to work without PAUSE mtu 9000 */ |
4059 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0); | 4471 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); |
4060 | 4472 | ||
4061 | /* update threshold */ | 4473 | /* update threshold */ |
4062 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16)); | 4474 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); |
4063 | /* update init credit */ | 4475 | /* update init credit */ |
4064 | REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22); | 4476 | REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); |
4065 | 4477 | ||
4066 | /* probe changes */ | 4478 | /* probe changes */ |
4067 | REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1); | 4479 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); |
4068 | msleep(5); | 4480 | msleep(5); |
4069 | REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0); | 4481 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); |
4070 | 4482 | ||
4071 | #ifdef BCM_ISCSI | 4483 | #ifdef BCM_ISCSI |
4072 | /* tell the searcher where the T2 table is */ | 4484 | /* tell the searcher where the T2 table is */ |
@@ -4084,23 +4496,57 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4084 | #endif | 4496 | #endif |
4085 | /* Port CDU comes here */ | 4497 | /* Port CDU comes here */ |
4086 | /* Port CFC comes here */ | 4498 | /* Port CFC comes here */ |
4087 | bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START, | 4499 | |
4088 | func ? HC_PORT1_END : HC_PORT0_END); | 4500 | if (CHIP_IS_E1(bp)) { |
4089 | bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START : | 4501 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
4502 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
4503 | } | ||
4504 | bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START, | ||
4505 | port ? HC_PORT1_END : HC_PORT0_END); | ||
4506 | |||
4507 | bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START : | ||
4090 | MISC_AEU_PORT0_START, | 4508 | MISC_AEU_PORT0_START, |
4091 | func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END); | 4509 | port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END); |
4510 | /* init aeu_mask_attn_func_0/1: | ||
4511 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use | ||
4512 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF | ||
4513 | * bits 4-7 are used for "per vn group attention" */ | ||
4514 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, | ||
4515 | (IS_E1HMF(bp) ? 0xF7 : 0x7)); | ||
4516 | |||
4092 | /* Port PXPCS comes here */ | 4517 | /* Port PXPCS comes here */ |
4093 | /* Port EMAC0 comes here */ | 4518 | /* Port EMAC0 comes here */ |
4094 | /* Port EMAC1 comes here */ | 4519 | /* Port EMAC1 comes here */ |
4095 | /* Port DBU comes here */ | 4520 | /* Port DBU comes here */ |
4096 | /* Port DBG comes here */ | 4521 | /* Port DBG comes here */ |
4097 | bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START, | 4522 | bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START, |
4098 | func ? NIG_PORT1_END : NIG_PORT0_END); | 4523 | port ? NIG_PORT1_END : NIG_PORT0_END); |
4099 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1); | 4524 | |
4525 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | ||
4526 | |||
4527 | if (CHIP_IS_E1H(bp)) { | ||
4528 | u32 wsum; | ||
4529 | struct cmng_struct_per_port m_cmng_port; | ||
4530 | int vn; | ||
4531 | |||
4532 | /* 0x2 disable e1hov, 0x1 enable */ | ||
4533 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, | ||
4534 | (IS_E1HMF(bp) ? 0x1 : 0x2)); | ||
4535 | |||
4536 | /* Init RATE SHAPING and FAIRNESS contexts. | ||
4537 | Initialize as if there is 10G link. */ | ||
4538 | wsum = bnx2x_calc_vn_wsum(bp); | ||
4539 | bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port); | ||
4540 | if (IS_E1HMF(bp)) | ||
4541 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | ||
4542 | bnx2x_init_vn_minmax(bp, 2*vn + port, | ||
4543 | wsum, 10000, &m_cmng_port); | ||
4544 | } | ||
4545 | |||
4100 | /* Port MCP comes here */ | 4546 | /* Port MCP comes here */ |
4101 | /* Port DMAE comes here */ | 4547 | /* Port DMAE comes here */ |
4102 | 4548 | ||
4103 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 4549 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
4104 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 4550 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
4105 | /* add SPIO 5 to group 0 */ | 4551 | /* add SPIO 5 to group 0 */ |
4106 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4552 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
@@ -4114,48 +4560,150 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4114 | 4560 | ||
4115 | bnx2x__link_reset(bp); | 4561 | bnx2x__link_reset(bp); |
4116 | 4562 | ||
4563 | return 0; | ||
4564 | } | ||
4565 | |||
4566 | #define ILT_PER_FUNC (768/2) | ||
4567 | #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) | ||
4568 | /* the phys address is shifted right 12 bits and has an added | ||
4569 | 1=valid bit added to the 53rd bit | ||
4570 | then since this is a wide register(TM) | ||
4571 | we split it into two 32 bit writes | ||
4572 | */ | ||
4573 | #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) | ||
4574 | #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | ||
4575 | #define PXP_ONE_ILT(x) (((x) << 10) | x) | ||
4576 | #define PXP_ILT_RANGE(f, l) (((l) << 10) | f) | ||
4577 | |||
4578 | #define CNIC_ILT_LINES 0 | ||
4579 | |||
4580 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | ||
4581 | { | ||
4582 | int reg; | ||
4583 | |||
4584 | if (CHIP_IS_E1H(bp)) | ||
4585 | reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; | ||
4586 | else /* E1 */ | ||
4587 | reg = PXP2_REG_RQ_ONCHIP_AT + index*8; | ||
4588 | |||
4589 | bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); | ||
4590 | } | ||
4591 | |||
4592 | static int bnx2x_init_func(struct bnx2x *bp) | ||
4593 | { | ||
4594 | int port = BP_PORT(bp); | ||
4595 | int func = BP_FUNC(bp); | ||
4596 | int i; | ||
4597 | |||
4598 | DP(BNX2X_MSG_MCP, "starting func init func %x\n", func); | ||
4599 | |||
4600 | i = FUNC_ILT_BASE(func); | ||
4601 | |||
4602 | bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); | ||
4603 | if (CHIP_IS_E1H(bp)) { | ||
4604 | REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); | ||
4605 | REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); | ||
4606 | } else /* E1 */ | ||
4607 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, | ||
4608 | PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); | ||
4609 | |||
4610 | |||
4611 | if (CHIP_IS_E1H(bp)) { | ||
4612 | for (i = 0; i < 9; i++) | ||
4613 | bnx2x_init_block(bp, | ||
4614 | cm_start[func][i], cm_end[func][i]); | ||
4615 | |||
4616 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); | ||
4617 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); | ||
4618 | } | ||
4619 | |||
4620 | /* HC init per function */ | ||
4621 | if (CHIP_IS_E1H(bp)) { | ||
4622 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | ||
4623 | |||
4624 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | ||
4625 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
4626 | } | ||
4627 | bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]); | ||
4628 | |||
4629 | if (CHIP_IS_E1H(bp)) | ||
4630 | REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func); | ||
4631 | |||
4117 | /* Reset PCIE errors for debug */ | 4632 | /* Reset PCIE errors for debug */ |
4118 | REG_WR(bp, 0x2114, 0xffffffff); | 4633 | REG_WR(bp, 0x2114, 0xffffffff); |
4119 | REG_WR(bp, 0x2120, 0xffffffff); | 4634 | REG_WR(bp, 0x2120, 0xffffffff); |
4120 | REG_WR(bp, 0x2814, 0xffffffff); | ||
4121 | 4635 | ||
4122 | /* !!! move to init_values.h */ | 4636 | return 0; |
4123 | REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4637 | } |
4124 | REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4638 | |
4125 | REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4639 | static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
4126 | REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4640 | { |
4641 | int i, rc = 0; | ||
4127 | 4642 | ||
4128 | REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1); | 4643 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", |
4129 | REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1); | 4644 | BP_FUNC(bp), load_code); |
4130 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264); | ||
4131 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0); | ||
4132 | 4645 | ||
4133 | bnx2x_gunzip_end(bp); | 4646 | bp->dmae_ready = 0; |
4647 | mutex_init(&bp->dmae_mutex); | ||
4648 | bnx2x_gunzip_init(bp); | ||
4134 | 4649 | ||
4135 | if (!nomcp) { | 4650 | switch (load_code) { |
4136 | port = bp->port; | 4651 | case FW_MSG_CODE_DRV_LOAD_COMMON: |
4652 | rc = bnx2x_init_common(bp); | ||
4653 | if (rc) | ||
4654 | goto init_hw_err; | ||
4655 | /* no break */ | ||
4656 | |||
4657 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
4658 | bp->dmae_ready = 1; | ||
4659 | rc = bnx2x_init_port(bp); | ||
4660 | if (rc) | ||
4661 | goto init_hw_err; | ||
4662 | /* no break */ | ||
4663 | |||
4664 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
4665 | bp->dmae_ready = 1; | ||
4666 | rc = bnx2x_init_func(bp); | ||
4667 | if (rc) | ||
4668 | goto init_hw_err; | ||
4669 | break; | ||
4670 | |||
4671 | default: | ||
4672 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
4673 | break; | ||
4674 | } | ||
4675 | |||
4676 | if (!BP_NOMCP(bp)) { | ||
4677 | int func = BP_FUNC(bp); | ||
4137 | 4678 | ||
4138 | bp->fw_drv_pulse_wr_seq = | 4679 | bp->fw_drv_pulse_wr_seq = |
4139 | (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) & | 4680 | (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & |
4140 | DRV_PULSE_SEQ_MASK); | 4681 | DRV_PULSE_SEQ_MASK); |
4141 | bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param); | 4682 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); |
4142 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", | 4683 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n", |
4143 | bp->fw_drv_pulse_wr_seq, bp->fw_mb); | 4684 | bp->fw_drv_pulse_wr_seq, bp->func_stx); |
4144 | } else { | 4685 | } else |
4145 | bp->fw_mb = 0; | 4686 | bp->func_stx = 0; |
4146 | } | ||
4147 | 4687 | ||
4148 | return 0; | 4688 | /* this needs to be done before gunzip end */ |
4689 | bnx2x_zero_def_sb(bp); | ||
4690 | for_each_queue(bp, i) | ||
4691 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | ||
4692 | |||
4693 | init_hw_err: | ||
4694 | bnx2x_gunzip_end(bp); | ||
4695 | |||
4696 | return rc; | ||
4149 | } | 4697 | } |
4150 | 4698 | ||
4151 | /* send the MCP a request, block until there is a reply */ | 4699 | /* send the MCP a request, block until there is a reply */ |
4152 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | 4700 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) |
4153 | { | 4701 | { |
4154 | int port = bp->port; | 4702 | int func = BP_FUNC(bp); |
4155 | u32 seq = ++bp->fw_seq; | 4703 | u32 seq = ++bp->fw_seq; |
4156 | u32 rc = 0; | 4704 | u32 rc = 0; |
4157 | 4705 | ||
4158 | SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq)); | 4706 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); |
4159 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); | 4707 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); |
4160 | 4708 | ||
4161 | /* let the FW do it's magic ... */ | 4709 | /* let the FW do it's magic ... */ |
@@ -4164,7 +4712,7 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
4164 | if (CHIP_REV_IS_SLOW(bp)) | 4712 | if (CHIP_REV_IS_SLOW(bp)) |
4165 | msleep(900); | 4713 | msleep(900); |
4166 | 4714 | ||
4167 | rc = SHMEM_RD(bp, func_mb[port].fw_mb_header); | 4715 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); |
4168 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); | 4716 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); |
4169 | 4717 | ||
4170 | /* is this a reply to our command? */ | 4718 | /* is this a reply to our command? */ |
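bnx2x_fw_command() is a simple mailbox handshake with the management firmware: the command is OR'ed with an incrementing sequence number, written to the function's drv_mb_header, and the reply read from fw_mb_header is only trusted if its sequence field matches. A hedged user-space model of that check (the mask value and response code are invented stand-ins, not the driver's constants):

	/* toy model of the drv_mb/fw_mb sequence handshake; names and values are illustrative */
	#include <stdint.h>
	#include <stdio.h>

	#define SEQ_MASK 0x0000ffff			/* stand-in for the FW sequence mask */

	static uint32_t drv_mb_header;			/* models SHMEM func_mb[func].drv_mb_header */
	static uint32_t fw_mb_header;			/* models SHMEM func_mb[func].fw_mb_header */

	static void fake_mcp(void)			/* pretend firmware: echo the sequence */
	{
		fw_mb_header = 0x00010000 | (drv_mb_header & SEQ_MASK);
	}

	static uint32_t fw_command(uint32_t command, uint32_t *fw_seq)
	{
		uint32_t seq = ++(*fw_seq);
		uint32_t rc;

		drv_mb_header = command | seq;		/* driver -> MCP */
		fake_mcp();				/* the real code sleeps and lets the FW answer */
		rc = fw_mb_header;			/* MCP -> driver */

		if ((rc & SEQ_MASK) == seq)		/* is this a reply to our command? */
			return rc & ~SEQ_MASK;
		return 0;				/* stale or missing reply */
	}

	int main(void)
	{
		uint32_t fw_seq = 0;

		printf("response 0x%x\n", (unsigned)fw_command(0x00020000, &fw_seq));
		return 0;
	}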
@@ -4229,15 +4777,13 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
4229 | NUM_RCQ_BD); | 4777 | NUM_RCQ_BD); |
4230 | } | 4778 | } |
4231 | 4779 | ||
4232 | BNX2X_FREE(bp->fp); | ||
4233 | |||
4234 | /* end of fastpath */ | 4780 | /* end of fastpath */ |
4235 | 4781 | ||
4236 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | 4782 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, |
4237 | (sizeof(struct host_def_status_block))); | 4783 | sizeof(struct host_def_status_block)); |
4238 | 4784 | ||
4239 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 4785 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
4240 | (sizeof(struct bnx2x_slowpath))); | 4786 | sizeof(struct bnx2x_slowpath)); |
4241 | 4787 | ||
4242 | #ifdef BCM_ISCSI | 4788 | #ifdef BCM_ISCSI |
4243 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); | 4789 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); |
@@ -4273,8 +4819,6 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
4273 | int i; | 4819 | int i; |
4274 | 4820 | ||
4275 | /* fastpath */ | 4821 | /* fastpath */ |
4276 | BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues); | ||
4277 | |||
4278 | for_each_queue(bp, i) { | 4822 | for_each_queue(bp, i) { |
4279 | bnx2x_fp(bp, i, bp) = bp; | 4823 | bnx2x_fp(bp, i, bp) = bp; |
4280 | 4824 | ||
@@ -4370,8 +4914,6 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) | |||
4370 | u16 sw_prod = fp->tx_pkt_prod; | 4914 | u16 sw_prod = fp->tx_pkt_prod; |
4371 | u16 sw_cons = fp->tx_pkt_cons; | 4915 | u16 sw_cons = fp->tx_pkt_cons; |
4372 | 4916 | ||
4373 | BUG_TRAP(fp->tx_buf_ring != NULL); | ||
4374 | |||
4375 | while (sw_cons != sw_prod) { | 4917 | while (sw_cons != sw_prod) { |
4376 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); | 4918 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); |
4377 | sw_cons++; | 4919 | sw_cons++; |
@@ -4386,8 +4928,6 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
4386 | for_each_queue(bp, j) { | 4928 | for_each_queue(bp, j) { |
4387 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 4929 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4388 | 4930 | ||
4389 | BUG_TRAP(fp->rx_buf_ring != NULL); | ||
4390 | |||
4391 | for (i = 0; i < NUM_RX_BD; i++) { | 4931 | for (i = 0; i < NUM_RX_BD; i++) { |
4392 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; | 4932 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; |
4393 | struct sk_buff *skb = rx_buf->skb; | 4933 | struct sk_buff *skb = rx_buf->skb; |
@@ -4414,7 +4954,7 @@ static void bnx2x_free_skbs(struct bnx2x *bp) | |||
4414 | 4954 | ||
4415 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | 4955 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) |
4416 | { | 4956 | { |
4417 | int i; | 4957 | int i, offset = 1; |
4418 | 4958 | ||
4419 | free_irq(bp->msix_table[0].vector, bp->dev); | 4959 | free_irq(bp->msix_table[0].vector, bp->dev); |
4420 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | 4960 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", |
@@ -4422,26 +4962,22 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp) | |||
4422 | 4962 | ||
4423 | for_each_queue(bp, i) { | 4963 | for_each_queue(bp, i) { |
4424 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | 4964 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " |
4425 | "state(%x)\n", i, bp->msix_table[i + 1].vector, | 4965 | "state %x\n", i, bp->msix_table[i + offset].vector, |
4426 | bnx2x_fp(bp, i, state)); | 4966 | bnx2x_fp(bp, i, state)); |
4427 | 4967 | ||
4428 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) | 4968 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) |
4429 | BNX2X_ERR("IRQ of fp #%d being freed while " | 4969 | BNX2X_ERR("IRQ of fp #%d being freed while " |
4430 | "state != closed\n", i); | 4970 | "state != closed\n", i); |
4431 | 4971 | ||
4432 | free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); | 4972 | free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); |
4433 | } | 4973 | } |
4434 | |||
4435 | } | 4974 | } |
4436 | 4975 | ||
4437 | static void bnx2x_free_irq(struct bnx2x *bp) | 4976 | static void bnx2x_free_irq(struct bnx2x *bp) |
4438 | { | 4977 | { |
4439 | |||
4440 | if (bp->flags & USING_MSIX_FLAG) { | 4978 | if (bp->flags & USING_MSIX_FLAG) { |
4441 | |||
4442 | bnx2x_free_msix_irqs(bp); | 4979 | bnx2x_free_msix_irqs(bp); |
4443 | pci_disable_msix(bp->pdev); | 4980 | pci_disable_msix(bp->pdev); |
4444 | |||
4445 | bp->flags &= ~USING_MSIX_FLAG; | 4981 | bp->flags &= ~USING_MSIX_FLAG; |
4446 | 4982 | ||
4447 | } else | 4983 | } else |
@@ -4450,87 +4986,87 @@ static void bnx2x_free_irq(struct bnx2x *bp) | |||
4450 | 4986 | ||
4451 | static int bnx2x_enable_msix(struct bnx2x *bp) | 4987 | static int bnx2x_enable_msix(struct bnx2x *bp) |
4452 | { | 4988 | { |
4453 | 4989 | int i, rc, offset; | |
4454 | int i; | ||
4455 | 4990 | ||
4456 | bp->msix_table[0].entry = 0; | 4991 | bp->msix_table[0].entry = 0; |
4457 | for_each_queue(bp, i) | 4992 | offset = 1; |
4458 | bp->msix_table[i + 1].entry = i + 1; | 4993 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n"); |
4459 | 4994 | ||
4460 | if (pci_enable_msix(bp->pdev, &bp->msix_table[0], | 4995 | for_each_queue(bp, i) { |
4461 | bp->num_queues + 1)){ | 4996 | int igu_vec = offset + i + BP_L_ID(bp); |
4462 | BNX2X_LOG("failed to enable MSI-X\n"); | ||
4463 | return -1; | ||
4464 | 4997 | ||
4998 | bp->msix_table[i + offset].entry = igu_vec; | ||
4999 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | ||
5000 | "(fastpath #%u)\n", i + offset, igu_vec, i); | ||
4465 | } | 5001 | } |
4466 | 5002 | ||
5003 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], | ||
5004 | bp->num_queues + offset); | ||
5005 | if (rc) { | ||
5006 | DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n"); | ||
5007 | return -1; | ||
5008 | } | ||
4467 | bp->flags |= USING_MSIX_FLAG; | 5009 | bp->flags |= USING_MSIX_FLAG; |
4468 | 5010 | ||
4469 | return 0; | 5011 | return 0; |
4470 | |||
4471 | } | 5012 | } |
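The vector table built above keeps entry 0 for the slowpath/default status block and maps fastpath queue i to IGU vector offset + i + BP_L_ID(bp), so the two functions sharing a port on E1H do not collide. A small illustration of the resulting layout (queue count and BP_L_ID value are made up):

	/* illustration of the msix_table entry assignment; numbers are hypothetical */
	#include <stdio.h>

	int main(void)
	{
		int num_queues = 4, offset = 1, bp_l_id = 0;
		int i;

		printf("msix_table[0].entry = 0 (slowpath)\n");
		for (i = 0; i < num_queues; i++)
			printf("msix_table[%d].entry = %d (fastpath #%d)\n",
			       i + offset, offset + i + bp_l_id, i);
		return 0;
	}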
4472 | 5013 | ||
4473 | |||
4474 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | 5014 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
4475 | { | 5015 | { |
4476 | 5016 | int i, rc, offset = 1; | |
4477 | int i, rc; | ||
4478 | 5017 | ||
4479 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | 5018 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, |
4480 | bp->dev->name, bp->dev); | 5019 | bp->dev->name, bp->dev); |
4481 | |||
4482 | if (rc) { | 5020 | if (rc) { |
4483 | BNX2X_ERR("request sp irq failed\n"); | 5021 | BNX2X_ERR("request sp irq failed\n"); |
4484 | return -EBUSY; | 5022 | return -EBUSY; |
4485 | } | 5023 | } |
4486 | 5024 | ||
4487 | for_each_queue(bp, i) { | 5025 | for_each_queue(bp, i) { |
4488 | rc = request_irq(bp->msix_table[i + 1].vector, | 5026 | rc = request_irq(bp->msix_table[i + offset].vector, |
4489 | bnx2x_msix_fp_int, 0, | 5027 | bnx2x_msix_fp_int, 0, |
4490 | bp->dev->name, &bp->fp[i]); | 5028 | bp->dev->name, &bp->fp[i]); |
4491 | |||
4492 | if (rc) { | 5029 | if (rc) { |
4493 | BNX2X_ERR("request fp #%d irq failed " | 5030 | BNX2X_ERR("request fp #%d irq failed rc %d\n", |
4494 | "rc %d\n", i, rc); | 5031 | i + offset, rc); |
4495 | bnx2x_free_msix_irqs(bp); | 5032 | bnx2x_free_msix_irqs(bp); |
4496 | return -EBUSY; | 5033 | return -EBUSY; |
4497 | } | 5034 | } |
4498 | 5035 | ||
4499 | bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ; | 5036 | bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ; |
4500 | |||
4501 | } | 5037 | } |
4502 | 5038 | ||
4503 | return 0; | 5039 | return 0; |
4504 | |||
4505 | } | 5040 | } |
4506 | 5041 | ||
4507 | static int bnx2x_req_irq(struct bnx2x *bp) | 5042 | static int bnx2x_req_irq(struct bnx2x *bp) |
4508 | { | 5043 | { |
5044 | int rc; | ||
4509 | 5045 | ||
4510 | int rc = request_irq(bp->pdev->irq, bnx2x_interrupt, | 5046 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED, |
4511 | IRQF_SHARED, bp->dev->name, bp->dev); | 5047 | bp->dev->name, bp->dev); |
4512 | if (!rc) | 5048 | if (!rc) |
4513 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | 5049 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; |
4514 | 5050 | ||
4515 | return rc; | 5051 | return rc; |
4516 | |||
4517 | } | 5052 | } |
4518 | 5053 | ||
4519 | /* | 5054 | /* |
4520 | * Init service functions | 5055 | * Init service functions |
4521 | */ | 5056 | */ |
4522 | 5057 | ||
4523 | static void bnx2x_set_mac_addr(struct bnx2x *bp) | 5058 | static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) |
4524 | { | 5059 | { |
4525 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 5060 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); |
5061 | int port = BP_PORT(bp); | ||
4526 | 5062 | ||
4527 | /* CAM allocation | 5063 | /* CAM allocation |
4528 | * unicasts 0-31:port0 32-63:port1 | 5064 | * unicasts 0-31:port0 32-63:port1 |
4529 | * multicast 64-127:port0 128-191:port1 | 5065 | * multicast 64-127:port0 128-191:port1 |
4530 | */ | 5066 | */ |
4531 | config->hdr.length_6b = 2; | 5067 | config->hdr.length_6b = 2; |
4532 | config->hdr.offset = bp->port ? 31 : 0; | 5068 | config->hdr.offset = port ? 31 : 0; |
4533 | config->hdr.reserved0 = 0; | 5069 | config->hdr.client_id = BP_CL_ID(bp); |
4534 | config->hdr.reserved1 = 0; | 5070 | config->hdr.reserved1 = 0; |
4535 | 5071 | ||
4536 | /* primary MAC */ | 5072 | /* primary MAC */ |
@@ -4540,7 +5076,7 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4540 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | 5076 | swab16(*(u16 *)&bp->dev->dev_addr[2]); |
4541 | config->config_table[0].cam_entry.lsb_mac_addr = | 5077 | config->config_table[0].cam_entry.lsb_mac_addr = |
4542 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 5078 | swab16(*(u16 *)&bp->dev->dev_addr[4]); |
4543 | config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port); | 5079 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); |
4544 | config->config_table[0].target_table_entry.flags = 0; | 5080 | config->config_table[0].target_table_entry.flags = 0; |
4545 | config->config_table[0].target_table_entry.client_id = 0; | 5081 | config->config_table[0].target_table_entry.client_id = 0; |
4546 | config->config_table[0].target_table_entry.vlan_id = 0; | 5082 | config->config_table[0].target_table_entry.vlan_id = 0; |
@@ -4554,7 +5090,7 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4554 | config->config_table[1].cam_entry.msb_mac_addr = 0xffff; | 5090 | config->config_table[1].cam_entry.msb_mac_addr = 0xffff; |
4555 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; | 5091 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; |
4556 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; | 5092 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; |
4557 | config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port); | 5093 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); |
4558 | config->config_table[1].target_table_entry.flags = | 5094 | config->config_table[1].target_table_entry.flags = |
4559 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | 5095 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; |
4560 | config->config_table[1].target_table_entry.client_id = 0; | 5096 | config->config_table[1].target_table_entry.client_id = 0; |
@@ -4565,64 +5101,105 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4565 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 5101 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); |
4566 | } | 5102 | } |
4567 | 5103 | ||
5104 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) | ||
5105 | { | ||
5106 | struct mac_configuration_cmd_e1h *config = | ||
5107 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | ||
5108 | |||
5109 | if (bp->state != BNX2X_STATE_OPEN) { | ||
5110 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | ||
5111 | return; | ||
5112 | } | ||
5113 | |||
5114 | /* CAM allocation for E1H | ||
5115 | * unicasts: by func number | ||
5116 | * multicast: 20+FUNC*20, 20 each | ||
5117 | */ | ||
5118 | config->hdr.length_6b = 1; | ||
5119 | config->hdr.offset = BP_FUNC(bp); | ||
5120 | config->hdr.client_id = BP_CL_ID(bp); | ||
5121 | config->hdr.reserved1 = 0; | ||
5122 | |||
5123 | /* primary MAC */ | ||
5124 | config->config_table[0].msb_mac_addr = | ||
5125 | swab16(*(u16 *)&bp->dev->dev_addr[0]); | ||
5126 | config->config_table[0].middle_mac_addr = | ||
5127 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | ||
5128 | config->config_table[0].lsb_mac_addr = | ||
5129 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | ||
5130 | config->config_table[0].client_id = BP_L_ID(bp); | ||
5131 | config->config_table[0].vlan_id = 0; | ||
5132 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | ||
5133 | config->config_table[0].flags = BP_PORT(bp); | ||
5134 | |||
5135 | DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", | ||
5136 | config->config_table[0].msb_mac_addr, | ||
5137 | config->config_table[0].middle_mac_addr, | ||
5138 | config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); | ||
5139 | |||
5140 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
5141 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
5142 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | ||
5143 | } | ||
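The difference between the two set_mac_addr variants is mostly CAM layout: E1 uses per-port windows for the unicast and broadcast entries (header offset 0 or 31 as written above), while E1H gives each function one unicast entry indexed by its function number plus a private multicast range. A sketch of the offset arithmetic implied by the comments (illustrative only, not driver code):

	/* CAM offset selection as described in the comments above */
	#include <stdio.h>

	static int e1_ucast_offset(int port)  { return port ? 31 : 0; }   /* unicasts 0-31 / 32-63 */
	static int e1h_ucast_offset(int func) { return func; }            /* one entry per function */
	static int e1h_mcast_base(int func)   { return 20 + func * 20; }  /* multicast: 20+FUNC*20, 20 each */

	int main(void)
	{
		printf("E1  port1 unicast offset %d\n", e1_ucast_offset(1));
		printf("E1H func3 unicast offset %d, mcast base %d\n",
		       e1h_ucast_offset(3), e1h_mcast_base(3));
		return 0;
	}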
5144 | |||
4568 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | 5145 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, |
4569 | int *state_p, int poll) | 5146 | int *state_p, int poll) |
4570 | { | 5147 | { |
4571 | /* can take a while if any port is running */ | 5148 | /* can take a while if any port is running */ |
4572 | int timeout = 500; | 5149 | int cnt = 500; |
4573 | 5150 | ||
4574 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", | 5151 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", |
4575 | poll ? "polling" : "waiting", state, idx); | 5152 | poll ? "polling" : "waiting", state, idx); |
4576 | 5153 | ||
4577 | might_sleep(); | 5154 | might_sleep(); |
4578 | 5155 | while (cnt--) { | |
4579 | while (timeout) { | ||
4580 | |||
4581 | if (poll) { | 5156 | if (poll) { |
4582 | bnx2x_rx_int(bp->fp, 10); | 5157 | bnx2x_rx_int(bp->fp, 10); |
4583 | /* If index is different from 0 | 5158 | /* if index is different from 0 |
4584 | * The reply for some commands will | 5159 | * the reply for some commands will |
4585 | * be on the non-default queue | 5160 | * be on the non-default queue |
4586 | */ | 5161 | */ |
4587 | if (idx) | 5162 | if (idx) |
4588 | bnx2x_rx_int(&bp->fp[idx], 10); | 5163 | bnx2x_rx_int(&bp->fp[idx], 10); |
4589 | } | 5164 | } |
4590 | 5165 | mb(); /* state is changed by bnx2x_sp_event() */ | |
4591 | mb(); /* state is changed by bnx2x_sp_event()*/ | ||
4592 | 5166 | ||
4593 | if (*state_p == state) | 5167 | if (*state_p == state) |
4594 | return 0; | 5168 | return 0; |
4595 | 5169 | ||
4596 | timeout--; | ||
4597 | msleep(1); | 5170 | msleep(1); |
4598 | |||
4599 | } | 5171 | } |
4600 | 5172 | ||
4601 | /* timeout! */ | 5173 | /* timeout! */ |
4602 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", | 5174 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", |
4603 | poll ? "polling" : "waiting", state, idx); | 5175 | poll ? "polling" : "waiting", state, idx); |
5176 | #ifdef BNX2X_STOP_ON_ERROR | ||
5177 | bnx2x_panic(); | ||
5178 | #endif | ||
4604 | 5179 | ||
4605 | return -EBUSY; | 5180 | return -EBUSY; |
4606 | } | 5181 | } |
4607 | 5182 | ||
4608 | static int bnx2x_setup_leading(struct bnx2x *bp) | 5183 | static int bnx2x_setup_leading(struct bnx2x *bp) |
4609 | { | 5184 | { |
5185 | int rc; | ||
4610 | 5186 | ||
4611 | /* reset IGU state */ | 5187 | /* reset IGU state */ |
4612 | bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 5188 | bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4613 | 5189 | ||
4614 | /* SETUP ramrod */ | 5190 | /* SETUP ramrod */ |
4615 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); | 5191 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); |
4616 | 5192 | ||
4617 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); | 5193 | /* Wait for completion */ |
5194 | rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); | ||
4618 | 5195 | ||
5196 | return rc; | ||
4619 | } | 5197 | } |
4620 | 5198 | ||
4621 | static int bnx2x_setup_multi(struct bnx2x *bp, int index) | 5199 | static int bnx2x_setup_multi(struct bnx2x *bp, int index) |
4622 | { | 5200 | { |
4623 | |||
4624 | /* reset IGU state */ | 5201 | /* reset IGU state */ |
4625 | bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 5202 | bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4626 | 5203 | ||
4627 | /* SETUP ramrod */ | 5204 | /* SETUP ramrod */ |
4628 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; | 5205 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; |
@@ -4631,82 +5208,107 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
4631 | /* Wait for completion */ | 5208 | /* Wait for completion */ |
4632 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, | 5209 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, |
4633 | &(bp->fp[index].state), 0); | 5210 | &(bp->fp[index].state), 0); |
4634 | |||
4635 | } | 5211 | } |
4636 | 5212 | ||
4637 | |||
4638 | static int bnx2x_poll(struct napi_struct *napi, int budget); | 5213 | static int bnx2x_poll(struct napi_struct *napi, int budget); |
4639 | static void bnx2x_set_rx_mode(struct net_device *dev); | 5214 | static void bnx2x_set_rx_mode(struct net_device *dev); |
4640 | 5215 | ||
4641 | static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | 5216 | /* must be called with rtnl_lock */ |
5217 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | ||
4642 | { | 5218 | { |
4643 | u32 load_code; | 5219 | u32 load_code; |
4644 | int i; | 5220 | int i, rc; |
5221 | |||
5222 | #ifdef BNX2X_STOP_ON_ERROR | ||
5223 | if (unlikely(bp->panic)) | ||
5224 | return -EPERM; | ||
5225 | #endif | ||
4645 | 5226 | ||
4646 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 5227 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
4647 | 5228 | ||
4648 | /* Send LOAD_REQUEST command to MCP. | 5229 | /* Send LOAD_REQUEST command to MCP |
4649 | Returns the type of LOAD command: if it is the | 5230 | Returns the type of LOAD command: |
4650 | first port to be initialized common blocks should be | 5231 | if it is the first port to be initialized |
4651 | initialized, otherwise - not. | 5232 | common blocks should be initialized, otherwise - not |
4652 | */ | 5233 | */ |
4653 | if (!nomcp) { | 5234 | if (!BP_NOMCP(bp)) { |
4654 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | 5235 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); |
4655 | if (!load_code) { | 5236 | if (!load_code) { |
4656 | BNX2X_ERR("MCP response failure, unloading\n"); | 5237 | BNX2X_ERR("MCP response failure, unloading\n"); |
4657 | return -EBUSY; | 5238 | return -EBUSY; |
4658 | } | 5239 | } |
4659 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | 5240 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) |
4660 | BNX2X_ERR("MCP refused load request, unloading\n"); | ||
4661 | return -EBUSY; /* other port in diagnostic mode */ | 5241 | return -EBUSY; /* other port in diagnostic mode */ |
4662 | } | 5242 | |
4663 | } else { | 5243 | } else { |
4664 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | 5244 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", |
5245 | load_count[0], load_count[1], load_count[2]); | ||
5246 | load_count[0]++; | ||
5247 | load_count[1 + BP_PORT(bp)]++; | ||
5248 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", | ||
5249 | load_count[0], load_count[1], load_count[2]); | ||
5250 | if (load_count[0] == 1) | ||
5251 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
5252 | else if (load_count[1 + BP_PORT(bp)] == 1) | ||
5253 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
5254 | else | ||
5255 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
4665 | } | 5256 | } |
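When the MCP is absent, load_count[] does the bookkeeping instead: slot 0 counts loads chip-wide and slot 1+port counts loads per port, so the first function up performs the COMMON init, the first on each port performs the PORT init, and everyone else only does the FUNCTION init. A worked example of that decision (stand-alone, counters start at zero):

	/* stand-alone model of the no-MCP load_code choice above; for illustration */
	#include <stdio.h>

	enum { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

	static int load_count[3];			/* [0] chip-wide, [1] port 0, [2] port 1 */

	static int pick_load_code(int port)
	{
		load_count[0]++;
		load_count[1 + port]++;
		if (load_count[0] == 1)
			return LOAD_COMMON;		/* first function on the chip */
		if (load_count[1 + port] == 1)
			return LOAD_PORT;		/* first function on this port */
		return LOAD_FUNCTION;			/* chip and port already initialized */
	}

	int main(void)
	{
		printf("%d\n", pick_load_code(0));	/* LOAD_COMMON */
		printf("%d\n", pick_load_code(1));	/* LOAD_PORT */
		printf("%d\n", pick_load_code(0));	/* LOAD_FUNCTION */
		return 0;
	}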
4666 | 5257 | ||
4667 | /* if we can't use msix we only need one fp, | 5258 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
4668 | * so try to enable msix with the requested number of fp's | 5259 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) |
5260 | bp->port.pmf = 1; | ||
5261 | else | ||
5262 | bp->port.pmf = 0; | ||
5263 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
5264 | |||
5265 | /* if we can't use MSI-X we only need one fp, | ||
5266 | * so try to enable MSI-X with the requested number of fp's | ||
4669 | * and fallback to inta with one fp | 5267 | * and fallback to inta with one fp |
4670 | */ | 5268 | */ |
4671 | if (req_irq) { | 5269 | if (use_inta) { |
4672 | if (use_inta) { | 5270 | bp->num_queues = 1; |
5271 | |||
5272 | } else { | ||
5273 | if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp))) | ||
5274 | /* user requested number */ | ||
5275 | bp->num_queues = use_multi; | ||
5276 | |||
5277 | else if (use_multi) | ||
5278 | bp->num_queues = min_t(u32, num_online_cpus(), | ||
5279 | BP_MAX_QUEUES(bp)); | ||
5280 | else | ||
4673 | bp->num_queues = 1; | 5281 | bp->num_queues = 1; |
4674 | } else { | 5282 | |
4675 | if ((use_multi > 1) && (use_multi <= 16)) | 5283 | if (bnx2x_enable_msix(bp)) { |
4676 | /* user requested number */ | 5284 | /* failed to enable MSI-X */ |
4677 | bp->num_queues = use_multi; | 5285 | bp->num_queues = 1; |
4678 | else if (use_multi == 1) | 5286 | if (use_multi) |
4679 | bp->num_queues = num_online_cpus(); | 5287 | BNX2X_ERR("Multi requested but failed" |
4680 | else | 5288 | " to enable MSI-X\n"); |
4681 | bp->num_queues = 1; | ||
4682 | |||
4683 | if (bnx2x_enable_msix(bp)) { | ||
4684 | /* failed to enable msix */ | ||
4685 | bp->num_queues = 1; | ||
4686 | if (use_multi) | ||
4687 | BNX2X_ERR("Multi requested but failed" | ||
4688 | " to enable MSI-X\n"); | ||
4689 | } | ||
4690 | } | 5289 | } |
4691 | } | 5290 | } |
4692 | 5291 | DP(NETIF_MSG_IFUP, | |
4693 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues); | 5292 | "set number of queues to %d\n", bp->num_queues); |
4694 | 5293 | ||
4695 | if (bnx2x_alloc_mem(bp)) | 5294 | if (bnx2x_alloc_mem(bp)) |
4696 | return -ENOMEM; | 5295 | return -ENOMEM; |
4697 | 5296 | ||
4698 | if (req_irq) { | 5297 | /* Disable interrupt handling until HW is initialized */ |
4699 | if (bp->flags & USING_MSIX_FLAG) { | 5298 | atomic_set(&bp->intr_sem, 1); |
4700 | if (bnx2x_req_msix_irqs(bp)) { | ||
4701 | pci_disable_msix(bp->pdev); | ||
4702 | goto load_error; | ||
4703 | } | ||
4704 | 5299 | ||
4705 | } else { | 5300 | if (bp->flags & USING_MSIX_FLAG) { |
4706 | if (bnx2x_req_irq(bp)) { | 5301 | rc = bnx2x_req_msix_irqs(bp); |
4707 | BNX2X_ERR("IRQ request failed, aborting\n"); | 5302 | if (rc) { |
4708 | goto load_error; | 5303 | pci_disable_msix(bp->pdev); |
4709 | } | 5304 | goto load_error; |
5305 | } | ||
5306 | } else { | ||
5307 | bnx2x_ack_int(bp); | ||
5308 | rc = bnx2x_req_irq(bp); | ||
5309 | if (rc) { | ||
5310 | BNX2X_ERR("IRQ request failed, aborting\n"); | ||
5311 | goto load_error; | ||
4710 | } | 5312 | } |
4711 | } | 5313 | } |
4712 | 5314 | ||
@@ -4714,26 +5316,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
4714 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 5316 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
4715 | bnx2x_poll, 128); | 5317 | bnx2x_poll, 128); |
4716 | 5318 | ||
4717 | |||
4718 | /* Initialize HW */ | 5319 | /* Initialize HW */ |
4719 | if (bnx2x_function_init(bp, | 5320 | rc = bnx2x_init_hw(bp, load_code); |
4720 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) { | 5321 | if (rc) { |
4721 | BNX2X_ERR("HW init failed, aborting\n"); | 5322 | BNX2X_ERR("HW init failed, aborting\n"); |
4722 | goto load_error; | 5323 | goto load_error; |
4723 | } | 5324 | } |
4724 | 5325 | ||
4725 | 5326 | /* Enable interrupt handling */ | |
4726 | atomic_set(&bp->intr_sem, 0); | 5327 | atomic_set(&bp->intr_sem, 0); |
4727 | 5328 | ||
4728 | |||
4729 | /* Setup NIC internals and enable interrupts */ | 5329 | /* Setup NIC internals and enable interrupts */ |
4730 | bnx2x_nic_init(bp); | 5330 | bnx2x_nic_init(bp); |
4731 | 5331 | ||
4732 | /* Send LOAD_DONE command to MCP */ | 5332 | /* Send LOAD_DONE command to MCP */ |
4733 | if (!nomcp) { | 5333 | if (!BP_NOMCP(bp)) { |
4734 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | 5334 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); |
4735 | if (!load_code) { | 5335 | if (!load_code) { |
4736 | BNX2X_ERR("MCP response failure, unloading\n"); | 5336 | BNX2X_ERR("MCP response failure, unloading\n"); |
5337 | rc = -EBUSY; | ||
4737 | goto load_int_disable; | 5338 | goto load_int_disable; |
4738 | } | 5339 | } |
4739 | } | 5340 | } |
@@ -4745,33 +5346,68 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
4745 | for_each_queue(bp, i) | 5346 | for_each_queue(bp, i) |
4746 | napi_enable(&bnx2x_fp(bp, i, napi)); | 5347 | napi_enable(&bnx2x_fp(bp, i, napi)); |
4747 | 5348 | ||
4748 | if (bnx2x_setup_leading(bp)) | 5349 | rc = bnx2x_setup_leading(bp); |
5350 | if (rc) { | ||
5351 | #ifdef BNX2X_STOP_ON_ERROR | ||
5352 | bp->panic = 1; | ||
5353 | #endif | ||
4749 | goto load_stop_netif; | 5354 | goto load_stop_netif; |
5355 | } | ||
4750 | 5356 | ||
4751 | for_each_nondefault_queue(bp, i) | 5357 | if (CHIP_IS_E1H(bp)) |
4752 | if (bnx2x_setup_multi(bp, i)) | 5358 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { |
4753 | goto load_stop_netif; | 5359 | BNX2X_ERR("!!! mf_cfg function disabled\n"); |
5360 | bp->state = BNX2X_STATE_DISABLED; | ||
5361 | } | ||
4754 | 5362 | ||
4755 | bnx2x_set_mac_addr(bp); | 5363 | if (bp->state == BNX2X_STATE_OPEN) |
5364 | for_each_nondefault_queue(bp, i) { | ||
5365 | rc = bnx2x_setup_multi(bp, i); | ||
5366 | if (rc) | ||
5367 | goto load_stop_netif; | ||
5368 | } | ||
5369 | |||
5370 | if (CHIP_IS_E1(bp)) | ||
5371 | bnx2x_set_mac_addr_e1(bp); | ||
5372 | else | ||
5373 | bnx2x_set_mac_addr_e1h(bp); | ||
4756 | 5374 | ||
4757 | bnx2x_initial_phy_init(bp); | 5375 | if (bp->port.pmf) |
5376 | bnx2x_initial_phy_init(bp); | ||
4758 | 5377 | ||
4759 | /* Start fast path */ | 5378 | /* Start fast path */ |
4760 | if (req_irq) { /* IRQ is only requested from bnx2x_open */ | 5379 | switch (load_mode) { |
5380 | case LOAD_NORMAL: | ||
5381 | /* Tx queue should be only reenabled */ | ||
5382 | netif_wake_queue(bp->dev); | ||
5383 | bnx2x_set_rx_mode(bp->dev); | ||
5384 | break; | ||
5385 | |||
5386 | case LOAD_OPEN: | ||
5387 | /* IRQ is only requested from bnx2x_open */ | ||
4761 | netif_start_queue(bp->dev); | 5388 | netif_start_queue(bp->dev); |
5389 | bnx2x_set_rx_mode(bp->dev); | ||
4762 | if (bp->flags & USING_MSIX_FLAG) | 5390 | if (bp->flags & USING_MSIX_FLAG) |
4763 | printk(KERN_INFO PFX "%s: using MSI-X\n", | 5391 | printk(KERN_INFO PFX "%s: using MSI-X\n", |
4764 | bp->dev->name); | 5392 | bp->dev->name); |
5393 | break; | ||
4765 | 5394 | ||
4766 | /* Otherwise Tx queue should be only reenabled */ | 5395 | case LOAD_DIAG: |
4767 | } else if (netif_running(bp->dev)) { | ||
4768 | netif_wake_queue(bp->dev); | ||
4769 | bnx2x_set_rx_mode(bp->dev); | 5396 | bnx2x_set_rx_mode(bp->dev); |
5397 | bp->state = BNX2X_STATE_DIAG; | ||
5398 | break; | ||
5399 | |||
5400 | default: | ||
5401 | break; | ||
4770 | } | 5402 | } |
4771 | 5403 | ||
5404 | if (!bp->port.pmf) | ||
5405 | bnx2x__link_status_update(bp); | ||
5406 | |||
4772 | /* start the timer */ | 5407 | /* start the timer */ |
4773 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 5408 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
4774 | 5409 | ||
5410 | |||
4775 | return 0; | 5411 | return 0; |
4776 | 5412 | ||
4777 | load_stop_netif: | 5413 | load_stop_netif: |
@@ -4781,7 +5417,7 @@ load_stop_netif: | |||
4781 | load_int_disable: | 5417 | load_int_disable: |
4782 | bnx2x_int_disable_sync(bp); | 5418 | bnx2x_int_disable_sync(bp); |
4783 | 5419 | ||
4784 | bnx2x_free_skbs(bp); | 5420 | /* Release IRQs */ |
4785 | bnx2x_free_irq(bp); | 5421 | bnx2x_free_irq(bp); |
4786 | 5422 | ||
4787 | load_error: | 5423 | load_error: |
@@ -4789,95 +5425,50 @@ load_error: | |||
4789 | 5425 | ||
4790 | /* TBD we really need to reset the chip | 5426 | /* TBD we really need to reset the chip |
4791 | if we want to recover from this */ | 5427 | if we want to recover from this */ |
4792 | return -EBUSY; | 5428 | return rc; |
4793 | } | ||
4794 | |||
4795 | |||
4796 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | ||
4797 | { | ||
4798 | int port = bp->port; | ||
4799 | #ifdef USE_DMAE | ||
4800 | u32 wb_write[2]; | ||
4801 | #endif | ||
4802 | int base, i; | ||
4803 | |||
4804 | DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code); | ||
4805 | |||
4806 | /* Do not rcv packets to BRB */ | ||
4807 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); | ||
4808 | /* Do not direct rcv packets that are not for MCP to the BRB */ | ||
4809 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : | ||
4810 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
4811 | |||
4812 | /* Configure IGU and AEU */ | ||
4813 | REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000); | ||
4814 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); | ||
4815 | |||
4816 | /* TODO: Close Doorbell port? */ | ||
4817 | |||
4818 | /* Clear ILT */ | ||
4819 | #ifdef USE_DMAE | ||
4820 | wb_write[0] = 0; | ||
4821 | wb_write[1] = 0; | ||
4822 | #endif | ||
4823 | base = port * RQ_ONCHIP_AT_PORT_SIZE; | ||
4824 | for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) { | ||
4825 | #ifdef USE_DMAE | ||
4826 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
4827 | #else | ||
4828 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0); | ||
4829 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0); | ||
4830 | #endif | ||
4831 | } | ||
4832 | |||
4833 | if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) { | ||
4834 | /* reset_common */ | ||
4835 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
4836 | 0xd3ffff7f); | ||
4837 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
4838 | 0x1403); | ||
4839 | } | ||
4840 | } | 5429 | } |
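Reading the tail of bnx2x_nic_load() together with the reset task further down, load_mode only decides how the Tx path is kicked once the ramrods complete; the caller mapping sketched here is an inference from this patch (bnx2x_open() presumably passes LOAD_OPEN, the reset task reloads with LOAD_NORMAL, diagnostic paths use LOAD_DIAG):

	/* compact restatement of the load_mode switch; the caller mapping is an assumption */
	#include <stdio.h>

	enum load_mode { LOAD_NORMAL, LOAD_OPEN, LOAD_DIAG };

	static const char *start_action(enum load_mode mode)
	{
		switch (mode) {
		case LOAD_NORMAL:	/* reload: queue already exists, just wake it */
			return "netif_wake_queue + set_rx_mode";
		case LOAD_OPEN:		/* ndo_open: IRQs freshly requested, start the queue */
			return "netif_start_queue + set_rx_mode (+ MSI-X notice)";
		case LOAD_DIAG:		/* diagnostics: Rx only, state becomes BNX2X_STATE_DIAG */
			return "set_rx_mode, state = DIAG";
		}
		return "";
	}

	int main(void)
	{
		printf("%s\n", start_action(LOAD_OPEN));
		return 0;
	}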
4841 | 5430 | ||
4842 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) | 5431 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) |
4843 | { | 5432 | { |
4844 | |||
4845 | int rc; | 5433 | int rc; |
4846 | 5434 | ||
4847 | /* halt the connection */ | 5435 | /* halt the connection */ |
4848 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; | 5436 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; |
4849 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); | 5437 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); |
4850 | 5438 | ||
4851 | 5439 | /* Wait for completion */ | |
4852 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, | 5440 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, |
4853 | &(bp->fp[index].state), 1); | 5441 | &(bp->fp[index].state), 1); |
4854 | if (rc) /* timeout */ | 5442 | if (rc) /* timeout */ |
4855 | return rc; | 5443 | return rc; |
4856 | 5444 | ||
4857 | /* delete cfc entry */ | 5445 | /* delete cfc entry */ |
4858 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); | 5446 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); |
4859 | 5447 | ||
4860 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, | 5448 | /* Wait for completion */ |
4861 | &(bp->fp[index].state), 1); | 5449 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, |
4862 | 5450 | &(bp->fp[index].state), 1); | |
5451 | return rc; | ||
4863 | } | 5452 | } |
4864 | 5453 | ||
4865 | |||
4866 | static void bnx2x_stop_leading(struct bnx2x *bp) | 5454 | static void bnx2x_stop_leading(struct bnx2x *bp) |
4867 | { | 5455 | { |
4868 | u16 dsb_sp_prod_idx; | 5456 | u16 dsb_sp_prod_idx; |
4869 | /* if the other port is handling traffic, | 5457 | /* if the other port is handling traffic, |
4870 | this can take a lot of time */ | 5458 | this can take a lot of time */ |
4871 | int timeout = 500; | 5459 | int cnt = 500; |
5460 | int rc; | ||
4872 | 5461 | ||
4873 | might_sleep(); | 5462 | might_sleep(); |
4874 | 5463 | ||
4875 | /* Send HALT ramrod */ | 5464 | /* Send HALT ramrod */ |
4876 | bp->fp[0].state = BNX2X_FP_STATE_HALTING; | 5465 | bp->fp[0].state = BNX2X_FP_STATE_HALTING; |
4877 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0); | 5466 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0); |
4878 | 5467 | ||
4879 | if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, | 5468 | /* Wait for completion */ |
4880 | &(bp->fp[0].state), 1)) | 5469 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, |
5470 | &(bp->fp[0].state), 1); | ||
5471 | if (rc) /* timeout */ | ||
4881 | return; | 5472 | return; |
4882 | 5473 | ||
4883 | dsb_sp_prod_idx = *bp->dsb_sp_prod; | 5474 | dsb_sp_prod_idx = *bp->dsb_sp_prod; |
@@ -4889,29 +5480,110 @@ static void bnx2x_stop_leading(struct bnx2x *bp) | |||
4889 | we are going to reset the chip anyway | 5480 | we are going to reset the chip anyway |
4890 | so there is not much to do if this times out | 5481 | so there is not much to do if this times out |
4891 | */ | 5482 | */ |
4892 | while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) { | 5483 | while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { |
4893 | timeout--; | ||
4894 | msleep(1); | 5484 | msleep(1); |
4895 | } | 5485 | if (!cnt) { |
4896 | if (!timeout) { | 5486 | DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " |
4897 | DP(NETIF_MSG_IFDOWN, "timeout polling for completion " | 5487 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", |
4898 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", | 5488 | *bp->dsb_sp_prod, dsb_sp_prod_idx); |
4899 | *bp->dsb_sp_prod, dsb_sp_prod_idx); | 5489 | #ifdef BNX2X_STOP_ON_ERROR |
5490 | bnx2x_panic(); | ||
5491 | #endif | ||
5492 | break; | ||
5493 | } | ||
5494 | cnt--; | ||
4900 | } | 5495 | } |
4901 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 5496 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; |
4902 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | 5497 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; |
4903 | } | 5498 | } |
4904 | 5499 | ||
5500 | static void bnx2x_reset_func(struct bnx2x *bp) | ||
5501 | { | ||
5502 | int port = BP_PORT(bp); | ||
5503 | int func = BP_FUNC(bp); | ||
5504 | int base, i; | ||
5505 | |||
5506 | /* Configure IGU */ | ||
5507 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | ||
5508 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
5509 | |||
5510 | REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000); | ||
5511 | |||
5512 | /* Clear ILT */ | ||
5513 | base = FUNC_ILT_BASE(func); | ||
5514 | for (i = base; i < base + ILT_PER_FUNC; i++) | ||
5515 | bnx2x_ilt_wr(bp, i, 0); | ||
5516 | } | ||
5517 | |||
5518 | static void bnx2x_reset_port(struct bnx2x *bp) | ||
5519 | { | ||
5520 | int port = BP_PORT(bp); | ||
5521 | u32 val; | ||
5522 | |||
5523 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | ||
5524 | |||
5525 | /* Do not rcv packets to BRB */ | ||
5526 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); | ||
5527 | /* Do not direct rcv packets that are not for MCP to the BRB */ | ||
5528 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : | ||
5529 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
5530 | |||
5531 | /* Configure AEU */ | ||
5532 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); | ||
5533 | |||
5534 | msleep(100); | ||
5535 | /* Check for BRB port occupancy */ | ||
5536 | val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); | ||
5537 | if (val) | ||
5538 | DP(NETIF_MSG_IFDOWN, | ||
5539 | "BRB1 is not empty %d blocks are occupied\n", val); | ||
5540 | |||
5541 | /* TODO: Close Doorbell port? */ | ||
5542 | } | ||
5543 | |||
5544 | static void bnx2x_reset_common(struct bnx2x *bp) | ||
5545 | { | ||
5546 | /* reset_common */ | ||
5547 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
5548 | 0xd3ffff7f); | ||
5549 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | ||
5550 | } | ||
4905 | 5551 | ||
4906 | static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | 5552 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) |
5553 | { | ||
5554 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", | ||
5555 | BP_FUNC(bp), reset_code); | ||
5556 | |||
5557 | switch (reset_code) { | ||
5558 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | ||
5559 | bnx2x_reset_port(bp); | ||
5560 | bnx2x_reset_func(bp); | ||
5561 | bnx2x_reset_common(bp); | ||
5562 | break; | ||
5563 | |||
5564 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | ||
5565 | bnx2x_reset_port(bp); | ||
5566 | bnx2x_reset_func(bp); | ||
5567 | break; | ||
5568 | |||
5569 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | ||
5570 | bnx2x_reset_func(bp); | ||
5571 | break; | ||
5572 | |||
5573 | default: | ||
5574 | BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); | ||
5575 | break; | ||
5576 | } | ||
5577 | } | ||
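The teardown mirrors the load hierarchy: an UNLOAD_COMMON reply tears down port, function and common blocks, UNLOAD_PORT the port and function, and UNLOAD_FUNCTION the function alone. A tiny restatement of which helpers run for each code (illustrative; the actual call order is as in the switch above):

	/* which reset helpers run per unload code, mirroring the switch above */
	#include <stdio.h>

	enum { UNLOAD_FUNCTION, UNLOAD_PORT, UNLOAD_COMMON };

	int main(void)
	{
		int code;

		for (code = UNLOAD_FUNCTION; code <= UNLOAD_COMMON; code++)
			printf("code %d: reset_func%s%s\n", code,
			       code >= UNLOAD_PORT   ? " + reset_port"   : "",
			       code == UNLOAD_COMMON ? " + reset_common" : "");
		return 0;
	}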
5578 | |||
5579 | /* must be called with rtnl_lock */ | ||
5580 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
4907 | { | 5581 | { |
4908 | u32 reset_code = 0; | 5582 | u32 reset_code = 0; |
4909 | int i, timeout; | 5583 | int i, cnt; |
4910 | 5584 | ||
4911 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 5585 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
4912 | 5586 | ||
4913 | del_timer_sync(&bp->timer); | ||
4914 | |||
4915 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 5587 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
4916 | bnx2x_set_storm_rx_mode(bp); | 5588 | bnx2x_set_storm_rx_mode(bp); |
4917 | 5589 | ||
@@ -4920,21 +5592,44 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | |||
4920 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 5592 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
4921 | } | 5593 | } |
4922 | 5594 | ||
5595 | del_timer_sync(&bp->timer); | ||
5596 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
5597 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
5598 | |||
4923 | /* Wait until all fast path tasks complete */ | 5599 | /* Wait until all fast path tasks complete */ |
4924 | for_each_queue(bp, i) { | 5600 | for_each_queue(bp, i) { |
4925 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5601 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
4926 | 5602 | ||
4927 | timeout = 1000; | 5603 | #ifdef BNX2X_STOP_ON_ERROR |
4928 | while (bnx2x_has_work(fp) && (timeout--)) | 5604 | #ifdef __powerpc64__ |
5605 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
5606 | #else | ||
5607 | DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n", | ||
5608 | #endif | ||
5609 | fp->tpa_queue_used); | ||
5610 | #endif | ||
5611 | cnt = 1000; | ||
5612 | smp_rmb(); | ||
5613 | while (bnx2x_has_work(fp)) { | ||
4929 | msleep(1); | 5614 | msleep(1); |
4930 | if (!timeout) | 5615 | if (!cnt) { |
4931 | BNX2X_ERR("timeout waiting for queue[%d]\n", i); | 5616 | BNX2X_ERR("timeout waiting for queue[%d]\n", |
5617 | i); | ||
5618 | #ifdef BNX2X_STOP_ON_ERROR | ||
5619 | bnx2x_panic(); | ||
5620 | return -EBUSY; | ||
5621 | #else | ||
5622 | break; | ||
5623 | #endif | ||
5624 | } | ||
5625 | cnt--; | ||
5626 | smp_rmb(); | ||
5627 | } | ||
4932 | } | 5628 | } |
4933 | 5629 | ||
4934 | /* Wait until stat ramrod returns and all SP tasks complete */ | 5630 | /* Wait until all slow path tasks complete */ |
4935 | timeout = 1000; | 5631 | cnt = 1000; |
4936 | while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) && | 5632 | while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--) |
4937 | (timeout--)) | ||
4938 | msleep(1); | 5633 | msleep(1); |
4939 | 5634 | ||
4940 | for_each_queue(bp, i) | 5635 | for_each_queue(bp, i) |
@@ -4942,59 +5637,84 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | |||
4942 | /* Disable interrupts after Tx and Rx are disabled on stack level */ | 5637 | /* Disable interrupts after Tx and Rx are disabled on stack level */ |
4943 | bnx2x_int_disable_sync(bp); | 5638 | bnx2x_int_disable_sync(bp); |
4944 | 5639 | ||
5640 | /* Release IRQs */ | ||
5641 | bnx2x_free_irq(bp); | ||
5642 | |||
4945 | if (bp->flags & NO_WOL_FLAG) | 5643 | if (bp->flags & NO_WOL_FLAG) |
4946 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; | 5644 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; |
4947 | 5645 | ||
4948 | else if (bp->wol) { | 5646 | else if (bp->wol) { |
4949 | u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; | 5647 | u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
4950 | u8 *mac_addr = bp->dev->dev_addr; | 5648 | u8 *mac_addr = bp->dev->dev_addr; |
4951 | u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD | | 5649 | u32 val; |
4952 | EMAC_MODE_ACPI_RCVD); | ||
4953 | |||
4954 | EMAC_WR(EMAC_REG_EMAC_MODE, val); | ||
4955 | 5650 | ||
5651 | /* The mac address is written to entries 1-4 to | ||
5652 | preserve entry 0 which is used by the PMF */ | ||
4956 | val = (mac_addr[0] << 8) | mac_addr[1]; | 5653 | val = (mac_addr[0] << 8) | mac_addr[1]; |
4957 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); | 5654 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); |
4958 | 5655 | ||
4959 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | | 5656 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | |
4960 | (mac_addr[4] << 8) | mac_addr[5]; | 5657 | (mac_addr[4] << 8) | mac_addr[5]; |
4961 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); | 5658 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, |
5659 | val); | ||
4962 | 5660 | ||
4963 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 5661 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
4964 | 5662 | ||
4965 | } else | 5663 | } else |
4966 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 5664 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
4967 | 5665 | ||
4968 | /* Close multi and leading connections */ | 5666 | /* Close multi and leading connections |
5667 | Completions for ramrods are collected in a synchronous way */ | ||
4969 | for_each_nondefault_queue(bp, i) | 5668 | for_each_nondefault_queue(bp, i) |
4970 | if (bnx2x_stop_multi(bp, i)) | 5669 | if (bnx2x_stop_multi(bp, i)) |
4971 | goto unload_error; | 5670 | goto unload_error; |
4972 | 5671 | ||
5672 | if (CHIP_IS_E1H(bp)) | ||
5673 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); | ||
5674 | |||
4973 | bnx2x_stop_leading(bp); | 5675 | bnx2x_stop_leading(bp); |
5676 | #ifdef BNX2X_STOP_ON_ERROR | ||
5677 | /* If ramrod completion timed out - break here! */ | ||
5678 | if (bp->panic) { | ||
5679 | BNX2X_ERR("Stop leading failed!\n"); | ||
5680 | return -EBUSY; | ||
5681 | } | ||
5682 | #endif | ||
5683 | |||
4974 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || | 5684 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || |
4975 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { | 5685 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { |
4976 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly!" | 5686 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly! " |
4977 | "state 0x%x fp[0].state 0x%x", | 5687 | "state 0x%x fp[0].state 0x%x\n", |
4978 | bp->state, bp->fp[0].state); | 5688 | bp->state, bp->fp[0].state); |
4979 | } | 5689 | } |
4980 | 5690 | ||
4981 | unload_error: | 5691 | unload_error: |
4982 | bnx2x__link_reset(bp); | 5692 | if (!BP_NOMCP(bp)) |
4983 | |||
4984 | if (!nomcp) | ||
4985 | reset_code = bnx2x_fw_command(bp, reset_code); | 5693 | reset_code = bnx2x_fw_command(bp, reset_code); |
4986 | else | 5694 | else { |
4987 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | 5695 | DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", |
5696 | load_count[0], load_count[1], load_count[2]); | ||
5697 | load_count[0]--; | ||
5698 | load_count[1 + BP_PORT(bp)]--; | ||
5699 | DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", | ||
5700 | load_count[0], load_count[1], load_count[2]); | ||
5701 | if (load_count[0] == 0) | ||
5702 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
5703 | else if (load_count[1 + BP_PORT(bp)] == 0) | ||
5704 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
5705 | else | ||
5706 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
5707 | } | ||
4988 | 5708 | ||
4989 | /* Release IRQs */ | 5709 | if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || |
4990 | if (free_irq) | 5710 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) |
4991 | bnx2x_free_irq(bp); | 5711 | bnx2x__link_reset(bp); |
4992 | 5712 | ||
4993 | /* Reset the chip */ | 5713 | /* Reset the chip */ |
4994 | bnx2x_reset_chip(bp, reset_code); | 5714 | bnx2x_reset_chip(bp, reset_code); |
4995 | 5715 | ||
4996 | /* Report UNLOAD_DONE to MCP */ | 5716 | /* Report UNLOAD_DONE to MCP */ |
4997 | if (!nomcp) | 5717 | if (!BP_NOMCP(bp)) |
4998 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | 5718 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); |
4999 | 5719 | ||
5000 | /* Free SKBs and driver internals */ | 5720 | /* Free SKBs and driver internals */ |
@@ -5008,6 +5728,29 @@ unload_error: | |||
5008 | return 0; | 5728 | return 0; |
5009 | } | 5729 | } |
5010 | 5730 | ||
5731 | static void bnx2x_reset_task(struct work_struct *work) | ||
5732 | { | ||
5733 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); | ||
5734 | |||
5735 | #ifdef BNX2X_STOP_ON_ERROR | ||
5736 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" | ||
5737 | " so reset not done to allow debug dump,\n" | ||
5738 | KERN_ERR " you will need to reboot when done\n"); | ||
5739 | return; | ||
5740 | #endif | ||
5741 | |||
5742 | rtnl_lock(); | ||
5743 | |||
5744 | if (!netif_running(bp->dev)) | ||
5745 | goto reset_task_exit; | ||
5746 | |||
5747 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
5748 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
5749 | |||
5750 | reset_task_exit: | ||
5751 | rtnl_unlock(); | ||
5752 | } | ||
5753 | |||
5011 | /* end of nic load/unload */ | 5754 | /* end of nic load/unload */ |
5012 | 5755 | ||
5013 | /* ethtool_ops */ | 5756 | /* ethtool_ops */ |
@@ -5016,9 +5759,139 @@ unload_error: | |||
5016 | * Init service functions | 5759 | * Init service functions |
5017 | */ | 5760 | */ |
5018 | 5761 | ||
5019 | static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | 5762 | static void __devinit bnx2x_undi_unload(struct bnx2x *bp) |
5763 | { | ||
5764 | u32 val; | ||
5765 | |||
5766 | /* Check if there is any driver already loaded */ | ||
5767 | val = REG_RD(bp, MISC_REG_UNPREPARED); | ||
5768 | if (val == 0x1) { | ||
5769 | /* Check if it is the UNDI driver | ||
5770 | * UNDI driver initializes CID offset for normal bell to 0x7 | ||
5771 | */ | ||
5772 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | ||
5773 | if (val == 0x7) { | ||
5774 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | ||
5775 | /* save our func and fw_seq */ | ||
5776 | int func = BP_FUNC(bp); | ||
5777 | u16 fw_seq = bp->fw_seq; | ||
5778 | |||
5779 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); | ||
5780 | |||
5781 | /* try unload UNDI on port 0 */ | ||
5782 | bp->func = 0; | ||
5783 | bp->fw_seq = (SHMEM_RD(bp, | ||
5784 | func_mb[bp->func].drv_mb_header) & | ||
5785 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5786 | |||
5787 | reset_code = bnx2x_fw_command(bp, reset_code); | ||
5788 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
5789 | |||
5790 | /* if UNDI is loaded on the other port */ | ||
5791 | if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { | ||
5792 | |||
5793 | bp->func = 1; | ||
5794 | bp->fw_seq = (SHMEM_RD(bp, | ||
5795 | func_mb[bp->func].drv_mb_header) & | ||
5796 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5797 | |||
5798 | bnx2x_fw_command(bp, | ||
5799 | DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); | ||
5800 | bnx2x_fw_command(bp, | ||
5801 | DRV_MSG_CODE_UNLOAD_DONE); | ||
5802 | |||
5803 | /* restore our func and fw_seq */ | ||
5804 | bp->func = func; | ||
5805 | bp->fw_seq = fw_seq; | ||
5806 | } | ||
5807 | |||
5808 | /* reset device */ | ||
5809 | REG_WR(bp, | ||
5810 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
5811 | 0xd3ffff7f); | ||
5812 | REG_WR(bp, | ||
5813 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
5814 | 0x1403); | ||
5815 | } | ||
5816 | } | ||
5817 | } | ||
5818 | |||
5819 | static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | ||
5820 | { | ||
5821 | u32 val, val2, val3, val4, id; | ||
5822 | |||
5823 | /* Get the chip revision id and number. */ | ||
5824 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | ||
5825 | val = REG_RD(bp, MISC_REG_CHIP_NUM); | ||
5826 | id = ((val & 0xffff) << 16); | ||
5827 | val = REG_RD(bp, MISC_REG_CHIP_REV); | ||
5828 | id |= ((val & 0xf) << 12); | ||
5829 | val = REG_RD(bp, MISC_REG_CHIP_METAL); | ||
5830 | id |= ((val & 0xff) << 4); | ||
5831 | val = REG_RD(bp, MISC_REG_BOND_ID); | ||
5832 | id |= (val & 0xf); | ||
5833 | bp->common.chip_id = id; | ||
5834 | bp->link_params.chip_id = bp->common.chip_id; | ||
5835 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); | ||
5836 | |||
5837 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); | ||
5838 | bp->common.flash_size = (NVRAM_1MB_SIZE << | ||
5839 | (val & MCPR_NVM_CFG4_FLASH_SIZE)); | ||
5840 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", | ||
5841 | bp->common.flash_size, bp->common.flash_size); | ||
5842 | |||
5843 | bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
5844 | bp->link_params.shmem_base = bp->common.shmem_base; | ||
5845 | BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); | ||
5846 | |||
5847 | if (!bp->common.shmem_base || | ||
5848 | (bp->common.shmem_base < 0xA0000) || | ||
5849 | (bp->common.shmem_base >= 0xC0000)) { | ||
5850 | BNX2X_DEV_INFO("MCP not active\n"); | ||
5851 | bp->flags |= NO_MCP_FLAG; | ||
5852 | return; | ||
5853 | } | ||
5854 | |||
5855 | val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); | ||
5856 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5857 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5858 | BNX2X_ERR("BAD MCP validity signature\n"); | ||
5859 | |||
5860 | bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); | ||
5861 | bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board); | ||
5862 | |||
5863 | BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n", | ||
5864 | bp->common.hw_config, bp->common.board); | ||
5865 | |||
5866 | bp->link_params.hw_led_mode = ((bp->common.hw_config & | ||
5867 | SHARED_HW_CFG_LED_MODE_MASK) >> | ||
5868 | SHARED_HW_CFG_LED_MODE_SHIFT); | ||
5869 | |||
5870 | val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; | ||
5871 | bp->common.bc_ver = val; | ||
5872 | BNX2X_DEV_INFO("bc_ver %X\n", val); | ||
5873 | if (val < BNX2X_BC_VER) { | ||
5874 | /* for now only warn | ||
5875 | * later we might need to enforce this */ | ||
5876 | BNX2X_ERR("This driver needs bc_ver %X but found %X," | ||
5877 | " please upgrade BC\n", BNX2X_BC_VER, val); | ||
5878 | } | ||
5879 | BNX2X_DEV_INFO("%sWoL Capable\n", | ||
5880 | (bp->flags & NO_WOL_FLAG)? "Not " : ""); | ||
5881 | |||
5882 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); | ||
5883 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); | ||
5884 | val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); | ||
5885 | val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); | ||
5886 | |||
5887 | printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", | ||
5888 | val, val2, val3, val4); | ||
5889 | } | ||
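The chip id is packed from four registers exactly as the comment says (num in bits 16-31, rev in 12-15, metal in 4-11, bond id in 0-3). A quick worked example with made-up register values:

	/* chip_id packing as in bnx2x_get_common_hwinfo() above; sample values are hypothetical */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t chip_num = 0x164e;	/* hypothetical MISC_REG_CHIP_NUM value */
		uint32_t chip_rev = 0x1;	/* hypothetical MISC_REG_CHIP_REV value */
		uint32_t metal    = 0x05;	/* hypothetical MISC_REG_CHIP_METAL value */
		uint32_t bond_id  = 0x0;	/* hypothetical MISC_REG_BOND_ID value */

		uint32_t id = ((chip_num & 0xffff) << 16) |
			      ((chip_rev & 0xf) << 12) |
			      ((metal & 0xff) << 4) |
			      (bond_id & 0xf);

		printf("chip ID is 0x%x\n", (unsigned)id);	/* 0x164e1050 */
		return 0;
	}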
5890 | |||
5891 | static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, | ||
5892 | u32 switch_cfg) | ||
5020 | { | 5893 | { |
5021 | int port = bp->port; | 5894 | int port = BP_PORT(bp); |
5022 | u32 ext_phy_type; | 5895 | u32 ext_phy_type; |
5023 | 5896 | ||
5024 | switch (switch_cfg) { | 5897 | switch (switch_cfg) { |
@@ -5032,31 +5905,33 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5032 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", | 5905 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", |
5033 | ext_phy_type); | 5906 | ext_phy_type); |
5034 | 5907 | ||
5035 | bp->supported |= (SUPPORTED_10baseT_Half | | 5908 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5036 | SUPPORTED_10baseT_Full | | 5909 | SUPPORTED_10baseT_Full | |
5037 | SUPPORTED_100baseT_Half | | 5910 | SUPPORTED_100baseT_Half | |
5038 | SUPPORTED_100baseT_Full | | 5911 | SUPPORTED_100baseT_Full | |
5039 | SUPPORTED_1000baseT_Full | | 5912 | SUPPORTED_1000baseT_Full | |
5040 | SUPPORTED_2500baseX_Full | | 5913 | SUPPORTED_2500baseX_Full | |
5041 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5914 | SUPPORTED_TP | |
5042 | SUPPORTED_Autoneg | | 5915 | SUPPORTED_FIBRE | |
5043 | SUPPORTED_Pause | | 5916 | SUPPORTED_Autoneg | |
5044 | SUPPORTED_Asym_Pause); | 5917 | SUPPORTED_Pause | |
5918 | SUPPORTED_Asym_Pause); | ||
5045 | break; | 5919 | break; |
5046 | 5920 | ||
5047 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: | 5921 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: |
5048 | BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", | 5922 | BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", |
5049 | ext_phy_type); | 5923 | ext_phy_type); |
5050 | 5924 | ||
5051 | bp->supported |= (SUPPORTED_10baseT_Half | | 5925 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5052 | SUPPORTED_10baseT_Full | | 5926 | SUPPORTED_10baseT_Full | |
5053 | SUPPORTED_100baseT_Half | | 5927 | SUPPORTED_100baseT_Half | |
5054 | SUPPORTED_100baseT_Full | | 5928 | SUPPORTED_100baseT_Full | |
5055 | SUPPORTED_1000baseT_Full | | 5929 | SUPPORTED_1000baseT_Full | |
5056 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5930 | SUPPORTED_TP | |
5057 | SUPPORTED_Autoneg | | 5931 | SUPPORTED_FIBRE | |
5058 | SUPPORTED_Pause | | 5932 | SUPPORTED_Autoneg | |
5059 | SUPPORTED_Asym_Pause); | 5933 | SUPPORTED_Pause | |
5934 | SUPPORTED_Asym_Pause); | ||
5060 | break; | 5935 | break; |
5061 | 5936 | ||
5062 | default: | 5937 | default: |
@@ -5066,9 +5941,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5066 | return; | 5941 | return; |
5067 | } | 5942 | } |
5068 | 5943 | ||
5069 | bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + | 5944 | bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + |
5070 | port*0x10); | 5945 | port*0x10); |
5071 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr); | 5946 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); |
5072 | break; | 5947 | break; |
5073 | 5948 | ||
5074 | case SWITCH_CFG_10G: | 5949 | case SWITCH_CFG_10G: |
@@ -5081,75 +5956,75 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5081 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", | 5956 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", |
5082 | ext_phy_type); | 5957 | ext_phy_type); |
5083 | 5958 | ||
5084 | bp->supported |= (SUPPORTED_10baseT_Half | | 5959 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5085 | SUPPORTED_10baseT_Full | | 5960 | SUPPORTED_10baseT_Full | |
5086 | SUPPORTED_100baseT_Half | | 5961 | SUPPORTED_100baseT_Half | |
5087 | SUPPORTED_100baseT_Full | | 5962 | SUPPORTED_100baseT_Full | |
5088 | SUPPORTED_1000baseT_Full | | 5963 | SUPPORTED_1000baseT_Full | |
5089 | SUPPORTED_2500baseX_Full | | 5964 | SUPPORTED_2500baseX_Full | |
5090 | SUPPORTED_10000baseT_Full | | 5965 | SUPPORTED_10000baseT_Full | |
5091 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5966 | SUPPORTED_TP | |
5092 | SUPPORTED_Autoneg | | 5967 | SUPPORTED_FIBRE | |
5093 | SUPPORTED_Pause | | 5968 | SUPPORTED_Autoneg | |
5094 | SUPPORTED_Asym_Pause); | 5969 | SUPPORTED_Pause | |
5970 | SUPPORTED_Asym_Pause); | ||
5095 | break; | 5971 | break; |
5096 | 5972 | ||
5097 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 5973 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
5098 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", | 5974 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", |
5099 | ext_phy_type); | 5975 | ext_phy_type); |
5100 | 5976 | ||
5101 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5977 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5102 | SUPPORTED_FIBRE | | 5978 | SUPPORTED_FIBRE | |
5103 | SUPPORTED_Pause | | 5979 | SUPPORTED_Pause | |
5104 | SUPPORTED_Asym_Pause); | 5980 | SUPPORTED_Asym_Pause); |
5105 | break; | 5981 | break; |
5106 | 5982 | ||
5107 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 5983 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
5108 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", | 5984 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", |
5109 | ext_phy_type); | 5985 | ext_phy_type); |
5110 | 5986 | ||
5111 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5987 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5112 | SUPPORTED_1000baseT_Full | | 5988 | SUPPORTED_1000baseT_Full | |
5113 | SUPPORTED_Autoneg | | 5989 | SUPPORTED_FIBRE | |
5114 | SUPPORTED_FIBRE | | 5990 | SUPPORTED_Pause | |
5115 | SUPPORTED_Pause | | 5991 | SUPPORTED_Asym_Pause); |
5116 | SUPPORTED_Asym_Pause); | ||
5117 | break; | 5992 | break; |
5118 | 5993 | ||
5119 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | 5994 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: |
5120 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", | 5995 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", |
5121 | ext_phy_type); | 5996 | ext_phy_type); |
5122 | 5997 | ||
5123 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5998 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5124 | SUPPORTED_1000baseT_Full | | 5999 | SUPPORTED_1000baseT_Full | |
5125 | SUPPORTED_FIBRE | | 6000 | SUPPORTED_FIBRE | |
5126 | SUPPORTED_Autoneg | | 6001 | SUPPORTED_Autoneg | |
5127 | SUPPORTED_Pause | | 6002 | SUPPORTED_Pause | |
5128 | SUPPORTED_Asym_Pause); | 6003 | SUPPORTED_Asym_Pause); |
5129 | break; | 6004 | break; |
5130 | 6005 | ||
5131 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 6006 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
5132 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", | 6007 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", |
5133 | ext_phy_type); | 6008 | ext_phy_type); |
5134 | 6009 | ||
5135 | bp->supported |= (SUPPORTED_10000baseT_Full | | 6010 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5136 | SUPPORTED_2500baseX_Full | | 6011 | SUPPORTED_2500baseX_Full | |
5137 | SUPPORTED_1000baseT_Full | | 6012 | SUPPORTED_1000baseT_Full | |
5138 | SUPPORTED_FIBRE | | 6013 | SUPPORTED_FIBRE | |
5139 | SUPPORTED_Autoneg | | 6014 | SUPPORTED_Autoneg | |
5140 | SUPPORTED_Pause | | 6015 | SUPPORTED_Pause | |
5141 | SUPPORTED_Asym_Pause); | 6016 | SUPPORTED_Asym_Pause); |
5142 | break; | 6017 | break; |
5143 | 6018 | ||
5144 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | 6019 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: |
5145 | BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", | 6020 | BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", |
5146 | ext_phy_type); | 6021 | ext_phy_type); |
5147 | 6022 | ||
5148 | bp->supported |= (SUPPORTED_10000baseT_Full | | 6023 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5149 | SUPPORTED_TP | | 6024 | SUPPORTED_TP | |
5150 | SUPPORTED_Autoneg | | 6025 | SUPPORTED_Autoneg | |
5151 | SUPPORTED_Pause | | 6026 | SUPPORTED_Pause | |
5152 | SUPPORTED_Asym_Pause); | 6027 | SUPPORTED_Asym_Pause); |
5153 | break; | 6028 | break; |
5154 | 6029 | ||
5155 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | 6030 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: |
@@ -5164,61 +6039,61 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5164 | return; | 6039 | return; |
5165 | } | 6040 | } |
5166 | 6041 | ||
5167 | bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + | 6042 | bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + |
5168 | port*0x18); | 6043 | port*0x18); |
5169 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr); | 6044 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); |
5170 | 6045 | ||
5171 | break; | 6046 | break; |
5172 | 6047 | ||
5173 | default: | 6048 | default: |
5174 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", | 6049 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", |
5175 | bp->link_config); | 6050 | bp->port.link_config); |
5176 | return; | 6051 | return; |
5177 | } | 6052 | } |
5178 | bp->link_params.phy_addr = bp->phy_addr; | 6053 | bp->link_params.phy_addr = bp->port.phy_addr; |
5179 | 6054 | ||
5180 | /* mask what we support according to speed_cap_mask */ | 6055 | /* mask what we support according to speed_cap_mask */ |
5181 | if (!(bp->link_params.speed_cap_mask & | 6056 | if (!(bp->link_params.speed_cap_mask & |
5182 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) | 6057 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) |
5183 | bp->supported &= ~SUPPORTED_10baseT_Half; | 6058 | bp->port.supported &= ~SUPPORTED_10baseT_Half; |
5184 | 6059 | ||
5185 | if (!(bp->link_params.speed_cap_mask & | 6060 | if (!(bp->link_params.speed_cap_mask & |
5186 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) | 6061 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) |
5187 | bp->supported &= ~SUPPORTED_10baseT_Full; | 6062 | bp->port.supported &= ~SUPPORTED_10baseT_Full; |
5188 | 6063 | ||
5189 | if (!(bp->link_params.speed_cap_mask & | 6064 | if (!(bp->link_params.speed_cap_mask & |
5190 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) | 6065 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) |
5191 | bp->supported &= ~SUPPORTED_100baseT_Half; | 6066 | bp->port.supported &= ~SUPPORTED_100baseT_Half; |
5192 | 6067 | ||
5193 | if (!(bp->link_params.speed_cap_mask & | 6068 | if (!(bp->link_params.speed_cap_mask & |
5194 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) | 6069 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) |
5195 | bp->supported &= ~SUPPORTED_100baseT_Full; | 6070 | bp->port.supported &= ~SUPPORTED_100baseT_Full; |
5196 | 6071 | ||
5197 | if (!(bp->link_params.speed_cap_mask & | 6072 | if (!(bp->link_params.speed_cap_mask & |
5198 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) | 6073 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) |
5199 | bp->supported &= ~(SUPPORTED_1000baseT_Half | | 6074 | bp->port.supported &= ~(SUPPORTED_1000baseT_Half | |
5200 | SUPPORTED_1000baseT_Full); | 6075 | SUPPORTED_1000baseT_Full); |
5201 | 6076 | ||
5202 | if (!(bp->link_params.speed_cap_mask & | 6077 | if (!(bp->link_params.speed_cap_mask & |
5203 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) | 6078 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) |
5204 | bp->supported &= ~SUPPORTED_2500baseX_Full; | 6079 | bp->port.supported &= ~SUPPORTED_2500baseX_Full; |
5205 | 6080 | ||
5206 | if (!(bp->link_params.speed_cap_mask & | 6081 | if (!(bp->link_params.speed_cap_mask & |
5207 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) | 6082 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) |
5208 | bp->supported &= ~SUPPORTED_10000baseT_Full; | 6083 | bp->port.supported &= ~SUPPORTED_10000baseT_Full; |
5209 | 6084 | ||
5210 | BNX2X_DEV_INFO("supported 0x%x\n", bp->supported); | 6085 | BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); |
5211 | } | 6086 | } |
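
A hedged, table-driven sketch of the speed_cap_mask filtering done at the end of bnx2x_link_settings_supported() above: every capability bit that is clear in NVRAM strips the matching ethtool SUPPORTED_* bits. The struct and helper below are illustrative; the real bit values live in bnx2x_hsi.h and linux/ethtool.h.

/* Sketch only: generic version of the masking above.  A caller would fill
 * the table with PORT_HW_CFG_SPEED_CAPABILITY_D0_* / SUPPORTED_* pairs. */
#include <stdint.h>

struct cap_map {
	uint32_t cap_bit;		/* speed capability bit from NVRAM */
	uint32_t supported_bits;	/* SUPPORTED_* bits gated by it */
};

static uint32_t mask_supported(uint32_t supported, uint32_t speed_cap_mask,
			       const struct cap_map *map, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!(speed_cap_mask & map[i].cap_bit))
			supported &= ~map[i].supported_bits;
	return supported;
}
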
5212 | 6087 | ||
5213 | static void bnx2x_link_settings_requested(struct bnx2x *bp) | 6088 | static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) |
5214 | { | 6089 | { |
5215 | bp->link_params.req_duplex = DUPLEX_FULL; | 6090 | bp->link_params.req_duplex = DUPLEX_FULL; |
5216 | 6091 | ||
5217 | switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) { | 6092 | switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { |
5218 | case PORT_FEATURE_LINK_SPEED_AUTO: | 6093 | case PORT_FEATURE_LINK_SPEED_AUTO: |
5219 | if (bp->supported & SUPPORTED_Autoneg) { | 6094 | if (bp->port.supported & SUPPORTED_Autoneg) { |
5220 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6095 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5221 | bp->advertising = bp->supported; | 6096 | bp->port.advertising = bp->port.supported; |
5222 | } else { | 6097 | } else { |
5223 | u32 ext_phy_type = | 6098 | u32 ext_phy_type = |
5224 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 6099 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
@@ -5229,7 +6104,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5229 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { | 6104 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { |
5230 | /* force 10G, no AN */ | 6105 | /* force 10G, no AN */ |
5231 | bp->link_params.req_line_speed = SPEED_10000; | 6106 | bp->link_params.req_line_speed = SPEED_10000; |
5232 | bp->advertising = | 6107 | bp->port.advertising = |
5233 | (ADVERTISED_10000baseT_Full | | 6108 | (ADVERTISED_10000baseT_Full | |
5234 | ADVERTISED_FIBRE); | 6109 | ADVERTISED_FIBRE); |
5235 | break; | 6110 | break; |
@@ -5237,98 +6112,98 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5237 | BNX2X_ERR("NVRAM config error. " | 6112 | BNX2X_ERR("NVRAM config error. " |
5238 | "Invalid link_config 0x%x" | 6113 | "Invalid link_config 0x%x" |
5239 | " Autoneg not supported\n", | 6114 | " Autoneg not supported\n", |
5240 | bp->link_config); | 6115 | bp->port.link_config); |
5241 | return; | 6116 | return; |
5242 | } | 6117 | } |
5243 | break; | 6118 | break; |
5244 | 6119 | ||
5245 | case PORT_FEATURE_LINK_SPEED_10M_FULL: | 6120 | case PORT_FEATURE_LINK_SPEED_10M_FULL: |
5246 | if (bp->supported & SUPPORTED_10baseT_Full) { | 6121 | if (bp->port.supported & SUPPORTED_10baseT_Full) { |
5247 | bp->link_params.req_line_speed = SPEED_10; | 6122 | bp->link_params.req_line_speed = SPEED_10; |
5248 | bp->advertising = (ADVERTISED_10baseT_Full | | 6123 | bp->port.advertising = (ADVERTISED_10baseT_Full | |
5249 | ADVERTISED_TP); | 6124 | ADVERTISED_TP); |
5250 | } else { | 6125 | } else { |
5251 | BNX2X_ERR("NVRAM config error. " | 6126 | BNX2X_ERR("NVRAM config error. " |
5252 | "Invalid link_config 0x%x" | 6127 | "Invalid link_config 0x%x" |
5253 | " speed_cap_mask 0x%x\n", | 6128 | " speed_cap_mask 0x%x\n", |
5254 | bp->link_config, | 6129 | bp->port.link_config, |
5255 | bp->link_params.speed_cap_mask); | 6130 | bp->link_params.speed_cap_mask); |
5256 | return; | 6131 | return; |
5257 | } | 6132 | } |
5258 | break; | 6133 | break; |
5259 | 6134 | ||
5260 | case PORT_FEATURE_LINK_SPEED_10M_HALF: | 6135 | case PORT_FEATURE_LINK_SPEED_10M_HALF: |
5261 | if (bp->supported & SUPPORTED_10baseT_Half) { | 6136 | if (bp->port.supported & SUPPORTED_10baseT_Half) { |
5262 | bp->link_params.req_line_speed = SPEED_10; | 6137 | bp->link_params.req_line_speed = SPEED_10; |
5263 | bp->link_params.req_duplex = DUPLEX_HALF; | 6138 | bp->link_params.req_duplex = DUPLEX_HALF; |
5264 | bp->advertising = (ADVERTISED_10baseT_Half | | 6139 | bp->port.advertising = (ADVERTISED_10baseT_Half | |
5265 | ADVERTISED_TP); | 6140 | ADVERTISED_TP); |
5266 | } else { | 6141 | } else { |
5267 | BNX2X_ERR("NVRAM config error. " | 6142 | BNX2X_ERR("NVRAM config error. " |
5268 | "Invalid link_config 0x%x" | 6143 | "Invalid link_config 0x%x" |
5269 | " speed_cap_mask 0x%x\n", | 6144 | " speed_cap_mask 0x%x\n", |
5270 | bp->link_config, | 6145 | bp->port.link_config, |
5271 | bp->link_params.speed_cap_mask); | 6146 | bp->link_params.speed_cap_mask); |
5272 | return; | 6147 | return; |
5273 | } | 6148 | } |
5274 | break; | 6149 | break; |
5275 | 6150 | ||
5276 | case PORT_FEATURE_LINK_SPEED_100M_FULL: | 6151 | case PORT_FEATURE_LINK_SPEED_100M_FULL: |
5277 | if (bp->supported & SUPPORTED_100baseT_Full) { | 6152 | if (bp->port.supported & SUPPORTED_100baseT_Full) { |
5278 | bp->link_params.req_line_speed = SPEED_100; | 6153 | bp->link_params.req_line_speed = SPEED_100; |
5279 | bp->advertising = (ADVERTISED_100baseT_Full | | 6154 | bp->port.advertising = (ADVERTISED_100baseT_Full | |
5280 | ADVERTISED_TP); | 6155 | ADVERTISED_TP); |
5281 | } else { | 6156 | } else { |
5282 | BNX2X_ERR("NVRAM config error. " | 6157 | BNX2X_ERR("NVRAM config error. " |
5283 | "Invalid link_config 0x%x" | 6158 | "Invalid link_config 0x%x" |
5284 | " speed_cap_mask 0x%x\n", | 6159 | " speed_cap_mask 0x%x\n", |
5285 | bp->link_config, | 6160 | bp->port.link_config, |
5286 | bp->link_params.speed_cap_mask); | 6161 | bp->link_params.speed_cap_mask); |
5287 | return; | 6162 | return; |
5288 | } | 6163 | } |
5289 | break; | 6164 | break; |
5290 | 6165 | ||
5291 | case PORT_FEATURE_LINK_SPEED_100M_HALF: | 6166 | case PORT_FEATURE_LINK_SPEED_100M_HALF: |
5292 | if (bp->supported & SUPPORTED_100baseT_Half) { | 6167 | if (bp->port.supported & SUPPORTED_100baseT_Half) { |
5293 | bp->link_params.req_line_speed = SPEED_100; | 6168 | bp->link_params.req_line_speed = SPEED_100; |
5294 | bp->link_params.req_duplex = DUPLEX_HALF; | 6169 | bp->link_params.req_duplex = DUPLEX_HALF; |
5295 | bp->advertising = (ADVERTISED_100baseT_Half | | 6170 | bp->port.advertising = (ADVERTISED_100baseT_Half | |
5296 | ADVERTISED_TP); | 6171 | ADVERTISED_TP); |
5297 | } else { | 6172 | } else { |
5298 | BNX2X_ERR("NVRAM config error. " | 6173 | BNX2X_ERR("NVRAM config error. " |
5299 | "Invalid link_config 0x%x" | 6174 | "Invalid link_config 0x%x" |
5300 | " speed_cap_mask 0x%x\n", | 6175 | " speed_cap_mask 0x%x\n", |
5301 | bp->link_config, | 6176 | bp->port.link_config, |
5302 | bp->link_params.speed_cap_mask); | 6177 | bp->link_params.speed_cap_mask); |
5303 | return; | 6178 | return; |
5304 | } | 6179 | } |
5305 | break; | 6180 | break; |
5306 | 6181 | ||
5307 | case PORT_FEATURE_LINK_SPEED_1G: | 6182 | case PORT_FEATURE_LINK_SPEED_1G: |
5308 | if (bp->supported & SUPPORTED_1000baseT_Full) { | 6183 | if (bp->port.supported & SUPPORTED_1000baseT_Full) { |
5309 | bp->link_params.req_line_speed = SPEED_1000; | 6184 | bp->link_params.req_line_speed = SPEED_1000; |
5310 | bp->advertising = (ADVERTISED_1000baseT_Full | | 6185 | bp->port.advertising = (ADVERTISED_1000baseT_Full | |
5311 | ADVERTISED_TP); | 6186 | ADVERTISED_TP); |
5312 | } else { | 6187 | } else { |
5313 | BNX2X_ERR("NVRAM config error. " | 6188 | BNX2X_ERR("NVRAM config error. " |
5314 | "Invalid link_config 0x%x" | 6189 | "Invalid link_config 0x%x" |
5315 | " speed_cap_mask 0x%x\n", | 6190 | " speed_cap_mask 0x%x\n", |
5316 | bp->link_config, | 6191 | bp->port.link_config, |
5317 | bp->link_params.speed_cap_mask); | 6192 | bp->link_params.speed_cap_mask); |
5318 | return; | 6193 | return; |
5319 | } | 6194 | } |
5320 | break; | 6195 | break; |
5321 | 6196 | ||
5322 | case PORT_FEATURE_LINK_SPEED_2_5G: | 6197 | case PORT_FEATURE_LINK_SPEED_2_5G: |
5323 | if (bp->supported & SUPPORTED_2500baseX_Full) { | 6198 | if (bp->port.supported & SUPPORTED_2500baseX_Full) { |
5324 | bp->link_params.req_line_speed = SPEED_2500; | 6199 | bp->link_params.req_line_speed = SPEED_2500; |
5325 | bp->advertising = (ADVERTISED_2500baseX_Full | | 6200 | bp->port.advertising = (ADVERTISED_2500baseX_Full | |
5326 | ADVERTISED_TP); | 6201 | ADVERTISED_TP); |
5327 | } else { | 6202 | } else { |
5328 | BNX2X_ERR("NVRAM config error. " | 6203 | BNX2X_ERR("NVRAM config error. " |
5329 | "Invalid link_config 0x%x" | 6204 | "Invalid link_config 0x%x" |
5330 | " speed_cap_mask 0x%x\n", | 6205 | " speed_cap_mask 0x%x\n", |
5331 | bp->link_config, | 6206 | bp->port.link_config, |
5332 | bp->link_params.speed_cap_mask); | 6207 | bp->link_params.speed_cap_mask); |
5333 | return; | 6208 | return; |
5334 | } | 6209 | } |
@@ -5337,15 +6212,15 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5337 | case PORT_FEATURE_LINK_SPEED_10G_CX4: | 6212 | case PORT_FEATURE_LINK_SPEED_10G_CX4: |
5338 | case PORT_FEATURE_LINK_SPEED_10G_KX4: | 6213 | case PORT_FEATURE_LINK_SPEED_10G_KX4: |
5339 | case PORT_FEATURE_LINK_SPEED_10G_KR: | 6214 | case PORT_FEATURE_LINK_SPEED_10G_KR: |
5340 | if (bp->supported & SUPPORTED_10000baseT_Full) { | 6215 | if (bp->port.supported & SUPPORTED_10000baseT_Full) { |
5341 | bp->link_params.req_line_speed = SPEED_10000; | 6216 | bp->link_params.req_line_speed = SPEED_10000; |
5342 | bp->advertising = (ADVERTISED_10000baseT_Full | | 6217 | bp->port.advertising = (ADVERTISED_10000baseT_Full | |
5343 | ADVERTISED_FIBRE); | 6218 | ADVERTISED_FIBRE); |
5344 | } else { | 6219 | } else { |
5345 | BNX2X_ERR("NVRAM config error. " | 6220 | BNX2X_ERR("NVRAM config error. " |
5346 | "Invalid link_config 0x%x" | 6221 | "Invalid link_config 0x%x" |
5347 | " speed_cap_mask 0x%x\n", | 6222 | " speed_cap_mask 0x%x\n", |
5348 | bp->link_config, | 6223 | bp->port.link_config, |
5349 | bp->link_params.speed_cap_mask); | 6224 | bp->link_params.speed_cap_mask); |
5350 | return; | 6225 | return; |
5351 | } | 6226 | } |
@@ -5354,64 +6229,33 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5354 | default: | 6229 | default: |
5355 | BNX2X_ERR("NVRAM config error. " | 6230 | BNX2X_ERR("NVRAM config error. " |
5356 | "BAD link speed link_config 0x%x\n", | 6231 | "BAD link speed link_config 0x%x\n", |
5357 | bp->link_config); | 6232 | bp->port.link_config); |
5358 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6233 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5359 | bp->advertising = bp->supported; | 6234 | bp->port.advertising = bp->port.supported; |
5360 | break; | 6235 | break; |
5361 | } | 6236 | } |
5362 | 6237 | ||
5363 | bp->link_params.req_flow_ctrl = (bp->link_config & | 6238 | bp->link_params.req_flow_ctrl = (bp->port.link_config & |
5364 | PORT_FEATURE_FLOW_CONTROL_MASK); | 6239 | PORT_FEATURE_FLOW_CONTROL_MASK); |
5365 | if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && | 6240 | if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && |
5366 | (!bp->supported & SUPPORTED_Autoneg)) | 6241 | !(bp->port.supported & SUPPORTED_Autoneg)) |
5367 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; | 6242 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; |
5368 | 6243 | ||
5369 | BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" | 6244 | BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" |
5370 | " advertising 0x%x\n", | 6245 | " advertising 0x%x\n", |
5371 | bp->link_params.req_line_speed, | 6246 | bp->link_params.req_line_speed, |
5372 | bp->link_params.req_duplex, | 6247 | bp->link_params.req_duplex, |
5373 | bp->link_params.req_flow_ctrl, bp->advertising); | 6248 | bp->link_params.req_flow_ctrl, bp->port.advertising); |
5374 | } | 6249 | } |
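
The FLOW_CTRL_AUTO fallback at the end of bnx2x_link_settings_requested() hinges on operator precedence: '!' binds tighter than '&', so the test must parenthesize the bit check. A small standalone demonstration, with SUPPORTED_Autoneg valued as in linux/ethtool.h:

/* Demonstration only: without the parentheses, "!supported & BIT" reduces
 * to (0 or 1) & 0x40, which is always 0, so the fallback never fires. */
#include <stdio.h>
#include <stdint.h>

#define SUPPORTED_Autoneg	(1 << 6)	/* as in linux/ethtool.h */

int main(void)
{
	uint32_t supported = (1 << 10);		/* e.g. fibre only, no autoneg */

	printf("buggy test:   %d\n", !supported & SUPPORTED_Autoneg);	/* prints 0 */
	printf("correct test: %d\n", !(supported & SUPPORTED_Autoneg));	/* prints 1 */
	return 0;
}
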
5375 | 6250 | ||
5376 | static void bnx2x_get_hwinfo(struct bnx2x *bp) | 6251 | static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) |
5377 | { | 6252 | { |
5378 | u32 val, val2, val3, val4, id; | 6253 | int port = BP_PORT(bp); |
5379 | int port = bp->port; | 6254 | u32 val, val2; |
5380 | |||
5381 | bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
5382 | BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base); | ||
5383 | |||
5384 | /* Get the chip revision id and number. */ | ||
5385 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | ||
5386 | val = REG_RD(bp, MISC_REG_CHIP_NUM); | ||
5387 | id = ((val & 0xffff) << 16); | ||
5388 | val = REG_RD(bp, MISC_REG_CHIP_REV); | ||
5389 | id |= ((val & 0xf) << 12); | ||
5390 | val = REG_RD(bp, MISC_REG_CHIP_METAL); | ||
5391 | id |= ((val & 0xff) << 4); | ||
5392 | REG_RD(bp, MISC_REG_BOND_ID); | ||
5393 | id |= (val & 0xf); | ||
5394 | bp->chip_id = id; | ||
5395 | BNX2X_DEV_INFO("chip ID is %x\n", id); | ||
5396 | 6255 | ||
5397 | bp->link_params.bp = bp; | 6256 | bp->link_params.bp = bp; |
6257 | bp->link_params.port = port; | ||
5398 | 6258 | ||
5399 | if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) { | ||
5400 | BNX2X_DEV_INFO("MCP not active\n"); | ||
5401 | nomcp = 1; | ||
5402 | goto set_mac; | ||
5403 | } | ||
5404 | |||
5405 | val = SHMEM_RD(bp, validity_map[port]); | ||
5406 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5407 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5408 | BNX2X_ERR("BAD MCP validity signature\n"); | ||
5409 | |||
5410 | bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) & | ||
5411 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5412 | |||
5413 | bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); | ||
5414 | bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board); | ||
5415 | bp->link_params.serdes_config = | 6259 | bp->link_params.serdes_config = |
5416 | SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config); | 6260 | SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config); |
5417 | bp->link_params.lane_config = | 6261 | bp->link_params.lane_config = |
@@ -5423,19 +6267,18 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
5423 | SHMEM_RD(bp, | 6267 | SHMEM_RD(bp, |
5424 | dev_info.port_hw_config[port].speed_capability_mask); | 6268 | dev_info.port_hw_config[port].speed_capability_mask); |
5425 | 6269 | ||
5426 | bp->link_config = | 6270 | bp->port.link_config = |
5427 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); | 6271 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); |
5428 | 6272 | ||
5429 | BNX2X_DEV_INFO("serdes_config (%08x) lane_config (%08x)\n" | 6273 | BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n" |
5430 | KERN_INFO " ext_phy_config (%08x) speed_cap_mask (%08x)" | 6274 | KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x" |
5431 | " link_config (%08x)\n", | 6275 | " link_config 0x%08x\n", |
5432 | bp->link_params.serdes_config, | 6276 | bp->link_params.serdes_config, |
5433 | bp->link_params.lane_config, | 6277 | bp->link_params.lane_config, |
5434 | bp->link_params.ext_phy_config, | 6278 | bp->link_params.ext_phy_config, |
5435 | bp->link_params.speed_cap_mask, | 6279 | bp->link_params.speed_cap_mask, bp->port.link_config); |
5436 | bp->link_config); | ||
5437 | 6280 | ||
5438 | bp->link_params.switch_cfg = (bp->link_config & | 6281 | bp->link_params.switch_cfg = (bp->port.link_config & |
5439 | PORT_FEATURE_CONNECTED_SWITCH_MASK); | 6282 | PORT_FEATURE_CONNECTED_SWITCH_MASK); |
5440 | bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); | 6283 | bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); |
5441 | 6284 | ||
@@ -5451,43 +6294,126 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
5451 | bp->dev->dev_addr[5] = (u8)(val & 0xff); | 6294 | bp->dev->dev_addr[5] = (u8)(val & 0xff); |
5452 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); | 6295 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); |
5453 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | 6296 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); |
6297 | } | ||
5454 | 6298 | ||
6299 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | ||
6300 | { | ||
6301 | int func = BP_FUNC(bp); | ||
6302 | u32 val, val2; | ||
6303 | int rc = 0; | ||
5455 | 6304 | ||
6305 | bnx2x_get_common_hwinfo(bp); | ||
5456 | 6306 | ||
5457 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); | 6307 | bp->e1hov = 0; |
5458 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); | 6308 | bp->e1hmf = 0; |
5459 | val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); | 6309 | if (CHIP_IS_E1H(bp)) { |
5460 | val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); | 6310 | bp->mf_config = |
6311 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
5461 | 6312 | ||
5462 | printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", | 6313 | val = |
5463 | val, val2, val3, val4); | 6314 | (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & |
6315 | FUNC_MF_CFG_E1HOV_TAG_MASK); | ||
6316 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | ||
5464 | 6317 | ||
5465 | /* bc ver */ | 6318 | bp->e1hov = val; |
5466 | if (!nomcp) { | 6319 | bp->e1hmf = 1; |
5467 | bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8); | 6320 | BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d " |
5468 | BNX2X_DEV_INFO("bc_ver %X\n", val); | 6321 | "(0x%04x)\n", |
5469 | if (val < BNX2X_BC_VER) { | 6322 | func, bp->e1hov, bp->e1hov); |
5470 | /* for now only warn | 6323 | } else { |
5471 | * later we might need to enforce this */ | 6324 | BNX2X_DEV_INFO("Single function mode\n"); |
5472 | BNX2X_ERR("This driver needs bc_ver %X but found %X," | 6325 | if (BP_E1HVN(bp)) { |
5473 | " please upgrade BC\n", BNX2X_BC_VER, val); | 6326 | BNX2X_ERR("!!! No valid E1HOV for func %d," |
6327 | " aborting\n", func); | ||
6328 | rc = -EPERM; | ||
6329 | } | ||
5474 | } | 6330 | } |
5475 | } else { | ||
5476 | bp->bc_ver = 0; | ||
5477 | } | 6331 | } |
5478 | 6332 | ||
5479 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); | 6333 | if (!BP_NOMCP(bp)) { |
5480 | bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); | 6334 | bnx2x_get_port_hwinfo(bp); |
5481 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", | 6335 | |
5482 | bp->flash_size, bp->flash_size); | 6336 | bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & |
6337 | DRV_MSG_SEQ_NUMBER_MASK); | ||
6338 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
6339 | } | ||
6340 | |||
6341 | if (IS_E1HMF(bp)) { | ||
6342 | val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); | ||
6343 | val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); | ||
6344 | if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && | ||
6345 | (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { | ||
6346 | bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); | ||
6347 | bp->dev->dev_addr[1] = (u8)(val2 & 0xff); | ||
6348 | bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); | ||
6349 | bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); | ||
6350 | bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); | ||
6351 | bp->dev->dev_addr[5] = (u8)(val & 0xff); | ||
6352 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, | ||
6353 | ETH_ALEN); | ||
6354 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, | ||
6355 | ETH_ALEN); | ||
6356 | } | ||
5483 | 6357 | ||
5484 | return; | 6358 | return rc; |
6359 | } | ||
6360 | |||
6361 | if (BP_NOMCP(bp)) { | ||
6362 | /* only supposed to happen on emulation/FPGA */ | ||
6363 | BNX2X_ERR("warning rendom MAC workaround active\n"); | ||
6364 | random_ether_addr(bp->dev->dev_addr); | ||
6365 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | ||
6366 | } | ||
6367 | |||
6368 | return rc; | ||
6369 | } | ||
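
For the E1H multi-function path in bnx2x_get_hwinfo() above, the MAC address is unpacked byte by byte from the two mf_cfg words. A userspace sketch of the same unpacking, with made-up register values:

/* Illustrative only: mac_upper carries bytes 0-1, mac_lower bytes 2-5,
 * mirroring the shifts used in the driver code above. */
#include <stdint.h>
#include <stdio.h>

static void mac_from_shmem(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = (upper >> 8) & 0xff;
	mac[1] = upper & 0xff;
	mac[2] = (lower >> 24) & 0xff;
	mac[3] = (lower >> 16) & 0xff;
	mac[4] = (lower >> 8) & 0xff;
	mac[5] = lower & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	mac_from_shmem(0x0010, 0x18a1b2c3, mac);	/* hypothetical values */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
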
6370 | |||
6371 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) | ||
6372 | { | ||
6373 | int func = BP_FUNC(bp); | ||
6374 | int rc; | ||
6375 | |||
6376 | if (nomcp) | ||
6377 | bp->flags |= NO_MCP_FLAG; | ||
6378 | |||
6379 | mutex_init(&bp->port.phy_mutex); | ||
6380 | |||
6381 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | ||
6382 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | ||
6383 | |||
6384 | rc = bnx2x_get_hwinfo(bp); | ||
6385 | |||
6386 | /* need to reset chip if undi was active */ | ||
6387 | if (!BP_NOMCP(bp)) | ||
6388 | bnx2x_undi_unload(bp); | ||
6389 | |||
6390 | if (CHIP_REV_IS_FPGA(bp)) | ||
6391 | printk(KERN_ERR PFX "FPGA detected\n"); | ||
5485 | 6392 | ||
5486 | set_mac: /* only supposed to happen on emulation/FPGA */ | 6393 | if (BP_NOMCP(bp) && (func == 0)) |
5487 | BNX2X_ERR("warning rendom MAC workaround active\n"); | 6394 | printk(KERN_ERR PFX |
5488 | random_ether_addr(bp->dev->dev_addr); | 6395 | "MCP disabled, must load devices in order!\n"); |
5489 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); | ||
5490 | 6396 | ||
6397 | bp->tx_ring_size = MAX_TX_AVAIL; | ||
6398 | bp->rx_ring_size = MAX_RX_AVAIL; | ||
6399 | |||
6400 | bp->rx_csum = 1; | ||
6401 | bp->rx_offset = 0; | ||
6402 | |||
6403 | bp->tx_ticks = 50; | ||
6404 | bp->rx_ticks = 25; | ||
6405 | |||
6406 | bp->stats_ticks = 1000000 & 0xffff00; | ||
6407 | |||
6408 | bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); | ||
6409 | bp->current_interval = (poll ? poll : bp->timer_interval); | ||
6410 | |||
6411 | init_timer(&bp->timer); | ||
6412 | bp->timer.expires = jiffies + bp->current_interval; | ||
6413 | bp->timer.data = (unsigned long) bp; | ||
6414 | bp->timer.function = bnx2x_timer; | ||
6415 | |||
6416 | return rc; | ||
5491 | } | 6417 | } |
5492 | 6418 | ||
5493 | /* | 6419 | /* |
@@ -5500,8 +6426,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5500 | { | 6426 | { |
5501 | struct bnx2x *bp = netdev_priv(dev); | 6427 | struct bnx2x *bp = netdev_priv(dev); |
5502 | 6428 | ||
5503 | cmd->supported = bp->supported; | 6429 | cmd->supported = bp->port.supported; |
5504 | cmd->advertising = bp->advertising; | 6430 | cmd->advertising = bp->port.advertising; |
5505 | 6431 | ||
5506 | if (netif_carrier_ok(dev)) { | 6432 | if (netif_carrier_ok(dev)) { |
5507 | cmd->speed = bp->link_vars.line_speed; | 6433 | cmd->speed = bp->link_vars.line_speed; |
@@ -5510,6 +6436,14 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5510 | cmd->speed = bp->link_params.req_line_speed; | 6436 | cmd->speed = bp->link_params.req_line_speed; |
5511 | cmd->duplex = bp->link_params.req_duplex; | 6437 | cmd->duplex = bp->link_params.req_duplex; |
5512 | } | 6438 | } |
6439 | if (IS_E1HMF(bp)) { | ||
6440 | u16 vn_max_rate; | ||
6441 | |||
6442 | vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
6443 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
6444 | if (vn_max_rate < cmd->speed) | ||
6445 | cmd->speed = vn_max_rate; | ||
6446 | } | ||
5513 | 6447 | ||
5514 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { | 6448 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { |
5515 | u32 ext_phy_type = | 6449 | u32 ext_phy_type = |
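
In the E1HMF branch added to bnx2x_get_settings() above, the reported speed is capped by the per-function bandwidth taken from mf_config; judging by the *100, the field appears to be expressed in units of 100 Mbps. A hedged sketch with the mask and shift passed in, since their exact values come from the HSI headers:

/* Sketch only: cap the ethtool speed by the function's max bandwidth. */
#include <stdint.h>

static uint16_t e1hmf_speed(uint16_t line_speed, uint32_t mf_config,
			    uint32_t bw_mask, unsigned int bw_shift)
{
	uint16_t vn_max_rate = ((mf_config & bw_mask) >> bw_shift) * 100;

	return (vn_max_rate < line_speed) ? vn_max_rate : line_speed;
}
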
@@ -5541,7 +6475,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5541 | } else | 6475 | } else |
5542 | cmd->port = PORT_TP; | 6476 | cmd->port = PORT_TP; |
5543 | 6477 | ||
5544 | cmd->phy_address = bp->phy_addr; | 6478 | cmd->phy_address = bp->port.phy_addr; |
5545 | cmd->transceiver = XCVR_INTERNAL; | 6479 | cmd->transceiver = XCVR_INTERNAL; |
5546 | 6480 | ||
5547 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | 6481 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) |
@@ -5568,6 +6502,9 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5568 | struct bnx2x *bp = netdev_priv(dev); | 6502 | struct bnx2x *bp = netdev_priv(dev); |
5569 | u32 advertising; | 6503 | u32 advertising; |
5570 | 6504 | ||
6505 | if (IS_E1HMF(bp)) | ||
6506 | return 0; | ||
6507 | |||
5571 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | 6508 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" |
5572 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | 6509 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" |
5573 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | 6510 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" |
@@ -5577,24 +6514,25 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5577 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | 6514 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); |
5578 | 6515 | ||
5579 | if (cmd->autoneg == AUTONEG_ENABLE) { | 6516 | if (cmd->autoneg == AUTONEG_ENABLE) { |
5580 | if (!(bp->supported & SUPPORTED_Autoneg)) { | 6517 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { |
5581 | DP(NETIF_MSG_LINK, "Aotoneg not supported\n"); | 6518 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); |
5582 | return -EINVAL; | 6519 | return -EINVAL; |
5583 | } | 6520 | } |
5584 | 6521 | ||
5585 | /* advertise the requested speed and duplex if supported */ | 6522 | /* advertise the requested speed and duplex if supported */ |
5586 | cmd->advertising &= bp->supported; | 6523 | cmd->advertising &= bp->port.supported; |
5587 | 6524 | ||
5588 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6525 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5589 | bp->link_params.req_duplex = DUPLEX_FULL; | 6526 | bp->link_params.req_duplex = DUPLEX_FULL; |
5590 | bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising); | 6527 | bp->port.advertising |= (ADVERTISED_Autoneg | |
6528 | cmd->advertising); | ||
5591 | 6529 | ||
5592 | } else { /* forced speed */ | 6530 | } else { /* forced speed */ |
5593 | /* advertise the requested speed and duplex if supported */ | 6531 | /* advertise the requested speed and duplex if supported */ |
5594 | switch (cmd->speed) { | 6532 | switch (cmd->speed) { |
5595 | case SPEED_10: | 6533 | case SPEED_10: |
5596 | if (cmd->duplex == DUPLEX_FULL) { | 6534 | if (cmd->duplex == DUPLEX_FULL) { |
5597 | if (!(bp->supported & | 6535 | if (!(bp->port.supported & |
5598 | SUPPORTED_10baseT_Full)) { | 6536 | SUPPORTED_10baseT_Full)) { |
5599 | DP(NETIF_MSG_LINK, | 6537 | DP(NETIF_MSG_LINK, |
5600 | "10M full not supported\n"); | 6538 | "10M full not supported\n"); |
@@ -5604,7 +6542,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5604 | advertising = (ADVERTISED_10baseT_Full | | 6542 | advertising = (ADVERTISED_10baseT_Full | |
5605 | ADVERTISED_TP); | 6543 | ADVERTISED_TP); |
5606 | } else { | 6544 | } else { |
5607 | if (!(bp->supported & | 6545 | if (!(bp->port.supported & |
5608 | SUPPORTED_10baseT_Half)) { | 6546 | SUPPORTED_10baseT_Half)) { |
5609 | DP(NETIF_MSG_LINK, | 6547 | DP(NETIF_MSG_LINK, |
5610 | "10M half not supported\n"); | 6548 | "10M half not supported\n"); |
@@ -5618,7 +6556,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5618 | 6556 | ||
5619 | case SPEED_100: | 6557 | case SPEED_100: |
5620 | if (cmd->duplex == DUPLEX_FULL) { | 6558 | if (cmd->duplex == DUPLEX_FULL) { |
5621 | if (!(bp->supported & | 6559 | if (!(bp->port.supported & |
5622 | SUPPORTED_100baseT_Full)) { | 6560 | SUPPORTED_100baseT_Full)) { |
5623 | DP(NETIF_MSG_LINK, | 6561 | DP(NETIF_MSG_LINK, |
5624 | "100M full not supported\n"); | 6562 | "100M full not supported\n"); |
@@ -5628,7 +6566,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5628 | advertising = (ADVERTISED_100baseT_Full | | 6566 | advertising = (ADVERTISED_100baseT_Full | |
5629 | ADVERTISED_TP); | 6567 | ADVERTISED_TP); |
5630 | } else { | 6568 | } else { |
5631 | if (!(bp->supported & | 6569 | if (!(bp->port.supported & |
5632 | SUPPORTED_100baseT_Half)) { | 6570 | SUPPORTED_100baseT_Half)) { |
5633 | DP(NETIF_MSG_LINK, | 6571 | DP(NETIF_MSG_LINK, |
5634 | "100M half not supported\n"); | 6572 | "100M half not supported\n"); |
@@ -5646,7 +6584,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5646 | return -EINVAL; | 6584 | return -EINVAL; |
5647 | } | 6585 | } |
5648 | 6586 | ||
5649 | if (!(bp->supported & SUPPORTED_1000baseT_Full)) { | 6587 | if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { |
5650 | DP(NETIF_MSG_LINK, "1G full not supported\n"); | 6588 | DP(NETIF_MSG_LINK, "1G full not supported\n"); |
5651 | return -EINVAL; | 6589 | return -EINVAL; |
5652 | } | 6590 | } |
@@ -5662,7 +6600,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5662 | return -EINVAL; | 6600 | return -EINVAL; |
5663 | } | 6601 | } |
5664 | 6602 | ||
5665 | if (!(bp->supported & SUPPORTED_2500baseX_Full)) { | 6603 | if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { |
5666 | DP(NETIF_MSG_LINK, | 6604 | DP(NETIF_MSG_LINK, |
5667 | "2.5G full not supported\n"); | 6605 | "2.5G full not supported\n"); |
5668 | return -EINVAL; | 6606 | return -EINVAL; |
@@ -5678,7 +6616,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5678 | return -EINVAL; | 6616 | return -EINVAL; |
5679 | } | 6617 | } |
5680 | 6618 | ||
5681 | if (!(bp->supported & SUPPORTED_10000baseT_Full)) { | 6619 | if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { |
5682 | DP(NETIF_MSG_LINK, "10G full not supported\n"); | 6620 | DP(NETIF_MSG_LINK, "10G full not supported\n"); |
5683 | return -EINVAL; | 6621 | return -EINVAL; |
5684 | } | 6622 | } |
@@ -5694,16 +6632,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5694 | 6632 | ||
5695 | bp->link_params.req_line_speed = cmd->speed; | 6633 | bp->link_params.req_line_speed = cmd->speed; |
5696 | bp->link_params.req_duplex = cmd->duplex; | 6634 | bp->link_params.req_duplex = cmd->duplex; |
5697 | bp->advertising = advertising; | 6635 | bp->port.advertising = advertising; |
5698 | } | 6636 | } |
5699 | 6637 | ||
5700 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" | 6638 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" |
5701 | DP_LEVEL " req_duplex %d advertising 0x%x\n", | 6639 | DP_LEVEL " req_duplex %d advertising 0x%x\n", |
5702 | bp->link_params.req_line_speed, bp->link_params.req_duplex, | 6640 | bp->link_params.req_line_speed, bp->link_params.req_duplex, |
5703 | bp->advertising); | 6641 | bp->port.advertising); |
5704 | 6642 | ||
5705 | bnx2x_stop_stats(bp); | 6643 | if (netif_running(dev)) { |
5706 | bnx2x_link_set(bp); | 6644 | bnx2x_stop_stats(bp); |
6645 | bnx2x_link_set(bp); | ||
6646 | } | ||
5707 | 6647 | ||
5708 | return 0; | 6648 | return 0; |
5709 | } | 6649 | } |
@@ -5720,21 +6660,23 @@ static void bnx2x_get_drvinfo(struct net_device *dev, | |||
5720 | strcpy(info->version, DRV_MODULE_VERSION); | 6660 | strcpy(info->version, DRV_MODULE_VERSION); |
5721 | 6661 | ||
5722 | phy_fw_ver[0] = '\0'; | 6662 | phy_fw_ver[0] = '\0'; |
5723 | bnx2x_phy_hw_lock(bp); | 6663 | if (bp->port.pmf) { |
5724 | bnx2x_get_ext_phy_fw_version(&bp->link_params, | 6664 | bnx2x_phy_hw_lock(bp); |
5725 | (bp->state != BNX2X_STATE_CLOSED), | 6665 | bnx2x_get_ext_phy_fw_version(&bp->link_params, |
5726 | phy_fw_ver, PHY_FW_VER_LEN); | 6666 | (bp->state != BNX2X_STATE_CLOSED), |
5727 | bnx2x_phy_hw_unlock(bp); | 6667 | phy_fw_ver, PHY_FW_VER_LEN); |
6668 | bnx2x_phy_hw_unlock(bp); | ||
6669 | } | ||
5728 | 6670 | ||
5729 | snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", | 6671 | snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", |
5730 | BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, | 6672 | BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, |
5731 | BCM_5710_FW_REVISION_VERSION, | 6673 | BCM_5710_FW_REVISION_VERSION, |
5732 | BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver, | 6674 | BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, |
5733 | ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); | 6675 | ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); |
5734 | strcpy(info->bus_info, pci_name(bp->pdev)); | 6676 | strcpy(info->bus_info, pci_name(bp->pdev)); |
5735 | info->n_stats = BNX2X_NUM_STATS; | 6677 | info->n_stats = BNX2X_NUM_STATS; |
5736 | info->testinfo_len = BNX2X_NUM_TESTS; | 6678 | info->testinfo_len = BNX2X_NUM_TESTS; |
5737 | info->eedump_len = bp->flash_size; | 6679 | info->eedump_len = bp->common.flash_size; |
5738 | info->regdump_len = 0; | 6680 | info->regdump_len = 0; |
5739 | } | 6681 | } |
5740 | 6682 | ||
@@ -5767,9 +6709,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
5767 | return -EINVAL; | 6709 | return -EINVAL; |
5768 | 6710 | ||
5769 | bp->wol = 1; | 6711 | bp->wol = 1; |
5770 | } else { | 6712 | } else |
5771 | bp->wol = 0; | 6713 | bp->wol = 0; |
5772 | } | 6714 | |
5773 | return 0; | 6715 | return 0; |
5774 | } | 6716 | } |
5775 | 6717 | ||
@@ -5792,13 +6734,13 @@ static int bnx2x_nway_reset(struct net_device *dev) | |||
5792 | { | 6734 | { |
5793 | struct bnx2x *bp = netdev_priv(dev); | 6735 | struct bnx2x *bp = netdev_priv(dev); |
5794 | 6736 | ||
5795 | if (bp->state != BNX2X_STATE_OPEN) { | 6737 | if (!bp->port.pmf) |
5796 | DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state); | 6738 | return 0; |
5797 | return -EAGAIN; | ||
5798 | } | ||
5799 | 6739 | ||
5800 | bnx2x_stop_stats(bp); | 6740 | if (netif_running(dev)) { |
5801 | bnx2x_link_set(bp); | 6741 | bnx2x_stop_stats(bp); |
6742 | bnx2x_link_set(bp); | ||
6743 | } | ||
5802 | 6744 | ||
5803 | return 0; | 6745 | return 0; |
5804 | } | 6746 | } |
@@ -5807,12 +6749,12 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) | |||
5807 | { | 6749 | { |
5808 | struct bnx2x *bp = netdev_priv(dev); | 6750 | struct bnx2x *bp = netdev_priv(dev); |
5809 | 6751 | ||
5810 | return bp->flash_size; | 6752 | return bp->common.flash_size; |
5811 | } | 6753 | } |
5812 | 6754 | ||
5813 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | 6755 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) |
5814 | { | 6756 | { |
5815 | int port = bp->port; | 6757 | int port = BP_PORT(bp); |
5816 | int count, i; | 6758 | int count, i; |
5817 | u32 val = 0; | 6759 | u32 val = 0; |
5818 | 6760 | ||
@@ -5834,7 +6776,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | |||
5834 | } | 6776 | } |
5835 | 6777 | ||
5836 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { | 6778 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { |
5837 | DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n"); | 6779 | DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); |
5838 | return -EBUSY; | 6780 | return -EBUSY; |
5839 | } | 6781 | } |
5840 | 6782 | ||
@@ -5843,7 +6785,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | |||
5843 | 6785 | ||
5844 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) | 6786 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) |
5845 | { | 6787 | { |
5846 | int port = bp->port; | 6788 | int port = BP_PORT(bp); |
5847 | int count, i; | 6789 | int count, i; |
5848 | u32 val = 0; | 6790 | u32 val = 0; |
5849 | 6791 | ||
@@ -5865,7 +6807,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) | |||
5865 | } | 6807 | } |
5866 | 6808 | ||
5867 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { | 6809 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { |
5868 | DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n"); | 6810 | DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); |
5869 | return -EBUSY; | 6811 | return -EBUSY; |
5870 | } | 6812 | } |
5871 | 6813 | ||
@@ -5929,7 +6871,6 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, | |||
5929 | 6871 | ||
5930 | if (val & MCPR_NVM_COMMAND_DONE) { | 6872 | if (val & MCPR_NVM_COMMAND_DONE) { |
5931 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); | 6873 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); |
5932 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
5933 | /* we read nvram data in cpu order | 6874 | /* we read nvram data in cpu order |
5934 | * but ethtool sees it as an array of bytes | 6875 | * but ethtool sees it as an array of bytes |
5935 | * converting to big-endian will do the work */ | 6876 | * converting to big-endian will do the work */ |
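
The comment kept above explains the byte-order handling for ethtool EEPROM reads: the NVRAM dword arrives in CPU order but must be handed back as raw flash-order bytes. A userspace illustration of the same conversion, with htonl standing in for the kernel's cpu_to_be32:

/* Illustration only: store a CPU-order NVRAM dword as big-endian bytes so
 * the buffer matches the flash layout that ethtool expects. */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static void store_nvram_dword(uint32_t val_cpu_order, uint8_t *ret_buf)
{
	uint32_t be = htonl(val_cpu_order);

	memcpy(ret_buf, &be, sizeof(be));
}
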
@@ -5951,16 +6892,16 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, | |||
5951 | u32 val; | 6892 | u32 val; |
5952 | 6893 | ||
5953 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 6894 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
5954 | DP(NETIF_MSG_NVM, | 6895 | DP(BNX2X_MSG_NVM, |
5955 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | 6896 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
5956 | offset, buf_size); | 6897 | offset, buf_size); |
5957 | return -EINVAL; | 6898 | return -EINVAL; |
5958 | } | 6899 | } |
5959 | 6900 | ||
5960 | if (offset + buf_size > bp->flash_size) { | 6901 | if (offset + buf_size > bp->common.flash_size) { |
5961 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 6902 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
5962 | " buf_size (0x%x) > flash_size (0x%x)\n", | 6903 | " buf_size (0x%x) > flash_size (0x%x)\n", |
5963 | offset, buf_size, bp->flash_size); | 6904 | offset, buf_size, bp->common.flash_size); |
5964 | return -EINVAL; | 6905 | return -EINVAL; |
5965 | } | 6906 | } |
5966 | 6907 | ||
@@ -6004,7 +6945,7 @@ static int bnx2x_get_eeprom(struct net_device *dev, | |||
6004 | struct bnx2x *bp = netdev_priv(dev); | 6945 | struct bnx2x *bp = netdev_priv(dev); |
6005 | int rc; | 6946 | int rc; |
6006 | 6947 | ||
6007 | DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n" | 6948 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" |
6008 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | 6949 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", |
6009 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | 6950 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, |
6010 | eeprom->len, eeprom->len); | 6951 | eeprom->len, eeprom->len); |
@@ -6066,10 +7007,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6066 | u32 align_offset; | 7007 | u32 align_offset; |
6067 | u32 val; | 7008 | u32 val; |
6068 | 7009 | ||
6069 | if (offset + buf_size > bp->flash_size) { | 7010 | if (offset + buf_size > bp->common.flash_size) { |
6070 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 7011 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
6071 | " buf_size (0x%x) > flash_size (0x%x)\n", | 7012 | " buf_size (0x%x) > flash_size (0x%x)\n", |
6072 | offset, buf_size, bp->flash_size); | 7013 | offset, buf_size, bp->common.flash_size); |
6073 | return -EINVAL; | 7014 | return -EINVAL; |
6074 | } | 7015 | } |
6075 | 7016 | ||
@@ -6093,8 +7034,6 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6093 | * convert it back to cpu order */ | 7034 | * convert it back to cpu order */ |
6094 | val = be32_to_cpu(val); | 7035 | val = be32_to_cpu(val); |
6095 | 7036 | ||
6096 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
6097 | |||
6098 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, | 7037 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, |
6099 | cmd_flags); | 7038 | cmd_flags); |
6100 | } | 7039 | } |
@@ -6114,21 +7053,20 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6114 | u32 val; | 7053 | u32 val; |
6115 | u32 written_so_far; | 7054 | u32 written_so_far; |
6116 | 7055 | ||
6117 | if (buf_size == 1) { /* ethtool */ | 7056 | if (buf_size == 1) /* ethtool */ |
6118 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); | 7057 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); |
6119 | } | ||
6120 | 7058 | ||
6121 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 7059 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
6122 | DP(NETIF_MSG_NVM, | 7060 | DP(BNX2X_MSG_NVM, |
6123 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | 7061 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
6124 | offset, buf_size); | 7062 | offset, buf_size); |
6125 | return -EINVAL; | 7063 | return -EINVAL; |
6126 | } | 7064 | } |
6127 | 7065 | ||
6128 | if (offset + buf_size > bp->flash_size) { | 7066 | if (offset + buf_size > bp->common.flash_size) { |
6129 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 7067 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
6130 | " buf_size (0x%x) > flash_size (0x%x)\n", | 7068 | " buf_size (0x%x) > flash_size (0x%x)\n", |
6131 | offset, buf_size, bp->flash_size); | 7069 | offset, buf_size, bp->common.flash_size); |
6132 | return -EINVAL; | 7070 | return -EINVAL; |
6133 | } | 7071 | } |
6134 | 7072 | ||
@@ -6151,7 +7089,6 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6151 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; | 7089 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; |
6152 | 7090 | ||
6153 | memcpy(&val, data_buf, 4); | 7091 | memcpy(&val, data_buf, 4); |
6154 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
6155 | 7092 | ||
6156 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); | 7093 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); |
6157 | 7094 | ||
@@ -6175,7 +7112,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
6175 | struct bnx2x *bp = netdev_priv(dev); | 7112 | struct bnx2x *bp = netdev_priv(dev); |
6176 | int rc; | 7113 | int rc; |
6177 | 7114 | ||
6178 | DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n" | 7115 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" |
6179 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | 7116 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", |
6180 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | 7117 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, |
6181 | eeprom->len, eeprom->len); | 7118 | eeprom->len, eeprom->len); |
@@ -6183,20 +7120,23 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
6183 | /* parameters already validated in ethtool_set_eeprom */ | 7120 | /* parameters already validated in ethtool_set_eeprom */ |
6184 | 7121 | ||
6185 | /* If the magic number is PHY (0x00504859) upgrade the PHY FW */ | 7122 | /* If the magic number is PHY (0x00504859) upgrade the PHY FW */ |
6186 | if (eeprom->magic == 0x00504859) { | 7123 | if (eeprom->magic == 0x00504859) |
6187 | 7124 | if (bp->port.pmf) { | |
6188 | bnx2x_phy_hw_lock(bp); | 7125 | |
6189 | rc = bnx2x_flash_download(bp, bp->port, | 7126 | bnx2x_phy_hw_lock(bp); |
6190 | bp->link_params.ext_phy_config, | 7127 | rc = bnx2x_flash_download(bp, BP_PORT(bp), |
6191 | (bp->state != BNX2X_STATE_CLOSED), | 7128 | bp->link_params.ext_phy_config, |
6192 | eebuf, eeprom->len); | 7129 | (bp->state != BNX2X_STATE_CLOSED), |
6193 | rc |= bnx2x_link_reset(&bp->link_params, | 7130 | eebuf, eeprom->len); |
6194 | &bp->link_vars); | 7131 | rc |= bnx2x_link_reset(&bp->link_params, |
6195 | rc |= bnx2x_phy_init(&bp->link_params, | 7132 | &bp->link_vars); |
6196 | &bp->link_vars); | 7133 | rc |= bnx2x_phy_init(&bp->link_params, |
6197 | bnx2x_phy_hw_unlock(bp); | 7134 | &bp->link_vars); |
6198 | 7135 | bnx2x_phy_hw_unlock(bp); | |
6199 | } else | 7136 | |
7137 | } else /* Only the PMF can access the PHY */ | ||
7138 | return -EINVAL; | ||
7139 | else | ||
6200 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); | 7140 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); |
6201 | 7141 | ||
6202 | return rc; | 7142 | return rc; |
@@ -6234,7 +7174,7 @@ static int bnx2x_set_coalesce(struct net_device *dev, | |||
6234 | bp->stats_ticks = 0xffff00; | 7174 | bp->stats_ticks = 0xffff00; |
6235 | bp->stats_ticks &= 0xffff00; | 7175 | bp->stats_ticks &= 0xffff00; |
6236 | 7176 | ||
6237 | if (netif_running(bp->dev)) | 7177 | if (netif_running(dev)) |
6238 | bnx2x_update_coalesce(bp); | 7178 | bnx2x_update_coalesce(bp); |
6239 | 7179 | ||
6240 | return 0; | 7180 | return 0; |
@@ -6261,6 +7201,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
6261 | struct ethtool_ringparam *ering) | 7201 | struct ethtool_ringparam *ering) |
6262 | { | 7202 | { |
6263 | struct bnx2x *bp = netdev_priv(dev); | 7203 | struct bnx2x *bp = netdev_priv(dev); |
7204 | int rc = 0; | ||
6264 | 7205 | ||
6265 | if ((ering->rx_pending > MAX_RX_AVAIL) || | 7206 | if ((ering->rx_pending > MAX_RX_AVAIL) || |
6266 | (ering->tx_pending > MAX_TX_AVAIL) || | 7207 | (ering->tx_pending > MAX_TX_AVAIL) || |
@@ -6270,12 +7211,12 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
6270 | bp->rx_ring_size = ering->rx_pending; | 7211 | bp->rx_ring_size = ering->rx_pending; |
6271 | bp->tx_ring_size = ering->tx_pending; | 7212 | bp->tx_ring_size = ering->tx_pending; |
6272 | 7213 | ||
6273 | if (netif_running(bp->dev)) { | 7214 | if (netif_running(dev)) { |
6274 | bnx2x_nic_unload(bp, 0); | 7215 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
6275 | bnx2x_nic_load(bp, 0); | 7216 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
6276 | } | 7217 | } |
6277 | 7218 | ||
6278 | return 0; | 7219 | return rc; |
6279 | } | 7220 | } |
6280 | 7221 | ||
6281 | static void bnx2x_get_pauseparam(struct net_device *dev, | 7222 | static void bnx2x_get_pauseparam(struct net_device *dev, |
@@ -6301,6 +7242,9 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6301 | { | 7242 | { |
6302 | struct bnx2x *bp = netdev_priv(dev); | 7243 | struct bnx2x *bp = netdev_priv(dev); |
6303 | 7244 | ||
7245 | if (IS_E1HMF(bp)) | ||
7246 | return 0; | ||
7247 | |||
6304 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | 7248 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" |
6305 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | 7249 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", |
6306 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | 7250 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); |
@@ -6317,7 +7261,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6317 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; | 7261 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; |
6318 | 7262 | ||
6319 | if (epause->autoneg) { | 7263 | if (epause->autoneg) { |
6320 | if (!(bp->supported & SUPPORTED_Autoneg)) { | 7264 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { |
6321 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | 7265 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); |
6322 | return -EINVAL; | 7266 | return -EINVAL; |
6323 | } | 7267 | } |
@@ -6328,8 +7272,11 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6328 | 7272 | ||
6329 | DP(NETIF_MSG_LINK, | 7273 | DP(NETIF_MSG_LINK, |
6330 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); | 7274 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); |
6331 | bnx2x_stop_stats(bp); | 7275 | |
6332 | bnx2x_link_set(bp); | 7276 | if (netif_running(dev)) { |
7277 | bnx2x_stop_stats(bp); | ||
7278 | bnx2x_link_set(bp); | ||
7279 | } | ||
6333 | 7280 | ||
6334 | return 0; | 7281 | return 0; |
6335 | } | 7282 | } |
@@ -6531,18 +7478,25 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
6531 | static int bnx2x_phys_id(struct net_device *dev, u32 data) | 7478 | static int bnx2x_phys_id(struct net_device *dev, u32 data) |
6532 | { | 7479 | { |
6533 | struct bnx2x *bp = netdev_priv(dev); | 7480 | struct bnx2x *bp = netdev_priv(dev); |
7481 | int port = BP_PORT(bp); | ||
6534 | int i; | 7482 | int i; |
6535 | 7483 | ||
7484 | if (!netif_running(dev)) | ||
7485 | return 0; | ||
7486 | |||
7487 | if (!bp->port.pmf) | ||
7488 | return 0; | ||
7489 | |||
6536 | if (data == 0) | 7490 | if (data == 0) |
6537 | data = 2; | 7491 | data = 2; |
6538 | 7492 | ||
6539 | for (i = 0; i < (data * 2); i++) { | 7493 | for (i = 0; i < (data * 2); i++) { |
6540 | if ((i % 2) == 0) | 7494 | if ((i % 2) == 0) |
6541 | bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000, | 7495 | bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, |
6542 | bp->link_params.hw_led_mode, | 7496 | bp->link_params.hw_led_mode, |
6543 | bp->link_params.chip_id); | 7497 | bp->link_params.chip_id); |
6544 | else | 7498 | else |
6545 | bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0, | 7499 | bnx2x_set_led(bp, port, LED_MODE_OFF, 0, |
6546 | bp->link_params.hw_led_mode, | 7500 | bp->link_params.hw_led_mode, |
6547 | bp->link_params.chip_id); | 7501 | bp->link_params.chip_id); |
6548 | 7502 | ||
@@ -6552,7 +7506,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) | |||
6552 | } | 7506 | } |
6553 | 7507 | ||
6554 | if (bp->link_vars.link_up) | 7508 | if (bp->link_vars.link_up) |
6555 | bnx2x_set_led(bp, bp->port, LED_MODE_OPER, | 7509 | bnx2x_set_led(bp, port, LED_MODE_OPER, |
6556 | bp->link_vars.line_speed, | 7510 | bp->link_vars.line_speed, |
6557 | bp->link_params.hw_led_mode, | 7511 | bp->link_params.hw_led_mode, |
6558 | bp->link_params.chip_id); | 7512 | bp->link_params.chip_id); |
@@ -6609,8 +7563,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
6609 | 7563 | ||
6610 | switch (state) { | 7564 | switch (state) { |
6611 | case PCI_D0: | 7565 | case PCI_D0: |
6612 | pci_write_config_word(bp->pdev, | 7566 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, |
6613 | bp->pm_cap + PCI_PM_CTRL, | ||
6614 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | 7567 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | |
6615 | PCI_PM_CTRL_PME_STATUS)); | 7568 | PCI_PM_CTRL_PME_STATUS)); |
6616 | 7569 | ||
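
The D0 branch above restores full power by clearing the power-state field of PMCSR and, in the same config write, setting the write-one-to-clear PME_Status bit so any pending wake event is acknowledged as the device comes back up. A minimal userspace sketch of that bit manipulation, using the standard PCI power-management register fields; the sample pmcsr value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define PCI_PM_CTRL_STATE_MASK  0x0003  /* current power state, D0 == 0 */
#define PCI_PM_CTRL_PME_STATUS  0x8000  /* PME pending; write 1 to clear */

int main(void)
{
        /* hypothetical read-back: device in D3hot with PME asserted */
        uint16_t pmcsr = 0x8103;

        /* same expression as the pci_write_config_word() above */
        pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | PCI_PM_CTRL_PME_STATUS;

        printf("PMCSR to write for D0: 0x%04x\n", pmcsr);  /* 0x8100 */
        return 0;
}
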
@@ -6644,82 +7597,6 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
6644 | * net_device service functions | 7597 | * net_device service functions |
6645 | */ | 7598 | */ |
6646 | 7599 | ||
6647 | /* called with netif_tx_lock from set_multicast */ | ||
6648 | static void bnx2x_set_rx_mode(struct net_device *dev) | ||
6649 | { | ||
6650 | struct bnx2x *bp = netdev_priv(dev); | ||
6651 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; | ||
6652 | |||
6653 | DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags); | ||
6654 | |||
6655 | if (dev->flags & IFF_PROMISC) | ||
6656 | rx_mode = BNX2X_RX_MODE_PROMISC; | ||
6657 | |||
6658 | else if ((dev->flags & IFF_ALLMULTI) || | ||
6659 | (dev->mc_count > BNX2X_MAX_MULTICAST)) | ||
6660 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | ||
6661 | |||
6662 | else { /* some multicasts */ | ||
6663 | int i, old, offset; | ||
6664 | struct dev_mc_list *mclist; | ||
6665 | struct mac_configuration_cmd *config = | ||
6666 | bnx2x_sp(bp, mcast_config); | ||
6667 | |||
6668 | for (i = 0, mclist = dev->mc_list; | ||
6669 | mclist && (i < dev->mc_count); | ||
6670 | i++, mclist = mclist->next) { | ||
6671 | |||
6672 | config->config_table[i].cam_entry.msb_mac_addr = | ||
6673 | swab16(*(u16 *)&mclist->dmi_addr[0]); | ||
6674 | config->config_table[i].cam_entry.middle_mac_addr = | ||
6675 | swab16(*(u16 *)&mclist->dmi_addr[2]); | ||
6676 | config->config_table[i].cam_entry.lsb_mac_addr = | ||
6677 | swab16(*(u16 *)&mclist->dmi_addr[4]); | ||
6678 | config->config_table[i].cam_entry.flags = | ||
6679 | cpu_to_le16(bp->port); | ||
6680 | config->config_table[i].target_table_entry.flags = 0; | ||
6681 | config->config_table[i].target_table_entry. | ||
6682 | client_id = 0; | ||
6683 | config->config_table[i].target_table_entry. | ||
6684 | vlan_id = 0; | ||
6685 | |||
6686 | DP(NETIF_MSG_IFUP, | ||
6687 | "setting MCAST[%d] (%04x:%04x:%04x)\n", | ||
6688 | i, config->config_table[i].cam_entry.msb_mac_addr, | ||
6689 | config->config_table[i].cam_entry.middle_mac_addr, | ||
6690 | config->config_table[i].cam_entry.lsb_mac_addr); | ||
6691 | } | ||
6692 | old = config->hdr.length_6b; | ||
6693 | if (old > i) { | ||
6694 | for (; i < old; i++) { | ||
6695 | if (CAM_IS_INVALID(config->config_table[i])) { | ||
6696 | i--; /* already invalidated */ | ||
6697 | break; | ||
6698 | } | ||
6699 | /* invalidate */ | ||
6700 | CAM_INVALIDATE(config->config_table[i]); | ||
6701 | } | ||
6702 | } | ||
6703 | |||
6704 | if (CHIP_REV_IS_SLOW(bp)) | ||
6705 | offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port); | ||
6706 | else | ||
6707 | offset = BNX2X_MAX_MULTICAST*(1 + bp->port); | ||
6708 | |||
6709 | config->hdr.length_6b = i; | ||
6710 | config->hdr.offset = offset; | ||
6711 | config->hdr.reserved0 = 0; | ||
6712 | config->hdr.reserved1 = 0; | ||
6713 | |||
6714 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
6715 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
6716 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); | ||
6717 | } | ||
6718 | |||
6719 | bp->rx_mode = rx_mode; | ||
6720 | bnx2x_set_storm_rx_mode(bp); | ||
6721 | } | ||
6722 | |||
6723 | static int bnx2x_poll(struct napi_struct *napi, int budget) | 7600 | static int bnx2x_poll(struct napi_struct *napi, int budget) |
6724 | { | 7601 | { |
6725 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | 7602 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, |
@@ -6729,7 +7606,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
6729 | 7606 | ||
6730 | #ifdef BNX2X_STOP_ON_ERROR | 7607 | #ifdef BNX2X_STOP_ON_ERROR |
6731 | if (unlikely(bp->panic)) | 7608 | if (unlikely(bp->panic)) |
6732 | goto out_panic; | 7609 | goto poll_panic; |
6733 | #endif | 7610 | #endif |
6734 | 7611 | ||
6735 | prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb); | 7612 | prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb); |
@@ -6738,30 +7615,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
6738 | 7615 | ||
6739 | bnx2x_update_fpsb_idx(fp); | 7616 | bnx2x_update_fpsb_idx(fp); |
6740 | 7617 | ||
6741 | if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons) | 7618 | if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || |
7619 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
6742 | bnx2x_tx_int(fp, budget); | 7620 | bnx2x_tx_int(fp, budget); |
6743 | 7621 | ||
6744 | |||
6745 | if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) | 7622 | if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) |
6746 | work_done = bnx2x_rx_int(fp, budget); | 7623 | work_done = bnx2x_rx_int(fp, budget); |
6747 | 7624 | ||
6748 | |||
6749 | rmb(); /* bnx2x_has_work() reads the status block */ | 7625 | rmb(); /* bnx2x_has_work() reads the status block */ |
6750 | 7626 | ||
6751 | /* must not complete if we consumed full budget */ | 7627 | /* must not complete if we consumed full budget */ |
6752 | if ((work_done < budget) && !bnx2x_has_work(fp)) { | 7628 | if ((work_done < budget) && !bnx2x_has_work(fp)) { |
6753 | 7629 | ||
6754 | #ifdef BNX2X_STOP_ON_ERROR | 7630 | #ifdef BNX2X_STOP_ON_ERROR |
6755 | out_panic: | 7631 | poll_panic: |
6756 | #endif | 7632 | #endif |
6757 | netif_rx_complete(bp->dev, napi); | 7633 | netif_rx_complete(bp->dev, napi); |
6758 | 7634 | ||
6759 | bnx2x_ack_sb(bp, fp->index, USTORM_ID, | 7635 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, |
6760 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | 7636 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); |
6761 | bnx2x_ack_sb(bp, fp->index, CSTORM_ID, | 7637 | bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID, |
6762 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); | 7638 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); |
6763 | } | 7639 | } |
6764 | |||
6765 | return work_done; | 7640 | return work_done; |
6766 | } | 7641 | } |
6767 | 7642 | ||
@@ -7055,18 +7930,145 @@ static int bnx2x_close(struct net_device *dev) | |||
7055 | return 0; | 7930 | return 0; |
7056 | } | 7931 | } |
7057 | 7932 | ||
7058 | /* Called with rtnl_lock */ | 7933 | /* called with netif_tx_lock from set_multicast */ |
7934 | static void bnx2x_set_rx_mode(struct net_device *dev) | ||
7935 | { | ||
7936 | struct bnx2x *bp = netdev_priv(dev); | ||
7937 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; | ||
7938 | int port = BP_PORT(bp); | ||
7939 | |||
7940 | if (bp->state != BNX2X_STATE_OPEN) { | ||
7941 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | ||
7942 | return; | ||
7943 | } | ||
7944 | |||
7945 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); | ||
7946 | |||
7947 | if (dev->flags & IFF_PROMISC) | ||
7948 | rx_mode = BNX2X_RX_MODE_PROMISC; | ||
7949 | |||
7950 | else if ((dev->flags & IFF_ALLMULTI) || | ||
7951 | ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp))) | ||
7952 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | ||
7953 | |||
7954 | else { /* some multicasts */ | ||
7955 | if (CHIP_IS_E1(bp)) { | ||
7956 | int i, old, offset; | ||
7957 | struct dev_mc_list *mclist; | ||
7958 | struct mac_configuration_cmd *config = | ||
7959 | bnx2x_sp(bp, mcast_config); | ||
7960 | |||
7961 | for (i = 0, mclist = dev->mc_list; | ||
7962 | mclist && (i < dev->mc_count); | ||
7963 | i++, mclist = mclist->next) { | ||
7964 | |||
7965 | config->config_table[i]. | ||
7966 | cam_entry.msb_mac_addr = | ||
7967 | swab16(*(u16 *)&mclist->dmi_addr[0]); | ||
7968 | config->config_table[i]. | ||
7969 | cam_entry.middle_mac_addr = | ||
7970 | swab16(*(u16 *)&mclist->dmi_addr[2]); | ||
7971 | config->config_table[i]. | ||
7972 | cam_entry.lsb_mac_addr = | ||
7973 | swab16(*(u16 *)&mclist->dmi_addr[4]); | ||
7974 | config->config_table[i].cam_entry.flags = | ||
7975 | cpu_to_le16(port); | ||
7976 | config->config_table[i]. | ||
7977 | target_table_entry.flags = 0; | ||
7978 | config->config_table[i]. | ||
7979 | target_table_entry.client_id = 0; | ||
7980 | config->config_table[i]. | ||
7981 | target_table_entry.vlan_id = 0; | ||
7982 | |||
7983 | DP(NETIF_MSG_IFUP, | ||
7984 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | ||
7985 | config->config_table[i]. | ||
7986 | cam_entry.msb_mac_addr, | ||
7987 | config->config_table[i]. | ||
7988 | cam_entry.middle_mac_addr, | ||
7989 | config->config_table[i]. | ||
7990 | cam_entry.lsb_mac_addr); | ||
7991 | } | ||
7992 | old = config->hdr.length_6b; | ||
7993 | if (old > i) { | ||
7994 | for (; i < old; i++) { | ||
7995 | if (CAM_IS_INVALID(config-> | ||
7996 | config_table[i])) { | ||
7997 | i--; /* already invalidated */ | ||
7998 | break; | ||
7999 | } | ||
8000 | /* invalidate */ | ||
8001 | CAM_INVALIDATE(config-> | ||
8002 | config_table[i]); | ||
8003 | } | ||
8004 | } | ||
8005 | |||
8006 | if (CHIP_REV_IS_SLOW(bp)) | ||
8007 | offset = BNX2X_MAX_EMUL_MULTI*(1 + port); | ||
8008 | else | ||
8009 | offset = BNX2X_MAX_MULTICAST*(1 + port); | ||
8010 | |||
8011 | config->hdr.length_6b = i; | ||
8012 | config->hdr.offset = offset; | ||
8013 | config->hdr.client_id = BP_CL_ID(bp); | ||
8014 | config->hdr.reserved1 = 0; | ||
8015 | |||
8016 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
8017 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
8018 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), | ||
8019 | 0); | ||
8020 | } else { /* E1H */ | ||
8021 | /* Accept one or more multicasts */ | ||
8022 | struct dev_mc_list *mclist; | ||
8023 | u32 mc_filter[MC_HASH_SIZE]; | ||
8024 | u32 crc, bit, regidx; | ||
8025 | int i; | ||
8026 | |||
8027 | memset(mc_filter, 0, 4 * MC_HASH_SIZE); | ||
8028 | |||
8029 | for (i = 0, mclist = dev->mc_list; | ||
8030 | mclist && (i < dev->mc_count); | ||
8031 | i++, mclist = mclist->next) { | ||
8032 | |||
8033 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: " | ||
8034 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
8035 | mclist->dmi_addr[0], mclist->dmi_addr[1], | ||
8036 | mclist->dmi_addr[2], mclist->dmi_addr[3], | ||
8037 | mclist->dmi_addr[4], mclist->dmi_addr[5]); | ||
8038 | |||
8039 | crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); | ||
8040 | bit = (crc >> 24) & 0xff; | ||
8041 | regidx = bit >> 5; | ||
8042 | bit &= 0x1f; | ||
8043 | mc_filter[regidx] |= (1 << bit); | ||
8044 | } | ||
8045 | |||
8046 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
8047 | REG_WR(bp, MC_HASH_OFFSET(bp, i), | ||
8048 | mc_filter[i]); | ||
8049 | } | ||
8050 | } | ||
8051 | |||
8052 | bp->rx_mode = rx_mode; | ||
8053 | bnx2x_set_storm_rx_mode(bp); | ||
8054 | } | ||
8055 | |||
8056 | /* called with rtnl_lock */ | ||
7059 | static int bnx2x_change_mac_addr(struct net_device *dev, void *p) | 8057 | static int bnx2x_change_mac_addr(struct net_device *dev, void *p) |
7060 | { | 8058 | { |
7061 | struct sockaddr *addr = p; | 8059 | struct sockaddr *addr = p; |
7062 | struct bnx2x *bp = netdev_priv(dev); | 8060 | struct bnx2x *bp = netdev_priv(dev); |
7063 | 8061 | ||
7064 | if (!is_valid_ether_addr(addr->sa_data)) | 8062 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) |
7065 | return -EINVAL; | 8063 | return -EINVAL; |
7066 | 8064 | ||
7067 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 8065 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
7068 | if (netif_running(dev)) | 8066 | if (netif_running(dev)) { |
7069 | bnx2x_set_mac_addr(bp); | 8067 | if (CHIP_IS_E1(bp)) |
8068 | bnx2x_set_mac_addr_e1(bp); | ||
8069 | else | ||
8070 | bnx2x_set_mac_addr_e1h(bp); | ||
8071 | } | ||
7070 | 8072 | ||
7071 | return 0; | 8073 | return 0; |
7072 | } | 8074 | } |
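
In the E1H branch of bnx2x_set_rx_mode() above, each multicast address is hashed with CRC-32C and the top byte of the CRC selects one of 256 filter bits: bits 7:5 pick one of the eight 32-bit MC_HASH registers, bits 4:0 pick the bit within it. A standalone sketch of that selection follows; the bitwise CRC-32C here is written to the convention the crc32c_le(0, addr, ETH_ALEN) call suggests (reflected polynomial 0x82F63B78, seed used as-is, no final inversion), which is an assumption rather than a drop-in for the kernel routine:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78.
 * Mirrors the assumed crc32c_le(seed, buf, len) convention: the seed
 * is used unmodified and no final inversion is applied. */
static uint32_t crc32c_le(uint32_t crc, const uint8_t *buf, size_t len)
{
        size_t i;
        int j;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (j = 0; j < 8; j++)
                        crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
        }
        return crc;
}

int main(void)
{
        /* example multicast address (IPv4 all-hosts group 224.0.0.1) */
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t crc, bit, regidx;

        crc = crc32c_le(0, mac, sizeof(mac));
        bit = (crc >> 24) & 0xff;   /* top byte selects one of 256 hash bits */
        regidx = bit >> 5;          /* which of the 8 32-bit MC_HASH registers */
        bit &= 0x1f;                /* bit position inside that register */

        printf("crc 0x%08x -> MC_HASH[%u] |= 1 << %u\n", crc, regidx, bit);
        return 0;
}
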
@@ -7080,7 +8082,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7080 | 8082 | ||
7081 | switch (cmd) { | 8083 | switch (cmd) { |
7082 | case SIOCGMIIPHY: | 8084 | case SIOCGMIIPHY: |
7083 | data->phy_id = bp->phy_addr; | 8085 | data->phy_id = bp->port.phy_addr; |
7084 | 8086 | ||
7085 | /* fallthrough */ | 8087 | /* fallthrough */ |
7086 | 8088 | ||
@@ -7090,12 +8092,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7090 | if (!netif_running(dev)) | 8092 | if (!netif_running(dev)) |
7091 | return -EAGAIN; | 8093 | return -EAGAIN; |
7092 | 8094 | ||
7093 | mutex_lock(&bp->phy_mutex); | 8095 | mutex_lock(&bp->port.phy_mutex); |
7094 | err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr, | 8096 | err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, |
7095 | DEFAULT_PHY_DEV_ADDR, | 8097 | DEFAULT_PHY_DEV_ADDR, |
7096 | (data->reg_num & 0x1f), &mii_regval); | 8098 | (data->reg_num & 0x1f), &mii_regval); |
7097 | data->val_out = mii_regval; | 8099 | data->val_out = mii_regval; |
7098 | mutex_unlock(&bp->phy_mutex); | 8100 | mutex_unlock(&bp->port.phy_mutex); |
7099 | return err; | 8101 | return err; |
7100 | } | 8102 | } |
7101 | 8103 | ||
@@ -7106,11 +8108,11 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7106 | if (!netif_running(dev)) | 8108 | if (!netif_running(dev)) |
7107 | return -EAGAIN; | 8109 | return -EAGAIN; |
7108 | 8110 | ||
7109 | mutex_lock(&bp->phy_mutex); | 8111 | mutex_lock(&bp->port.phy_mutex); |
7110 | err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr, | 8112 | err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, |
7111 | DEFAULT_PHY_DEV_ADDR, | 8113 | DEFAULT_PHY_DEV_ADDR, |
7112 | (data->reg_num & 0x1f), data->val_in); | 8114 | (data->reg_num & 0x1f), data->val_in); |
7113 | mutex_unlock(&bp->phy_mutex); | 8115 | mutex_unlock(&bp->port.phy_mutex); |
7114 | return err; | 8116 | return err; |
7115 | 8117 | ||
7116 | default: | 8118 | default: |
@@ -7121,10 +8123,11 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7121 | return -EOPNOTSUPP; | 8123 | return -EOPNOTSUPP; |
7122 | } | 8124 | } |
7123 | 8125 | ||
7124 | /* Called with rtnl_lock */ | 8126 | /* called with rtnl_lock */ |
7125 | static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | 8127 | static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) |
7126 | { | 8128 | { |
7127 | struct bnx2x *bp = netdev_priv(dev); | 8129 | struct bnx2x *bp = netdev_priv(dev); |
8130 | int rc = 0; | ||
7128 | 8131 | ||
7129 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | 8132 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || |
7130 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | 8133 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) |
@@ -7137,10 +8140,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | |||
7137 | dev->mtu = new_mtu; | 8140 | dev->mtu = new_mtu; |
7138 | 8141 | ||
7139 | if (netif_running(dev)) { | 8142 | if (netif_running(dev)) { |
7140 | bnx2x_nic_unload(bp, 0); | 8143 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
7141 | bnx2x_nic_load(bp, 0); | 8144 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
7142 | } | 8145 | } |
7143 | return 0; | 8146 | |
8147 | return rc; | ||
7144 | } | 8148 | } |
7145 | 8149 | ||
7146 | static void bnx2x_tx_timeout(struct net_device *dev) | 8150 | static void bnx2x_tx_timeout(struct net_device *dev) |
@@ -7156,7 +8160,7 @@ static void bnx2x_tx_timeout(struct net_device *dev) | |||
7156 | } | 8160 | } |
7157 | 8161 | ||
7158 | #ifdef BCM_VLAN | 8162 | #ifdef BCM_VLAN |
7159 | /* Called with rtnl_lock */ | 8163 | /* called with rtnl_lock */ |
7160 | static void bnx2x_vlan_rx_register(struct net_device *dev, | 8164 | static void bnx2x_vlan_rx_register(struct net_device *dev, |
7161 | struct vlan_group *vlgrp) | 8165 | struct vlan_group *vlgrp) |
7162 | { | 8166 | { |
@@ -7166,6 +8170,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev, | |||
7166 | if (netif_running(dev)) | 8170 | if (netif_running(dev)) |
7167 | bnx2x_set_client_config(bp); | 8171 | bnx2x_set_client_config(bp); |
7168 | } | 8172 | } |
8173 | |||
7169 | #endif | 8174 | #endif |
7170 | 8175 | ||
7171 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | 8176 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) |
@@ -7179,36 +8184,8 @@ static void poll_bnx2x(struct net_device *dev) | |||
7179 | } | 8184 | } |
7180 | #endif | 8185 | #endif |
7181 | 8186 | ||
7182 | static void bnx2x_reset_task(struct work_struct *work) | 8187 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, |
7183 | { | 8188 | struct net_device *dev) |
7184 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); | ||
7185 | |||
7186 | #ifdef BNX2X_STOP_ON_ERROR | ||
7187 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" | ||
7188 | " so reset not done to allow debug dump,\n" | ||
7189 | KERN_ERR " you will need to reboot when done\n"); | ||
7190 | return; | ||
7191 | #endif | ||
7192 | |||
7193 | if (!netif_running(bp->dev)) | ||
7194 | return; | ||
7195 | |||
7196 | rtnl_lock(); | ||
7197 | |||
7198 | if (bp->state != BNX2X_STATE_OPEN) { | ||
7199 | DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state); | ||
7200 | goto reset_task_exit; | ||
7201 | } | ||
7202 | |||
7203 | bnx2x_nic_unload(bp, 0); | ||
7204 | bnx2x_nic_load(bp, 0); | ||
7205 | |||
7206 | reset_task_exit: | ||
7207 | rtnl_unlock(); | ||
7208 | } | ||
7209 | |||
7210 | static int __devinit bnx2x_init_board(struct pci_dev *pdev, | ||
7211 | struct net_device *dev) | ||
7212 | { | 8189 | { |
7213 | struct bnx2x *bp; | 8190 | struct bnx2x *bp; |
7214 | int rc; | 8191 | int rc; |
@@ -7216,8 +8193,10 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7216 | SET_NETDEV_DEV(dev, &pdev->dev); | 8193 | SET_NETDEV_DEV(dev, &pdev->dev); |
7217 | bp = netdev_priv(dev); | 8194 | bp = netdev_priv(dev); |
7218 | 8195 | ||
8196 | bp->dev = dev; | ||
8197 | bp->pdev = pdev; | ||
7219 | bp->flags = 0; | 8198 | bp->flags = 0; |
7220 | bp->port = PCI_FUNC(pdev->devfn); | 8199 | bp->func = PCI_FUNC(pdev->devfn); |
7221 | 8200 | ||
7222 | rc = pci_enable_device(pdev); | 8201 | rc = pci_enable_device(pdev); |
7223 | if (rc) { | 8202 | if (rc) { |
@@ -7239,14 +8218,17 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7239 | goto err_out_disable; | 8218 | goto err_out_disable; |
7240 | } | 8219 | } |
7241 | 8220 | ||
7242 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); | 8221 | if (atomic_read(&pdev->enable_cnt) == 1) { |
7243 | if (rc) { | 8222 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); |
7244 | printk(KERN_ERR PFX "Cannot obtain PCI resources," | 8223 | if (rc) { |
7245 | " aborting\n"); | 8224 | printk(KERN_ERR PFX "Cannot obtain PCI resources," |
7246 | goto err_out_disable; | 8225 | " aborting\n"); |
7247 | } | 8226 | goto err_out_disable; |
8227 | } | ||
7248 | 8228 | ||
7249 | pci_set_master(pdev); | 8229 | pci_set_master(pdev); |
8230 | pci_save_state(pdev); | ||
8231 | } | ||
7250 | 8232 | ||
7251 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 8233 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
7252 | if (bp->pm_cap == 0) { | 8234 | if (bp->pm_cap == 0) { |
@@ -7280,13 +8262,9 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7280 | goto err_out_release; | 8262 | goto err_out_release; |
7281 | } | 8263 | } |
7282 | 8264 | ||
7283 | bp->dev = dev; | 8265 | dev->mem_start = pci_resource_start(pdev, 0); |
7284 | bp->pdev = pdev; | 8266 | dev->base_addr = dev->mem_start; |
7285 | 8267 | dev->mem_end = pci_resource_end(pdev, 0); | |
7286 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | ||
7287 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | ||
7288 | |||
7289 | dev->base_addr = pci_resource_start(pdev, 0); | ||
7290 | 8268 | ||
7291 | dev->irq = pdev->irq; | 8269 | dev->irq = pdev->irq; |
7292 | 8270 | ||
@@ -7298,8 +8276,9 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7298 | goto err_out_release; | 8276 | goto err_out_release; |
7299 | } | 8277 | } |
7300 | 8278 | ||
7301 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2), | 8279 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
7302 | pci_resource_len(pdev, 2)); | 8280 | min_t(u64, BNX2X_DB_SIZE, |
8281 | pci_resource_len(pdev, 2))); | ||
7303 | if (!bp->doorbells) { | 8282 | if (!bp->doorbells) { |
7304 | printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n"); | 8283 | printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n"); |
7305 | rc = -ENOMEM; | 8284 | rc = -ENOMEM; |
@@ -7308,47 +8287,43 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7308 | 8287 | ||
7309 | bnx2x_set_power_state(bp, PCI_D0); | 8288 | bnx2x_set_power_state(bp, PCI_D0); |
7310 | 8289 | ||
7311 | bnx2x_get_hwinfo(bp); | 8290 | /* clean indirect addresses */ |
7312 | 8291 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | |
7313 | 8292 | PCICFG_VENDOR_ID_OFFSET); | |
7314 | if (nomcp) { | 8293 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); |
7315 | printk(KERN_ERR PFX "MCP disabled, will only" | 8294 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); |
7316 | " init first device\n"); | 8295 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); |
7317 | onefunc = 1; | 8296 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); |
7318 | } | ||
7319 | |||
7320 | if (onefunc && bp->port) { | ||
7321 | printk(KERN_ERR PFX "Second device disabled, exiting\n"); | ||
7322 | rc = -ENODEV; | ||
7323 | goto err_out_unmap; | ||
7324 | } | ||
7325 | |||
7326 | bp->tx_ring_size = MAX_TX_AVAIL; | ||
7327 | bp->rx_ring_size = MAX_RX_AVAIL; | ||
7328 | |||
7329 | bp->rx_csum = 1; | ||
7330 | |||
7331 | bp->rx_offset = 0; | ||
7332 | |||
7333 | bp->tx_quick_cons_trip_int = 0xff; | ||
7334 | bp->tx_quick_cons_trip = 0xff; | ||
7335 | bp->tx_ticks_int = 50; | ||
7336 | bp->tx_ticks = 50; | ||
7337 | 8297 | ||
7338 | bp->rx_quick_cons_trip_int = 0xff; | 8298 | dev->hard_start_xmit = bnx2x_start_xmit; |
7339 | bp->rx_quick_cons_trip = 0xff; | 8299 | dev->watchdog_timeo = TX_TIMEOUT; |
7340 | bp->rx_ticks_int = 25; | ||
7341 | bp->rx_ticks = 25; | ||
7342 | 8300 | ||
7343 | bp->stats_ticks = 1000000 & 0xffff00; | 8301 | dev->ethtool_ops = &bnx2x_ethtool_ops; |
8302 | dev->open = bnx2x_open; | ||
8303 | dev->stop = bnx2x_close; | ||
8304 | dev->set_multicast_list = bnx2x_set_rx_mode; | ||
8305 | dev->set_mac_address = bnx2x_change_mac_addr; | ||
8306 | dev->do_ioctl = bnx2x_ioctl; | ||
8307 | dev->change_mtu = bnx2x_change_mtu; | ||
8308 | dev->tx_timeout = bnx2x_tx_timeout; | ||
8309 | #ifdef BCM_VLAN | ||
8310 | dev->vlan_rx_register = bnx2x_vlan_rx_register; | ||
8311 | #endif | ||
8312 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | ||
8313 | dev->poll_controller = poll_bnx2x; | ||
8314 | #endif | ||
8315 | dev->features |= NETIF_F_SG; | ||
8316 | dev->features |= NETIF_F_HW_CSUM; | ||
8317 | if (bp->flags & USING_DAC_FLAG) | ||
8318 | dev->features |= NETIF_F_HIGHDMA; | ||
8319 | #ifdef BCM_VLAN | ||
8320 | dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); | ||
8321 | #endif | ||
8322 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
7344 | 8323 | ||
7345 | bp->timer_interval = HZ; | 8324 | bp->timer_interval = HZ; |
7346 | bp->current_interval = (poll ? poll : HZ); | 8325 | bp->current_interval = (poll ? poll : HZ); |
7347 | 8326 | ||
7348 | init_timer(&bp->timer); | ||
7349 | bp->timer.expires = jiffies + bp->current_interval; | ||
7350 | bp->timer.data = (unsigned long) bp; | ||
7351 | bp->timer.function = bnx2x_timer; | ||
7352 | 8327 | ||
7353 | return 0; | 8328 | return 0; |
7354 | 8329 | ||
@@ -7357,14 +8332,14 @@ err_out_unmap: | |||
7357 | iounmap(bp->regview); | 8332 | iounmap(bp->regview); |
7358 | bp->regview = NULL; | 8333 | bp->regview = NULL; |
7359 | } | 8334 | } |
7360 | |||
7361 | if (bp->doorbells) { | 8335 | if (bp->doorbells) { |
7362 | iounmap(bp->doorbells); | 8336 | iounmap(bp->doorbells); |
7363 | bp->doorbells = NULL; | 8337 | bp->doorbells = NULL; |
7364 | } | 8338 | } |
7365 | 8339 | ||
7366 | err_out_release: | 8340 | err_out_release: |
7367 | pci_release_regions(pdev); | 8341 | if (atomic_read(&pdev->enable_cnt) == 1) |
8342 | pci_release_regions(pdev); | ||
7368 | 8343 | ||
7369 | err_out_disable: | 8344 | err_out_disable: |
7370 | pci_disable_device(pdev); | 8345 | pci_disable_device(pdev); |
@@ -7398,7 +8373,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7398 | struct net_device *dev = NULL; | 8373 | struct net_device *dev = NULL; |
7399 | struct bnx2x *bp; | 8374 | struct bnx2x *bp; |
7400 | int rc; | 8375 | int rc; |
7401 | int port = PCI_FUNC(pdev->devfn); | ||
7402 | DECLARE_MAC_BUF(mac); | 8376 | DECLARE_MAC_BUF(mac); |
7403 | 8377 | ||
7404 | if (version_printed++ == 0) | 8378 | if (version_printed++ == 0) |
@@ -7406,78 +8380,62 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7406 | 8380 | ||
7407 | /* dev zeroed in init_etherdev */ | 8381 | /* dev zeroed in init_etherdev */ |
7408 | dev = alloc_etherdev(sizeof(*bp)); | 8382 | dev = alloc_etherdev(sizeof(*bp)); |
7409 | if (!dev) | 8383 | if (!dev) { |
8384 | printk(KERN_ERR PFX "Cannot allocate net device\n"); | ||
7410 | return -ENOMEM; | 8385 | return -ENOMEM; |
8386 | } | ||
7411 | 8387 | ||
7412 | netif_carrier_off(dev); | 8388 | netif_carrier_off(dev); |
7413 | 8389 | ||
7414 | bp = netdev_priv(dev); | 8390 | bp = netdev_priv(dev); |
7415 | bp->msglevel = debug; | 8391 | bp->msglevel = debug; |
7416 | 8392 | ||
7417 | if (port && onefunc) { | 8393 | rc = bnx2x_init_dev(pdev, dev); |
7418 | printk(KERN_ERR PFX "second function disabled. exiting\n"); | ||
7419 | free_netdev(dev); | ||
7420 | return 0; | ||
7421 | } | ||
7422 | |||
7423 | rc = bnx2x_init_board(pdev, dev); | ||
7424 | if (rc < 0) { | 8394 | if (rc < 0) { |
7425 | free_netdev(dev); | 8395 | free_netdev(dev); |
7426 | return rc; | 8396 | return rc; |
7427 | } | 8397 | } |
7428 | 8398 | ||
7429 | dev->hard_start_xmit = bnx2x_start_xmit; | ||
7430 | dev->watchdog_timeo = TX_TIMEOUT; | ||
7431 | |||
7432 | dev->ethtool_ops = &bnx2x_ethtool_ops; | ||
7433 | dev->open = bnx2x_open; | ||
7434 | dev->stop = bnx2x_close; | ||
7435 | dev->set_multicast_list = bnx2x_set_rx_mode; | ||
7436 | dev->set_mac_address = bnx2x_change_mac_addr; | ||
7437 | dev->do_ioctl = bnx2x_ioctl; | ||
7438 | dev->change_mtu = bnx2x_change_mtu; | ||
7439 | dev->tx_timeout = bnx2x_tx_timeout; | ||
7440 | #ifdef BCM_VLAN | ||
7441 | dev->vlan_rx_register = bnx2x_vlan_rx_register; | ||
7442 | #endif | ||
7443 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | ||
7444 | dev->poll_controller = poll_bnx2x; | ||
7445 | #endif | ||
7446 | dev->features |= NETIF_F_SG; | ||
7447 | if (bp->flags & USING_DAC_FLAG) | ||
7448 | dev->features |= NETIF_F_HIGHDMA; | ||
7449 | dev->features |= NETIF_F_IP_CSUM; | ||
7450 | #ifdef BCM_VLAN | ||
7451 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
7452 | #endif | ||
7453 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; | ||
7454 | |||
7455 | rc = register_netdev(dev); | 8399 | rc = register_netdev(dev); |
7456 | if (rc) { | 8400 | if (rc) { |
7457 | dev_err(&pdev->dev, "Cannot register net device\n"); | 8401 | dev_err(&pdev->dev, "Cannot register net device\n"); |
7458 | if (bp->regview) | 8402 | goto init_one_exit; |
7459 | iounmap(bp->regview); | ||
7460 | if (bp->doorbells) | ||
7461 | iounmap(bp->doorbells); | ||
7462 | pci_release_regions(pdev); | ||
7463 | pci_disable_device(pdev); | ||
7464 | pci_set_drvdata(pdev, NULL); | ||
7465 | free_netdev(dev); | ||
7466 | return rc; | ||
7467 | } | 8403 | } |
7468 | 8404 | ||
7469 | pci_set_drvdata(pdev, dev); | 8405 | pci_set_drvdata(pdev, dev); |
7470 | 8406 | ||
7471 | bp->name = board_info[ent->driver_data].name; | 8407 | rc = bnx2x_init_bp(bp); |
8408 | if (rc) { | ||
8409 | unregister_netdev(dev); | ||
8410 | goto init_one_exit; | ||
8411 | } | ||
8412 | |||
8413 | bp->common.name = board_info[ent->driver_data].name; | ||
7472 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," | 8414 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," |
7473 | " IRQ %d, ", dev->name, bp->name, | 8415 | " IRQ %d, ", dev->name, bp->common.name, |
7474 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', | 8416 | (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), |
7475 | ((CHIP_ID(bp) & 0x0ff0) >> 4), | ||
7476 | bnx2x_get_pcie_width(bp), | 8417 | bnx2x_get_pcie_width(bp), |
7477 | (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", | 8418 | (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", |
7478 | dev->base_addr, bp->pdev->irq); | 8419 | dev->base_addr, bp->pdev->irq); |
7479 | printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); | 8420 | printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); |
7480 | return 0; | 8421 | return 0; |
8422 | |||
8423 | init_one_exit: | ||
8424 | if (bp->regview) | ||
8425 | iounmap(bp->regview); | ||
8426 | |||
8427 | if (bp->doorbells) | ||
8428 | iounmap(bp->doorbells); | ||
8429 | |||
8430 | free_netdev(dev); | ||
8431 | |||
8432 | if (atomic_read(&pdev->enable_cnt) == 1) | ||
8433 | pci_release_regions(pdev); | ||
8434 | |||
8435 | pci_disable_device(pdev); | ||
8436 | pci_set_drvdata(pdev, NULL); | ||
8437 | |||
8438 | return rc; | ||
7481 | } | 8439 | } |
7482 | 8440 | ||
7483 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | 8441 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) |
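
The probe banner in bnx2x_init_one() above prints the silicon stepping as a letter plus a metal number; the removed lines show the raw field masks (revision in bits 15:12 of the chip ID, metal in bits 11:4), which the new CHIP_REV()/CHIP_METAL() macros presumably wrap. A small sketch of that decoding with an invented chip-ID value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t chip_id = 0x164e1014;  /* hypothetical example value */

        /* same masks/shifts as the removed printk arguments */
        char rev = ((chip_id & 0xf000) >> 12) + 'A';
        unsigned int metal = (chip_id & 0x0ff0) >> 4;

        printf("chip rev %c%u\n", rev, metal);  /* prints "chip rev B1" */
        return 0;
}
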
@@ -7486,11 +8444,9 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
7486 | struct bnx2x *bp; | 8444 | struct bnx2x *bp; |
7487 | 8445 | ||
7488 | if (!dev) { | 8446 | if (!dev) { |
7489 | /* we get here if init_one() fails */ | ||
7490 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | 8447 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
7491 | return; | 8448 | return; |
7492 | } | 8449 | } |
7493 | |||
7494 | bp = netdev_priv(dev); | 8450 | bp = netdev_priv(dev); |
7495 | 8451 | ||
7496 | unregister_netdev(dev); | 8452 | unregister_netdev(dev); |
@@ -7502,7 +8458,10 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
7502 | iounmap(bp->doorbells); | 8458 | iounmap(bp->doorbells); |
7503 | 8459 | ||
7504 | free_netdev(dev); | 8460 | free_netdev(dev); |
7505 | pci_release_regions(pdev); | 8461 | |
8462 | if (atomic_read(&pdev->enable_cnt) == 1) | ||
8463 | pci_release_regions(pdev); | ||
8464 | |||
7506 | pci_disable_device(pdev); | 8465 | pci_disable_device(pdev); |
7507 | pci_set_drvdata(pdev, NULL); | 8466 | pci_set_drvdata(pdev, NULL); |
7508 | } | 8467 | } |
@@ -7512,21 +8471,29 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | |||
7512 | struct net_device *dev = pci_get_drvdata(pdev); | 8471 | struct net_device *dev = pci_get_drvdata(pdev); |
7513 | struct bnx2x *bp; | 8472 | struct bnx2x *bp; |
7514 | 8473 | ||
7515 | if (!dev) | 8474 | if (!dev) { |
7516 | return 0; | 8475 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
8476 | return -ENODEV; | ||
8477 | } | ||
8478 | bp = netdev_priv(dev); | ||
7517 | 8479 | ||
7518 | if (!netif_running(dev)) | 8480 | rtnl_lock(); |
7519 | return 0; | ||
7520 | 8481 | ||
7521 | bp = netdev_priv(dev); | 8482 | pci_save_state(pdev); |
7522 | 8483 | ||
7523 | bnx2x_nic_unload(bp, 0); | 8484 | if (!netif_running(dev)) { |
8485 | rtnl_unlock(); | ||
8486 | return 0; | ||
8487 | } | ||
7524 | 8488 | ||
7525 | netif_device_detach(dev); | 8489 | netif_device_detach(dev); |
7526 | 8490 | ||
7527 | pci_save_state(pdev); | 8491 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
8492 | |||
7528 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | 8493 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); |
7529 | 8494 | ||
8495 | rtnl_unlock(); | ||
8496 | |||
7530 | return 0; | 8497 | return 0; |
7531 | } | 8498 | } |
7532 | 8499 | ||
@@ -7540,21 +8507,25 @@ static int bnx2x_resume(struct pci_dev *pdev) | |||
7540 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | 8507 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
7541 | return -ENODEV; | 8508 | return -ENODEV; |
7542 | } | 8509 | } |
7543 | |||
7544 | if (!netif_running(dev)) | ||
7545 | return 0; | ||
7546 | |||
7547 | bp = netdev_priv(dev); | 8510 | bp = netdev_priv(dev); |
7548 | 8511 | ||
8512 | rtnl_lock(); | ||
8513 | |||
7549 | pci_restore_state(pdev); | 8514 | pci_restore_state(pdev); |
8515 | |||
8516 | if (!netif_running(dev)) { | ||
8517 | rtnl_unlock(); | ||
8518 | return 0; | ||
8519 | } | ||
8520 | |||
7550 | bnx2x_set_power_state(bp, PCI_D0); | 8521 | bnx2x_set_power_state(bp, PCI_D0); |
7551 | netif_device_attach(dev); | 8522 | netif_device_attach(dev); |
7552 | 8523 | ||
7553 | rc = bnx2x_nic_load(bp, 0); | 8524 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
7554 | if (rc) | ||
7555 | return rc; | ||
7556 | 8525 | ||
7557 | return 0; | 8526 | rtnl_unlock(); |
8527 | |||
8528 | return rc; | ||
7558 | } | 8529 | } |
7559 | 8530 | ||
7560 | static struct pci_driver bnx2x_pci_driver = { | 8531 | static struct pci_driver bnx2x_pci_driver = { |
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index 8707c0d05d9a..15c9a9946724 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h | |||
@@ -38,21 +38,19 @@ | |||
38 | was asserted. */ | 38 | was asserted. */ |
39 | #define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 | 39 | #define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 |
40 | #define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc | 40 | #define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc |
41 | #define BRB1_REG_NUM_OF_FULL_CYCLES_2 0x600d0 | ||
42 | #define BRB1_REG_NUM_OF_FULL_CYCLES_3 0x600d4 | ||
43 | #define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 | 41 | #define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 |
44 | /* [ST 32] The number of cycles that the pause signal towards MAC #0 was | 42 | /* [ST 32] The number of cycles that the pause signal towards MAC #0 was |
45 | asserted. */ | 43 | asserted. */ |
46 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 | 44 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 |
47 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc | 45 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc |
48 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_2 0x600c0 | ||
49 | #define BRB1_REG_NUM_OF_PAUSE_CYCLES_3 0x600c4 | ||
50 | /* [RW 10] Write client 0: De-assert pause threshold. */ | 46 | /* [RW 10] Write client 0: De-assert pause threshold. */ |
51 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 | 47 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 |
52 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c | 48 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c |
53 | /* [RW 10] Write client 0: Assert pause threshold. */ | 49 | /* [RW 10] Write client 0: Assert pause threshold. */ |
54 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 | 50 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 |
55 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c | 51 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c |
52 | /* [R 24] The number of full blocks occupied by port. */ | ||
53 | #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 | ||
56 | /* [RW 1] Reset the design by software. */ | 54 | /* [RW 1] Reset the design by software. */ |
57 | #define BRB1_REG_SOFT_RESET 0x600dc | 55 | #define BRB1_REG_SOFT_RESET 0x600dc |
58 | /* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ | 56 | /* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ |
@@ -513,7 +511,6 @@ | |||
513 | /* [RW 15] Interrupt table Read and write access to it is not possible in | 511 | /* [RW 15] Interrupt table Read and write access to it is not possible in |
514 | the middle of the work */ | 512 | the middle of the work */ |
515 | #define CSEM_REG_INT_TABLE 0x200400 | 513 | #define CSEM_REG_INT_TABLE 0x200400 |
516 | #define CSEM_REG_INT_TABLE_SIZE 256 | ||
517 | /* [ST 24] Statistics register. The number of messages that entered through | 514 | /* [ST 24] Statistics register. The number of messages that entered through |
518 | FIC0 */ | 515 | FIC0 */ |
519 | #define CSEM_REG_MSG_NUM_FIC0 0x200000 | 516 | #define CSEM_REG_MSG_NUM_FIC0 0x200000 |
@@ -587,13 +584,10 @@ | |||
587 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 | 584 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 |
588 | /* [R 1] Parity register #0 read */ | 585 | /* [R 1] Parity register #0 read */ |
589 | #define DBG_REG_DBG_PRTY_STS 0xc09c | 586 | #define DBG_REG_DBG_PRTY_STS 0xc09c |
590 | /* [RW 2] debug only: These bits indicate the credit for PCI request type 4 | ||
591 | interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are | ||
592 | configured */ | ||
593 | #define DBG_REG_PCI_REQ_CREDIT 0xc120 | ||
594 | /* [RW 32] Commands memory. The address to command X; row Y is to calculated | 587 | /* [RW 32] Commands memory. The address to command X; row Y is to calculated |
595 | as 14*X+Y. */ | 588 | as 14*X+Y. */ |
596 | #define DMAE_REG_CMD_MEM 0x102400 | 589 | #define DMAE_REG_CMD_MEM 0x102400 |
590 | #define DMAE_REG_CMD_MEM_SIZE 224 | ||
597 | /* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c | 591 | /* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c |
598 | initial value is all ones. */ | 592 | initial value is all ones. */ |
599 | #define DMAE_REG_CRC16C_INIT 0x10201c | 593 | #define DMAE_REG_CRC16C_INIT 0x10201c |
@@ -1626,7 +1620,7 @@ | |||
1626 | is reset to 0x080; giving a default blink period of approximately 8Hz. */ | 1620 | is reset to 0x080; giving a default blink period of approximately 8Hz. */ |
1627 | #define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 | 1621 | #define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 |
1628 | /* [RW 1] Port0: If set along with the | 1622 | /* [RW 1] Port0: If set along with the |
1629 | nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0 | 1623 | ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0 |
1630 | bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED | 1624 | bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED |
1631 | bit; the Traffic LED will blink with the blink rate specified in | 1625 | bit; the Traffic LED will blink with the blink rate specified in |
1632 | ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and | 1626 | ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and |
@@ -1733,9 +1727,21 @@ | |||
1733 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure | 1727 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure |
1734 | for port0 */ | 1728 | for port0 */ |
1735 | #define NIG_REG_STAT0_BRB_DISCARD 0x105f0 | 1729 | #define NIG_REG_STAT0_BRB_DISCARD 0x105f0 |
1730 | /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that | ||
1731 | are between 1024 and 1522 bytes for port0 */ | ||
1732 | #define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 | ||
1733 | /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that | ||
1734 | are 1523 bytes and above for port0 */ | ||
1735 | #define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760 | ||
1736 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure | 1736 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure |
1737 | for port1 */ | 1737 | for port1 */ |
1738 | #define NIG_REG_STAT1_BRB_DISCARD 0x10628 | 1738 | #define NIG_REG_STAT1_BRB_DISCARD 0x10628 |
1739 | /* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that | ||
1740 | are between 1024 and 1522 bytes for port1 */ | ||
1741 | #define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0 | ||
1742 | /* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that | ||
1743 | are 1523 bytes and above for port1 */ | ||
1744 | #define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0 | ||
1739 | /* [WB_R 64] Rx statistics : User octets received for LP */ | 1745 | /* [WB_R 64] Rx statistics : User octets received for LP */ |
1740 | #define NIG_REG_STAT2_BRB_OCTET 0x107e0 | 1746 | #define NIG_REG_STAT2_BRB_OCTET 0x107e0 |
1741 | #define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 | 1747 | #define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 |
@@ -1849,7 +1855,6 @@ | |||
1849 | #define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c | 1855 | #define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c |
1850 | /* [RW 24] CID for port 0 if no match */ | 1856 | /* [RW 24] CID for port 0 if no match */ |
1851 | #define PRS_REG_CID_PORT_0 0x400fc | 1857 | #define PRS_REG_CID_PORT_0 0x400fc |
1852 | #define PRS_REG_CID_PORT_1 0x40100 | ||
1853 | /* [RW 32] The CM header for flush message where 'load existed' bit in CFC | 1858 | /* [RW 32] The CM header for flush message where 'load existed' bit in CFC |
1854 | load response is reset and packet type is 0. Used in packet start message | 1859 | load response is reset and packet type is 0. Used in packet start message |
1855 | to TCM. */ | 1860 | to TCM. */ |
@@ -1957,6 +1962,10 @@ | |||
1957 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c | 1962 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c |
1958 | /* [R 7] Debug only: Number of used entries in the header FIFO */ | 1963 | /* [R 7] Debug only: Number of used entries in the header FIFO */ |
1959 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 | 1964 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 |
1965 | #define PXP2_REG_PGL_ADDR_88_F0 0x120534 | ||
1966 | #define PXP2_REG_PGL_ADDR_8C_F0 0x120538 | ||
1967 | #define PXP2_REG_PGL_ADDR_90_F0 0x12053c | ||
1968 | #define PXP2_REG_PGL_ADDR_94_F0 0x120540 | ||
1960 | #define PXP2_REG_PGL_CONTROL0 0x120490 | 1969 | #define PXP2_REG_PGL_CONTROL0 0x120490 |
1961 | #define PXP2_REG_PGL_CONTROL1 0x120514 | 1970 | #define PXP2_REG_PGL_CONTROL1 0x120514 |
1962 | /* [RW 32] third dword data of expansion rom request. this register is | 1971 | /* [RW 32] third dword data of expansion rom request. this register is |
@@ -2060,12 +2069,13 @@ | |||
2060 | #define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 | 2069 | #define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 |
2061 | #define PXP2_REG_PSWRQ_TM0_L2P 0x12001c | 2070 | #define PXP2_REG_PSWRQ_TM0_L2P 0x12001c |
2062 | #define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 | 2071 | #define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 |
2063 | /* [RW 25] Interrupt mask register #0 read/write */ | 2072 | /* [RW 32] Interrupt mask register #0 read/write */ |
2064 | #define PXP2_REG_PXP2_INT_MASK 0x120578 | 2073 | #define PXP2_REG_PXP2_INT_MASK_0 0x120578 |
2065 | /* [R 25] Interrupt register #0 read */ | 2074 | /* [R 32] Interrupt register #0 read */ |
2066 | #define PXP2_REG_PXP2_INT_STS 0x12056c | 2075 | #define PXP2_REG_PXP2_INT_STS_0 0x12056c |
2067 | /* [RC 25] Interrupt register #0 read clear */ | 2076 | #define PXP2_REG_PXP2_INT_STS_1 0x120608 |
2068 | #define PXP2_REG_PXP2_INT_STS_CLR 0x120570 | 2077 | /* [RC 32] Interrupt register #0 read clear */ |
2078 | #define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 | ||
2069 | /* [RW 32] Parity mask register #0 read/write */ | 2079 | /* [RW 32] Parity mask register #0 read/write */ |
2070 | #define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 | 2080 | #define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 |
2071 | #define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 | 2081 | #define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 |
@@ -2811,22 +2821,6 @@ | |||
2811 | #define QM_REG_QVOQIDX_97 0x16e490 | 2821 | #define QM_REG_QVOQIDX_97 0x16e490 |
2812 | #define QM_REG_QVOQIDX_98 0x16e494 | 2822 | #define QM_REG_QVOQIDX_98 0x16e494 |
2813 | #define QM_REG_QVOQIDX_99 0x16e498 | 2823 | #define QM_REG_QVOQIDX_99 0x16e498 |
2814 | /* [R 24] Remaining pause timeout for queues 15-0 */ | ||
2815 | #define QM_REG_REMAINPAUSETM0 0x168418 | ||
2816 | /* [R 24] Remaining pause timeout for queues 31-16 */ | ||
2817 | #define QM_REG_REMAINPAUSETM1 0x16841c | ||
2818 | /* [R 24] Remaining pause timeout for queues 47-32 */ | ||
2819 | #define QM_REG_REMAINPAUSETM2 0x16e69c | ||
2820 | /* [R 24] Remaining pause timeout for queues 63-48 */ | ||
2821 | #define QM_REG_REMAINPAUSETM3 0x16e6a0 | ||
2822 | /* [R 24] Remaining pause timeout for queues 79-64 */ | ||
2823 | #define QM_REG_REMAINPAUSETM4 0x16e6a4 | ||
2824 | /* [R 24] Remaining pause timeout for queues 95-80 */ | ||
2825 | #define QM_REG_REMAINPAUSETM5 0x16e6a8 | ||
2826 | /* [R 24] Remaining pause timeout for queues 111-96 */ | ||
2827 | #define QM_REG_REMAINPAUSETM6 0x16e6ac | ||
2828 | /* [R 24] Remaining pause timeout for queues 127-112 */ | ||
2829 | #define QM_REG_REMAINPAUSETM7 0x16e6b0 | ||
2830 | /* [RW 1] Initialization bit command */ | 2824 | /* [RW 1] Initialization bit command */ |
2831 | #define QM_REG_SOFT_RESET 0x168428 | 2825 | #define QM_REG_SOFT_RESET 0x168428 |
2832 | /* [RW 8] The credit cost per every task in the QM. A value per each VOQ */ | 2826 | /* [RW 8] The credit cost per every task in the QM. A value per each VOQ */ |
@@ -3826,7 +3820,6 @@ | |||
3826 | /* [RW 15] Interrupt table Read and write access to it is not possible in | 3820 | /* [RW 15] Interrupt table Read and write access to it is not possible in |
3827 | the middle of the work */ | 3821 | the middle of the work */ |
3828 | #define TSEM_REG_INT_TABLE 0x180400 | 3822 | #define TSEM_REG_INT_TABLE 0x180400 |
3829 | #define TSEM_REG_INT_TABLE_SIZE 256 | ||
3830 | /* [ST 24] Statistics register. The number of messages that entered through | 3823 | /* [ST 24] Statistics register. The number of messages that entered through |
3831 | FIC0 */ | 3824 | FIC0 */ |
3832 | #define TSEM_REG_MSG_NUM_FIC0 0x180000 | 3825 | #define TSEM_REG_MSG_NUM_FIC0 0x180000 |
@@ -4283,7 +4276,6 @@ | |||
4283 | /* [RW 15] Interrupt table Read and write access to it is not possible in | 4276 | /* [RW 15] Interrupt table Read and write access to it is not possible in |
4284 | the middle of the work */ | 4277 | the middle of the work */ |
4285 | #define USEM_REG_INT_TABLE 0x300400 | 4278 | #define USEM_REG_INT_TABLE 0x300400 |
4286 | #define USEM_REG_INT_TABLE_SIZE 256 | ||
4287 | /* [ST 24] Statistics register. The number of messages that entered through | 4279 | /* [ST 24] Statistics register. The number of messages that entered through |
4288 | FIC0 */ | 4280 | FIC0 */ |
4289 | #define USEM_REG_MSG_NUM_FIC0 0x300000 | 4281 | #define USEM_REG_MSG_NUM_FIC0 0x300000 |
@@ -4802,7 +4794,6 @@ | |||
4802 | /* [RW 15] Interrupt table Read and write access to it is not possible in | 4794 | /* [RW 15] Interrupt table Read and write access to it is not possible in |
4803 | the middle of the work */ | 4795 | the middle of the work */ |
4804 | #define XSEM_REG_INT_TABLE 0x280400 | 4796 | #define XSEM_REG_INT_TABLE 0x280400 |
4805 | #define XSEM_REG_INT_TABLE_SIZE 256 | ||
4806 | /* [ST 24] Statistics register. The number of messages that entered through | 4797 | /* [ST 24] Statistics register. The number of messages that entered through |
4807 | FIC0 */ | 4798 | FIC0 */ |
4808 | #define XSEM_REG_MSG_NUM_FIC0 0x280000 | 4799 | #define XSEM_REG_MSG_NUM_FIC0 0x280000 |
@@ -4930,10 +4921,7 @@ | |||
4930 | #define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) | 4921 | #define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) |
4931 | #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 | 4922 | #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 |
4932 | #define EMAC_MODE_25G_MODE (1L<<5) | 4923 | #define EMAC_MODE_25G_MODE (1L<<5) |
4933 | #define EMAC_MODE_ACPI_RCVD (1L<<20) | ||
4934 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) | 4924 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) |
4935 | #define EMAC_MODE_MPKT (1L<<18) | ||
4936 | #define EMAC_MODE_MPKT_RCVD (1L<<19) | ||
4937 | #define EMAC_MODE_PORT_GMII (2L<<2) | 4925 | #define EMAC_MODE_PORT_GMII (2L<<2) |
4938 | #define EMAC_MODE_PORT_MII (1L<<2) | 4926 | #define EMAC_MODE_PORT_MII (1L<<2) |
4939 | #define EMAC_MODE_PORT_MII_10M (3L<<2) | 4927 | #define EMAC_MODE_PORT_MII_10M (3L<<2) |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index caa000596b25..e74b14acf8e0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1949,6 +1949,8 @@ | |||
1949 | #define PCI_DEVICE_ID_NX2_5708 0x164c | 1949 | #define PCI_DEVICE_ID_NX2_5708 0x164c |
1950 | #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d | 1950 | #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d |
1951 | #define PCI_DEVICE_ID_NX2_57710 0x164e | 1951 | #define PCI_DEVICE_ID_NX2_57710 0x164e |
1952 | #define PCI_DEVICE_ID_NX2_57711 0x164f | ||
1953 | #define PCI_DEVICE_ID_NX2_57711E 0x1650 | ||
1952 | #define PCI_DEVICE_ID_TIGON3_5705 0x1653 | 1954 | #define PCI_DEVICE_ID_TIGON3_5705 0x1653 |
1953 | #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 | 1955 | #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 |
1954 | #define PCI_DEVICE_ID_TIGON3_5720 0x1658 | 1956 | #define PCI_DEVICE_ID_TIGON3_5720 0x1658 |