author		Michael Chan <mchan@broadcom.com>	2012-12-06 05:33:09 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-07 12:44:01 -0500
commit		2bc4078e92b28375a762d7236c1c9619eecab315 (patch)
tree		420fb018df8b016e9e10927d783140f4b9145964 /drivers/net/ethernet
parent		e503e0662447ce2bd7c0a73c90395c78ebee494c (diff)
bnx2: Add BNX2 prefix to descriptor structures and macros

for namespace consistency.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2.c	| 190
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2.h	|  79
-rw-r--r--	drivers/net/ethernet/broadcom/cnic.c	|  70
-rw-r--r--	drivers/net/ethernet/broadcom/cnic.h	|  14
4 files changed, 181 insertions(+), 172 deletions(-)
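The rename is mechanical, but the ring macros it touches encode one detail worth keeping in mind when reading the hunks below: the last descriptor in every ring page is a chain pointer to the next page, so the producer and consumer indices must step over it. A minimal user-space sketch of the renamed TX macros follows; the 4 KiB page, the resulting count of 256, and the main() driver are illustrative assumptions, not part of the patch:

#include <stdio.h>

/* Assumed 4 KiB page: 4096 / sizeof(struct bnx2_tx_bd) (16 bytes) = 256
 * descriptors per page; the last one holds the chain pointer. */
#define BNX2_TX_DESC_CNT	256
#define BNX2_MAX_TX_DESC_CNT	(BNX2_TX_DESC_CNT - 1)

/* Same shape as the bnx2.h definitions after the rename: advance the
 * index, skipping the chain-pointer BD (+2 instead of +1). */
#define BNX2_NEXT_TX_BD(x)	(((x) & (BNX2_MAX_TX_DESC_CNT - 1)) ==	\
				 (BNX2_MAX_TX_DESC_CNT - 1)) ?		\
				 (x) + 2 : (x) + 1
#define BNX2_TX_RING_IDX(x)	((x) & BNX2_MAX_TX_DESC_CNT)

int main(void)
{
	unsigned int prod = 253;
	int i;

	for (i = 0; i < 4; i++) {
		printf("prod=%u ring idx=%u\n", prod, BNX2_TX_RING_IDX(prod));
		prod = BNX2_NEXT_TX_BD(prod);	/* 254 -> 256; BD 255 skipped */
	}
	return 0;	/* indices printed: 253, 254, 0, 1 */
}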
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 41fa6af2fd86..98cb76b9482c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -260,10 +260,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 	 * needs to be skipped.
 	 */
 	diff = txr->tx_prod - txr->tx_cons;
-	if (unlikely(diff >= TX_DESC_CNT)) {
+	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
 		diff &= 0xffff;
-		if (diff == TX_DESC_CNT)
-			diff = MAX_TX_DESC_CNT;
+		if (diff == BNX2_TX_DESC_CNT)
+			diff = BNX2_MAX_TX_DESC_CNT;
 	}
 	return bp->tx_ring_size - diff;
 }
@@ -824,7 +824,7 @@ bnx2_free_mem(struct bnx2 *bp)
 
 	for (i = 0; i < bp->ctx_pages; i++) {
 		if (bp->ctx_blk[i]) {
-			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
 					  bp->ctx_blk[i],
 					  bp->ctx_blk_mapping[i]);
 			bp->ctx_blk[i] = NULL;
@@ -888,12 +888,12 @@ bnx2_alloc_mem(struct bnx2 *bp)
 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
-		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
 		if (bp->ctx_pages == 0)
 			bp->ctx_pages = 1;
 		for (i = 0; i < bp->ctx_pages; i++) {
 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
-						BCM_PAGE_SIZE,
+						BNX2_PAGE_SIZE,
 						&bp->ctx_blk_mapping[i],
 						GFP_KERNEL);
 			if (bp->ctx_blk[i] == NULL)
@@ -2538,7 +2538,7 @@ bnx2_init_5709_context(struct bnx2 *bp)
 	u32 val;
 
 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
-	val |= (BCM_PAGE_BITS - 8) << 16;
+	val |= (BNX2_PAGE_BITS - 8) << 16;
 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
 	for (i = 0; i < 10; i++) {
 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
@@ -2553,7 +2553,7 @@ bnx2_init_5709_context(struct bnx2 *bp)
 		int j;
 
 		if (bp->ctx_blk[i])
-			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
+			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
 		else
 			return -ENOMEM;
 
@@ -2690,9 +2690,9 @@ static inline int
 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	dma_addr_t mapping;
-	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
-	struct rx_bd *rxbd =
-		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct bnx2_rx_bd *rxbd =
+		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
 	struct page *page = alloc_page(gfp);
 
 	if (!page)
@@ -2714,7 +2714,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 static void
 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 {
-	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct page *page = rx_pg->page;
 
 	if (!page)
@@ -2731,9 +2731,10 @@ static inline int
 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	u8 *data;
-	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
+	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	dma_addr_t mapping;
-	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct bnx2_rx_bd *rxbd =
+		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
 
 	data = kmalloc(bp->rx_buf_size, gfp);
 	if (!data)
@@ -2802,7 +2803,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 	barrier();
 	cons = *bnapi->hw_tx_cons_ptr;
 	barrier();
-	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
 		cons++;
 	return cons;
 }
@@ -2823,11 +2824,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	sw_cons = txr->tx_cons;
 
 	while (sw_cons != hw_cons) {
-		struct sw_tx_bd *tx_buf;
+		struct bnx2_sw_tx_bd *tx_buf;
 		struct sk_buff *skb;
 		int i, last;
 
-		sw_ring_cons = TX_RING_IDX(sw_cons);
+		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
 
 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
@@ -2841,7 +2842,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			last_idx = sw_cons + tx_buf->nr_frags + 1;
 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
-			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
+			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
 				last_idx++;
 			}
 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
@@ -2856,17 +2857,18 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
-			sw_cons = NEXT_TX_BD(sw_cons);
+			struct bnx2_sw_tx_bd *tx_buf;
 
+			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+
+			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
 			dma_unmap_page(&bp->pdev->dev,
-				dma_unmap_addr(
-					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
-					mapping),
+				dma_unmap_addr(tx_buf, mapping),
 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				PCI_DMA_TODEVICE);
 		}
 
-		sw_cons = NEXT_TX_BD(sw_cons);
+		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
 
 		tx_bytes += skb->len;
 		dev_kfree_skb(skb);
@@ -2905,8 +2907,8 @@ static void
 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 			struct sk_buff *skb, int count)
 {
-	struct sw_pg *cons_rx_pg, *prod_rx_pg;
-	struct rx_bd *cons_bd, *prod_bd;
+	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
+	struct bnx2_rx_bd *cons_bd, *prod_bd;
 	int i;
 	u16 hw_prod, prod;
 	u16 cons = rxr->rx_pg_cons;
@@ -2933,12 +2935,14 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	hw_prod = rxr->rx_pg_prod;
 
 	for (i = 0; i < count; i++) {
-		prod = RX_PG_RING_IDX(hw_prod);
+		prod = BNX2_RX_PG_RING_IDX(hw_prod);
 
 		prod_rx_pg = &rxr->rx_pg_ring[prod];
 		cons_rx_pg = &rxr->rx_pg_ring[cons];
-		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
+						[BNX2_RX_IDX(cons)];
+		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
+						[BNX2_RX_IDX(prod)];
 
 		if (prod != cons) {
 			prod_rx_pg->page = cons_rx_pg->page;
@@ -2950,8 +2954,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 
 		}
-		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
-		hw_prod = NEXT_RX_BD(hw_prod);
+		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
+		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
 	}
 	rxr->rx_pg_prod = hw_prod;
 	rxr->rx_pg_cons = cons;
@@ -2961,8 +2965,8 @@ static inline void
 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 		   u8 *data, u16 cons, u16 prod)
 {
-	struct sw_bd *cons_rx_buf, *prod_rx_buf;
-	struct rx_bd *cons_bd, *prod_bd;
+	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
+	struct bnx2_rx_bd *cons_bd, *prod_bd;
 
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
@@ -2981,8 +2985,8 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
 
-	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
-	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
+	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
@@ -3022,7 +3026,7 @@ error:
 		return skb;
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
-		struct sw_pg *rx_pg;
+		struct bnx2_sw_pg *rx_pg;
 		u16 pg_cons = rxr->rx_pg_cons;
 		u16 pg_prod = rxr->rx_pg_prod;
 
@@ -3065,7 +3069,7 @@ error:
 			rx_pg->page = NULL;
 
 			err = bnx2_alloc_rx_page(bp, rxr,
-						 RX_PG_RING_IDX(pg_prod),
+						 BNX2_RX_PG_RING_IDX(pg_prod),
 						 GFP_ATOMIC);
 			if (unlikely(err)) {
 				rxr->rx_pg_cons = pg_cons;
@@ -3083,8 +3087,8 @@ error:
 			skb->truesize += PAGE_SIZE;
 			skb->len += frag_len;
 
-			pg_prod = NEXT_RX_BD(pg_prod);
-			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
+			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
 		}
 		rxr->rx_pg_prod = pg_prod;
 		rxr->rx_pg_cons = pg_cons;
@@ -3101,7 +3105,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 	barrier();
 	cons = *bnapi->hw_rx_cons_ptr;
 	barrier();
-	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
+	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
 		cons++;
 	return cons;
 }
@@ -3125,13 +3129,14 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	while (sw_cons != hw_cons) {
 		unsigned int len, hdr_len;
 		u32 status;
-		struct sw_bd *rx_buf, *next_rx_buf;
+		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u8 *data;
+		u16 next_ring_idx;
 
-		sw_ring_cons = RX_RING_IDX(sw_cons);
-		sw_ring_prod = RX_RING_IDX(sw_prod);
+		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
+		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
 
 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
 		data = rx_buf->data;
@@ -3146,8 +3151,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
 			PCI_DMA_FROMDEVICE);
 
-		next_rx_buf =
-			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
+		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
 		prefetch(get_l2_fhdr(next_rx_buf->data));
 
 		len = rx_hdr->l2_fhdr_pkt_len;
@@ -3239,8 +3244,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		rx_pkt++;
 
 next_rx:
-		sw_cons = NEXT_RX_BD(sw_cons);
-		sw_prod = NEXT_RX_BD(sw_prod);
+		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
+		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
 
 		if ((rx_pkt == budget))
 			break;
@@ -4907,13 +4912,13 @@ bnx2_init_chip(struct bnx2 *bp)
 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
 
-	val = (BCM_PAGE_BITS - 8) << 24;
+	val = (BNX2_PAGE_BITS - 8) << 24;
 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
 
 	/* Configure page size. */
 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
-	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
+	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
 
 	val = bp->mac_addr[0] +
@@ -5113,7 +5118,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
 static void
 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
 {
-	struct tx_bd *txbd;
+	struct bnx2_tx_bd *txbd;
 	u32 cid = TX_CID;
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
@@ -5128,7 +5133,7 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
 
 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
 
-	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
+	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
 
 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
@@ -5143,17 +5148,17 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
 }
 
 static void
-bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
-	int num_rings)
+bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
+	u32 buf_size, int num_rings)
 {
 	int i;
-	struct rx_bd *rxbd;
+	struct bnx2_rx_bd *rxbd;
 
 	for (i = 0; i < num_rings; i++) {
 		int j;
 
 		rxbd = &rx_ring[i][0];
-		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
+		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
 			rxbd->rx_bd_len = buf_size;
 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
 		}
@@ -5225,8 +5230,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 				    ring_num, i, bp->rx_pg_ring_size);
 			break;
 		}
-		prod = NEXT_RX_BD(prod);
-		ring_prod = RX_PG_RING_IDX(prod);
+		prod = BNX2_NEXT_RX_BD(prod);
+		ring_prod = BNX2_RX_PG_RING_IDX(prod);
 	}
 	rxr->rx_pg_prod = prod;
 
@@ -5237,8 +5242,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 				    ring_num, i, bp->rx_ring_size);
 			break;
 		}
-		prod = NEXT_RX_BD(prod);
-		ring_prod = RX_RING_IDX(prod);
+		prod = BNX2_NEXT_RX_BD(prod);
+		ring_prod = BNX2_RX_RING_IDX(prod);
 	}
 	rxr->rx_prod = prod;
 
@@ -5303,8 +5308,8 @@ static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
 {
 	u32 max, num_rings = 1;
 
-	while (ring_size > MAX_RX_DESC_CNT) {
-		ring_size -= MAX_RX_DESC_CNT;
+	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
+		ring_size -= BNX2_MAX_RX_DESC_CNT;
 		num_rings++;
 	}
 	/* round to next power of 2 */
@@ -5337,13 +5342,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
 
 		jumbo_size = size * pages;
-		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
-			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
+			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
 
 		bp->rx_pg_ring_size = jumbo_size;
 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
-							MAX_RX_PG_RINGS);
-		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+							BNX2_MAX_RX_PG_RINGS);
+		bp->rx_max_pg_ring_idx =
+			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
 		bp->rx_copy_thresh = 0;
 	}
@@ -5354,8 +5360,8 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
 	bp->rx_ring_size = size;
-	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
-	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
+	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
+	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
 }
 
 static void
@@ -5371,13 +5377,13 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		if (txr->tx_buf_ring == NULL)
 			continue;
 
-		for (j = 0; j < TX_DESC_CNT; ) {
-			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
+			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
 			int k, last;
 
 			if (skb == NULL) {
-				j = NEXT_TX_BD(j);
+				j = BNX2_NEXT_TX_BD(j);
 				continue;
 			}
 
@@ -5389,9 +5395,9 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			tx_buf->skb = NULL;
 
 			last = tx_buf->nr_frags;
-			j = NEXT_TX_BD(j);
-			for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
-				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+			j = BNX2_NEXT_TX_BD(j);
+			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
+				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
 				dma_unmap_page(&bp->pdev->dev,
 					dma_unmap_addr(tx_buf, mapping),
 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
@@ -5417,7 +5423,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 			return;
 
 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
-			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
 			u8 *data = rx_buf->data;
 
 			if (data == NULL)
@@ -5741,8 +5747,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	unsigned char *packet;
 	u16 rx_start_idx, rx_idx;
 	dma_addr_t map;
-	struct tx_bd *txbd;
-	struct sw_bd *rx_buf;
+	struct bnx2_tx_bd *txbd;
+	struct bnx2_sw_bd *rx_buf;
 	struct l2_fhdr *rx_hdr;
 	int ret = -ENODEV;
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
@@ -5794,7 +5800,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	num_pkts = 0;
 
-	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
+	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
 
 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5802,7 +5808,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
 
 	num_pkts++;
-	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
 	txr->tx_prod_bseq += pkt_size;
 
 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
@@ -6533,8 +6539,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 	dma_addr_t mapping;
-	struct tx_bd *txbd;
-	struct sw_tx_bd *tx_buf;
+	struct bnx2_tx_bd *txbd;
+	struct bnx2_sw_tx_bd *tx_buf;
 	u32 len, vlan_tag_flags, last_frag, mss;
 	u16 prod, ring_prod;
 	int i;
@@ -6557,7 +6563,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	len = skb_headlen(skb);
 	prod = txr->tx_prod;
-	ring_prod = TX_RING_IDX(prod);
+	ring_prod = BNX2_TX_RING_IDX(prod);
 
 	vlan_tag_flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -6627,8 +6633,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	for (i = 0; i < last_frag; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		prod = NEXT_TX_BD(prod);
-		ring_prod = TX_RING_IDX(prod);
+		prod = BNX2_NEXT_TX_BD(prod);
+		ring_prod = BNX2_TX_RING_IDX(prod);
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = skb_frag_size(frag);
@@ -6652,7 +6658,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_tx_sent_queue(txq, skb->len);
 
-	prod = NEXT_TX_BD(prod);
+	prod = BNX2_NEXT_TX_BD(prod);
 	txr->tx_prod_bseq += skb->len;
 
 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
@@ -6682,7 +6688,7 @@ dma_error:
 
 	/* start back at beginning and unmap skb */
 	prod = txr->tx_prod;
-	ring_prod = TX_RING_IDX(prod);
+	ring_prod = BNX2_TX_RING_IDX(prod);
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = NULL;
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
@@ -6690,8 +6696,8 @@ dma_error:
 
 	/* unmap remaining mapped pages */
 	for (i = 0; i < last_frag; i++) {
-		prod = NEXT_TX_BD(prod);
-		ring_prod = TX_RING_IDX(prod);
+		prod = BNX2_NEXT_TX_BD(prod);
+		ring_prod = BNX2_TX_RING_IDX(prod);
 		tx_buf = &txr->tx_buf_ring[ring_prod];
 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
@@ -7254,13 +7260,13 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
-	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
-	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
+	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
+	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
 
 	ering->rx_pending = bp->rx_ring_size;
 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
 
-	ering->tx_max_pending = MAX_TX_DESC_CNT;
+	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
 	ering->tx_pending = bp->tx_ring_size;
 }
 
@@ -7326,8 +7332,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 	struct bnx2 *bp = netdev_priv(dev);
 	int rc;
 
-	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
-		(ering->tx_pending > MAX_TX_DESC_CNT) ||
+	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
+		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
 
 		return -EINVAL;
@@ -8299,7 +8305,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->mac_addr[4] = (u8) (reg >> 8);
 	bp->mac_addr[5] = (u8) reg;
 
-	bp->tx_ring_size = MAX_TX_DESC_CNT;
+	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
 	bnx2_set_rx_ring_size(bp, 255);
 
 	bp->tx_quick_cons_trip_int = 2;
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 5f136745ffdd..58caa2266772 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -20,7 +20,7 @@
 /*
  * tx_bd definition
  */
-struct tx_bd {
+struct bnx2_tx_bd {
 	u32 tx_bd_haddr_hi;
 	u32 tx_bd_haddr_lo;
 	u32 tx_bd_mss_nbytes;
@@ -48,7 +48,7 @@ struct tx_bd {
 /*
  * rx_bd definition
  */
-struct rx_bd {
+struct bnx2_rx_bd {
 	u32 rx_bd_haddr_hi;
 	u32 rx_bd_haddr_lo;
 	u32 rx_bd_len;
@@ -6538,37 +6538,38 @@ struct l2_fhdr {
 
 /* Use CPU native page size up to 16K for the ring sizes. */
 #if (PAGE_SHIFT > 14)
-#define BCM_PAGE_BITS	14
+#define BNX2_PAGE_BITS	14
 #else
-#define BCM_PAGE_BITS	PAGE_SHIFT
+#define BNX2_PAGE_BITS	PAGE_SHIFT
 #endif
-#define BCM_PAGE_SIZE	(1 << BCM_PAGE_BITS)
+#define BNX2_PAGE_SIZE	(1 << BNX2_PAGE_BITS)
 
-#define TX_DESC_CNT  (BCM_PAGE_SIZE / sizeof(struct tx_bd))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define BNX2_TX_DESC_CNT  (BNX2_PAGE_SIZE / sizeof(struct bnx2_tx_bd))
+#define BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1)
 
-#define MAX_RX_RINGS	8
-#define MAX_RX_PG_RINGS	32
-#define RX_DESC_CNT  (BCM_PAGE_SIZE / sizeof(struct rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
-#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
-#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS)
+#define BNX2_MAX_RX_RINGS	8
+#define BNX2_MAX_RX_PG_RINGS	32
+#define BNX2_RX_DESC_CNT  (BNX2_PAGE_SIZE / sizeof(struct bnx2_rx_bd))
+#define BNX2_MAX_RX_DESC_CNT (BNX2_RX_DESC_CNT - 1)
+#define BNX2_MAX_TOTAL_RX_DESC_CNT (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_RINGS)
+#define BNX2_MAX_TOTAL_RX_PG_DESC_CNT \
+	(BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_PG_RINGS)
 
-#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) ==		\
-	(MAX_TX_DESC_CNT - 1)) ?				\
+#define BNX2_NEXT_TX_BD(x) (((x) & (BNX2_MAX_TX_DESC_CNT - 1)) ==	\
+	(BNX2_MAX_TX_DESC_CNT - 1)) ?					\
 	(x) + 2 : (x) + 1
 
-#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT)
+#define BNX2_TX_RING_IDX(x) ((x) & BNX2_MAX_TX_DESC_CNT)
 
-#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) ==		\
-	(MAX_RX_DESC_CNT - 1)) ?				\
+#define BNX2_NEXT_RX_BD(x) (((x) & (BNX2_MAX_RX_DESC_CNT - 1)) ==	\
+	(BNX2_MAX_RX_DESC_CNT - 1)) ?					\
 	(x) + 2 : (x) + 1
 
-#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
-#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
+#define BNX2_RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define BNX2_RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
 
-#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4))
-#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
+#define BNX2_RX_RING(x) (((x) & ~BNX2_MAX_RX_DESC_CNT) >> (BNX2_PAGE_BITS - 4))
+#define BNX2_RX_IDX(x) ((x) & BNX2_MAX_RX_DESC_CNT)
 
 /* Context size. */
 #define CTX_SHIFT	7
@@ -6609,7 +6610,7 @@ struct l2_fhdr {
  * RX ring buffer contains pointer to kmalloc() data only,
  * skb are built only after Hardware filled the frame.
  */
-struct sw_bd {
+struct bnx2_sw_bd {
 	u8 *data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
@@ -6623,23 +6624,23 @@ static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
 }
 
 
-struct sw_pg {
+struct bnx2_sw_pg {
 	struct page *page;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
-struct sw_tx_bd {
+struct bnx2_sw_tx_bd {
 	struct sk_buff *skb;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	unsigned short is_gso;
 	unsigned short nr_frags;
 };
 
-#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
-#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT)
-#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
-#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT)
-#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define SW_RXBD_RING_SIZE (sizeof(struct bnx2_sw_bd) * BNX2_RX_DESC_CNT)
+#define SW_RXPG_RING_SIZE (sizeof(struct bnx2_sw_pg) * BNX2_RX_DESC_CNT)
+#define RXBD_RING_SIZE (sizeof(struct bnx2_rx_bd) * BNX2_RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnx2_sw_tx_bd) * BNX2_TX_DESC_CNT)
+#define TXBD_RING_SIZE (sizeof(struct bnx2_tx_bd) * BNX2_TX_DESC_CNT)
 
 /* Buffered flash (Atmel: AT45DB011B) specific information */
 #define SEEPROM_PAGE_BITS	2
@@ -6720,8 +6721,8 @@ struct bnx2_tx_ring_info {
 	u32 tx_bidx_addr;
 	u32 tx_bseq_addr;
 
-	struct tx_bd *tx_desc_ring;
-	struct sw_tx_bd *tx_buf_ring;
+	struct bnx2_tx_bd *tx_desc_ring;
+	struct bnx2_sw_tx_bd *tx_buf_ring;
 
 	u16 tx_cons;
 	u16 hw_tx_cons;
@@ -6741,13 +6742,13 @@ struct bnx2_rx_ring_info {
 	u16 rx_pg_prod;
 	u16 rx_pg_cons;
 
-	struct sw_bd *rx_buf_ring;
-	struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
-	struct sw_pg *rx_pg_ring;
-	struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS];
+	struct bnx2_sw_bd *rx_buf_ring;
+	struct bnx2_rx_bd *rx_desc_ring[BNX2_MAX_RX_RINGS];
+	struct bnx2_sw_pg *rx_pg_ring;
+	struct bnx2_rx_bd *rx_pg_desc_ring[BNX2_MAX_RX_PG_RINGS];
 
-	dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
-	dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS];
+	dma_addr_t rx_desc_mapping[BNX2_MAX_RX_RINGS];
+	dma_addr_t rx_pg_desc_mapping[BNX2_MAX_RX_PG_RINGS];
 };
 
 struct bnx2_napi {
@@ -7052,7 +7053,7 @@ struct bnx2_rv2p_fw_file {
 
 #define RV2P_P1_FIXUP_PAGE_SIZE_IDX		0
#define RV2P_BD_PAGE_SIZE_MSK			0xffff
-#define RV2P_BD_PAGE_SIZE			((BCM_PAGE_SIZE / 16) - 1)
+#define RV2P_BD_PAGE_SIZE			((BNX2_PAGE_SIZE / 16) - 1)
 
 #define RV2P_PROC1				0
 #define RV2P_PROC2				1
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 091c60a9897c..756a2a771291 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -724,7 +724,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 
 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
+			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
 					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
@@ -783,7 +783,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 
 	for (i = 0; i < pages; i++) {
 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
-						    BCM_PAGE_SIZE,
+						    BNX2_PAGE_SIZE,
 						    &dma->pg_map_arr[i],
 						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
@@ -792,8 +792,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	if (!use_pg_tbl)
 		return 0;
 
-	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
-			  ~(BCM_PAGE_SIZE - 1);
+	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
+			  ~(BNX2_PAGE_SIZE - 1);
 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
@@ -898,8 +898,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
 		int i, k, arr_size;
 
-		cp->ctx_blk_size = BCM_PAGE_SIZE;
-		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+		cp->ctx_blk_size = BNX2_PAGE_SIZE;
+		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 			   sizeof(struct cnic_ctx);
 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -931,7 +931,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
 				dma_alloc_coherent(&dev->pcidev->dev,
-						   BCM_PAGE_SIZE,
+						   BNX2_PAGE_SIZE,
 						   &cp->ctx_arr[i].mapping,
 						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
@@ -1011,7 +1011,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 	if (udev->l2_ring)
 		return 0;
 
-	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
@@ -2898,7 +2898,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
 	u16 hw_cons, sw_cons;
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
-					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
+					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
 	u32 cmd;
 	int comp = 0;
 
@@ -4366,7 +4366,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
 		u32 val;
 
-		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
 
 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4508,7 +4508,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
 	u32 cid_addr, tx_cid, sb_id;
 	u32 val, offset0, offset1, offset2, offset3;
 	int i;
-	struct tx_bd *txbd;
+	struct bnx2_tx_bd *txbd;
 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
 	struct status_block *s_blk = cp->status_blk.gen;
 
@@ -4554,7 +4554,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
 	txbd = udev->l2_ring;
 
 	buf_map = udev->l2_buf_map;
-	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
 		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
 		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
@@ -4574,7 +4574,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	struct cnic_uio_dev *udev = cp->udev;
 	u32 cid_addr, sb_id, val, coal_reg, coal_val;
 	int i;
-	struct rx_bd *rxbd;
+	struct bnx2_rx_bd *rxbd;
 	struct status_block *s_blk = cp->status_blk.gen;
 	dma_addr_t ring_map = udev->l2_ring_map;
 
@@ -4610,8 +4610,8 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
-	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
-	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
 		int n = (i % cp->l2_rx_ring_size) + 1;
 
@@ -4621,11 +4621,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
-	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 	rxbd->rx_bd_haddr_hi = val;
 
-	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 	rxbd->rx_bd_haddr_lo = val;
 
@@ -4691,10 +4691,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
-	if (BCM_PAGE_BITS > 12)
+	if (BNX2_PAGE_BITS > 12)
 		val |= (12 - 8)  << 4;
 	else
-		val |= (BCM_PAGE_BITS - 8) << 4;
+		val |= (BNX2_PAGE_BITS - 8) << 4;
 
 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
 
@@ -4724,13 +4724,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4750,13 +4750,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4895,10 +4895,10 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
 
-	memset(txbd, 0, BCM_PAGE_SIZE);
+	memset(txbd, 0, BNX2_PAGE_SIZE);
 
 	buf_map = udev->l2_buf_map;
-	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
 		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
 		struct eth_tx_parse_bd_e1x *pbd_e1x =
 			&((txbd + 1)->parse_bd_e1x);
@@ -4954,9 +4954,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
-				BCM_PAGE_SIZE);
+				BNX2_PAGE_SIZE);
 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
-				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
+				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -4980,20 +4980,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
 	}
 
-	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
 	rxbd->addr_hi = cpu_to_le32(val);
 	data->rx.bd_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
 	rxbd->addr_lo = cpu_to_le32(val);
 	data->rx.bd_page_base.lo = cpu_to_le32(val);
 
 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
-	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
 	rxcqe->addr_hi = cpu_to_le32(val);
 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
 	rxcqe->addr_lo = cpu_to_le32(val);
 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
 
@@ -5258,8 +5258,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
-	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
-	memset(rx_ring, 0, BCM_PAGE_SIZE);
+	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
+	memset(rx_ring, 0, BNX2_PAGE_SIZE);
 }
 
 static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 148604c3fa0c..6fa7a989606a 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -80,18 +80,18 @@
80#define CNIC_LOCAL_PORT_MAX 61024 80#define CNIC_LOCAL_PORT_MAX 61024
81#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) 81#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
82 82
83#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) 83#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe))
84#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) 84#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe))
85#define MAX_KWQE_CNT (KWQE_CNT - 1) 85#define MAX_KWQE_CNT (KWQE_CNT - 1)
86#define MAX_KCQE_CNT (KCQE_CNT - 1) 86#define MAX_KCQE_CNT (KCQE_CNT - 1)
87 87
88#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) 88#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
89#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) 89#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
90 90
91#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) 91#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5))
92#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) 92#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
93 93
94#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) 94#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5))
95#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) 95#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
96 96
97#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ 97#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
@@ -422,9 +422,11 @@ struct bnx2x_bd_chain_next {
422 422
423#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) 423#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
424 424
425#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 425#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
426 sizeof(struct eth_rx_bd))
426#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2) 427#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
427#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 428#define BNX2X_RCQ_DESC_CNT (BNX2_PAGE_SIZE / \
429 sizeof(union eth_rx_cqe))
428#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1) 430#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
429 431
430#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \ 432#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \