author		Eric Dumazet <edumazet@google.com>	2012-04-27 17:39:21 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-30 21:38:17 -0400
commit		1191cb83489e6ee87a38ae5b127651f4a7c438dc (patch)
tree		24c0bc14c3f8d2fbbb2d9110e948afdaa443e2cf
parent		d344c4f3103c6faa1635b155d8f516e8a7e0aa4e (diff)
bnx2x: remove some bloat
Before doing skb->head_frag work on the bnx2x driver, I found too much stuff
was inlined in bnx2x/bnx2x_cmn.h for no good reason, which made my work
harder. Move some big functions out of this include file into the respective
.c files. A lot of inline keywords are not needed at all in this huge driver.

   text	   data	    bss	    dec	    hex	filename
 490083	   1270	     56	 491409	  77f91	bnx2x/bnx2x.ko.before
 484206	   1270	     56	 485532	  7689c	bnx2x/bnx2x.ko

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Eilon Greenstein <eilong@broadcom.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Matt Carlson <mcarlson@broadcom.com>
Cc: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c		| 285
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h		| 362
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c	|  24
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	| 325
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c	|   2
5 files changed, 499 insertions(+), 499 deletions(-)
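The whole patch applies one pattern: a sizeable helper that was defined
static inline in the shared header, and whose body therefore gets re-emitted
in every translation unit that calls it, moves into the single .c file that
uses it as plain static, letting the compiler alone decide whether inlining
pays off. A minimal sketch of the before/after shape, using hypothetical
names (foo.h, foo.c, struct foo_ring, foo_fill_ring()) rather than the
driver's real ones:

/* before -- in foo.h: every .c file that includes this header and
 * calls the helper gets the body expanded (or emitted) again there.
 */
struct foo_ring {
	int slots[64];
	int used;
};

static inline int foo_fill_ring(struct foo_ring *r, int n)
{
	int i;

	for (i = 0; i < n && i < 64; i++)
		r->slots[i] = 0;	/* large body, duplicated per includer */
	r->used = i;
	return 0;
}

/* after -- moved to foo.c, its only caller, with 'inline' dropped.
 * The object code now exists exactly once in the module; the compiler
 * is still free to inline it within foo.c where that is a win.
 */
static int foo_fill_ring(struct foo_ring *r, int n)
{
	int i;

	for (i = 0; i < n && i < 64; i++)
		r->slots[i] = 0;
	r->used = i;
	return 0;
}

The text-size delta quoted in the message above comes from running the
binutils size(1) tool on the built bnx2x.ko before and after the change.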
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index be0e90382d9e..ad0743bf4bde 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -358,8 +358,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  * Approximate value of the MSS for this aggregation calculated using
  * the first packet of it.
  */
-static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
-				    u16 len_on_bd)
+static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+			     u16 len_on_bd)
 {
 	/*
 	 * TPA arrgregation won't have either IP options or TCP options
@@ -385,6 +385,36 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 	return len_on_bd - hdrs_len;
 }
 
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+			      struct bnx2x_fastpath *fp, u16 index)
+{
+	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	dma_addr_t mapping;
+
+	if (unlikely(page == NULL)) {
+		BNX2X_ERR("Can't alloc sge\n");
+		return -ENOMEM;
+	}
+
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		__free_pages(page, PAGES_PER_SGE_SHIFT);
+		BNX2X_ERR("Can't map sge\n");
+		return -ENOMEM;
+	}
+
+	sw_buf->page = page;
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct bnx2x_agg_info *tpa_info,
 			       u16 pages,
@@ -483,11 +513,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return 0;
 }
 
-static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-				  struct bnx2x_agg_info *tpa_info,
-				  u16 pages,
-				  struct eth_end_agg_rx_cqe *cqe,
-				  u16 cqe_idx)
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			   struct bnx2x_agg_info *tpa_info,
+			   u16 pages,
+			   struct eth_end_agg_rx_cqe *cqe,
+			   u16 cqe_idx)
 {
 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 	u8 pad = tpa_info->placement_offset;
@@ -557,6 +587,36 @@ drop:
 	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
+static int bnx2x_alloc_rx_data(struct bnx2x *bp,
+			       struct bnx2x_fastpath *fp, u16 index)
+{
+	u8 *data;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	if (unlikely(data == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		kfree(data);
+		BNX2X_ERR("Can't map rx data\n");
+		return -ENOMEM;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -870,8 +930,8 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
  *
  * It uses a none-atomic bit operations because is called under the mutex.
  */
-static inline void bnx2x_fill_report_data(struct bnx2x *bp,
-					  struct bnx2x_link_report_data *data)
+static void bnx2x_fill_report_data(struct bnx2x *bp,
+				   struct bnx2x_link_report_data *data)
 {
 	u16 line_speed = bnx2x_get_mf_speed(bp);
 
@@ -989,6 +1049,47 @@ void __bnx2x_link_report(struct bnx2x *bp)
 	}
 }
 
+static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		struct eth_rx_sge *sge;
+
+		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+		sge->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+		sge->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+	}
+}
+
+static void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		u8 *data = first_buf->data;
+
+		if (data == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(first_buf, mapping),
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
+		kfree(data);
+		first_buf->data = NULL;
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1362,7 +1463,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
-static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
 	if (bp->flags & USING_MSIX_FLAG &&
@@ -1392,7 +1493,7 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 	return 0;
 }
 
-static inline void bnx2x_napi_enable(struct bnx2x *bp)
+static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1400,7 +1501,7 @@ static inline void bnx2x_napi_enable(struct bnx2x *bp)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
-static inline void bnx2x_napi_disable(struct bnx2x *bp)
+static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1487,7 +1588,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
  */
-static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, tx, rx;
 
@@ -1519,7 +1620,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }
 
-static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 {
 	int i;
 
@@ -1547,7 +1648,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 	}
 }
 
-static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss_pf(struct bnx2x *bp)
 {
 	int i;
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -1614,7 +1715,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 	return bnx2x_config_rss(bp, &params);
 }
 
-static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 
@@ -1723,6 +1824,87 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 	return true;
 }
 
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of the bp->fp[index].napi is kept
+ * intact.
+ */
+static void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	if (bp->stats_init)
+		memset(fp, 0, sizeof(*fp));
+	else {
+		/* Keep Queue statistics */
+		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
+		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
+
+		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
+					  GFP_KERNEL);
+		if (tmp_eth_q_stats)
+			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+			       sizeof(struct bnx2x_eth_q_stats));
+
+		tmp_eth_q_stats_old =
+			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
+				GFP_KERNEL);
+		if (tmp_eth_q_stats_old)
+			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+
+		memset(fp, 0, sizeof(*fp));
+
+		if (tmp_eth_q_stats) {
+			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+			       sizeof(struct bnx2x_eth_q_stats));
+			kfree(tmp_eth_q_stats);
+		}
+
+		if (tmp_eth_q_stats_old) {
+			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+			kfree(tmp_eth_q_stats_old);
+		}
+
+	}
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/*
+	 * set the tpa flag for each queue. The tpa flag determines the queue
+	 * minimal size so it must be set prior to queue memory allocation
+	 */
+	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
+				  (bp->flags & GRO_ENABLE_FLAG &&
+				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
+	if (bp->flags & TPA_ENABLE_FLAG)
+		fp->mode = TPA_MODE_LRO;
+	else if (bp->flags & GRO_ENABLE_FLAG)
+		fp->mode = TPA_MODE_GRO;
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on an FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		fp->disable_tpa = 1;
+#endif
+}
+
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -3169,7 +3351,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
 		bnx2x_free_fp_mem_at(bp, i);
 }
 
-static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+static void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
 	if (!CHIP_IS_E1x(bp)) {
@@ -3185,6 +3367,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 	}
 }
 
+/* Returns the number of actually allocated BDs */
+static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+			      int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i, failure_cnt = 0;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during fo init so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+			failure_cnt++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - failure_cnt));
+	}
+
+	if (failure_cnt)
+		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+			  i - failure_cnt, fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+			       cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+	return i - failure_cnt;
+}
+
+static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+		struct eth_rx_cqe_next_page *nextpg;
+
+		nextpg = (struct eth_rx_cqe_next_page *)
+			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+		nextpg->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		nextpg->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+	}
+}
+
 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block *sb;
@@ -3674,9 +3913,9 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 			  CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
 }
 
-static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
-					   u8 fw_sb_id, u8 sb_index,
-					   u8 ticks)
+static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+				    u8 fw_sb_id, u8 sb_index,
+				    u8 ticks)
 {
 
 	u32 addr = BAR_CSTRORM_INTMEM +
@@ -3687,9 +3926,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
 	   port, fw_sb_id, sb_index, ticks);
 }
 
-static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
-					   u16 fw_sb_id, u8 sb_index,
-					   u8 disable)
+static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+				    u16 fw_sb_id, u8 sb_index,
+				    u8 disable)
 {
 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
 	u32 addr = BAR_CSTRORM_INTMEM +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index cec993bc2f47..7cd99b75347a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -612,53 +612,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
-					  u8 idu_sb_id, bool is_Pf)
-{
-	u32 data, ctl, cnt = 100;
-	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
-	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
-	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
-	u32 sb_bit = 1 << (idu_sb_id%32);
-	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
-	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
-
-	/* Not supported in BC mode */
-	if (CHIP_INT_MODE_IS_BC(bp))
-		return;
-
-	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
-			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
-		IGU_REGULAR_CLEANUP_SET |
-		IGU_REGULAR_BCLEANUP;
-
-	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
-	      func_encode << IGU_CTRL_REG_FID_SHIFT |
-	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
-
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			 data, igu_addr_data);
-	REG_WR(bp, igu_addr_data, data);
-	mmiowb();
-	barrier();
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			  ctl, igu_addr_ctl);
-	REG_WR(bp, igu_addr_ctl, ctl);
-	mmiowb();
-	barrier();
-
-	/* wait for clean up to finish */
-	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
-		msleep(20);
-
-
-	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
-		DP(NETIF_MSG_HW,
-		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
-		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
-	}
-}
-
 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 				   u8 storm, u16 index, u8 op, u8 update)
 {
@@ -885,66 +838,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	bnx2x_clear_sge_mask_next_elems(fp);
 }
 
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
-	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-	dma_addr_t mapping;
-
-	if (unlikely(page == NULL)) {
-		BNX2X_ERR("Can't alloc sge\n");
-		return -ENOMEM;
-	}
-
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		BNX2X_ERR("Can't map sge\n");
-		return -ENOMEM;
-	}
-
-	sw_buf->page = page;
-	dma_unmap_addr_set(sw_buf, mapping, mapping);
-
-	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
-	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
-static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
-				      struct bnx2x_fastpath *fp, u16 index)
-{
-	u8 *data;
-	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
-	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
-	dma_addr_t mapping;
-
-	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-	if (unlikely(data == NULL))
-		return -ENOMEM;
-
-	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
-				 fp->rx_buf_size,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		kfree(data);
-		BNX2X_ERR("Can't map rx data\n");
-		return -ENOMEM;
-	}
-
-	rx_buf->data = data;
-	dma_unmap_addr_set(rx_buf, mapping, mapping);
-
-	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
 /* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
@@ -1042,66 +935,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 		bnx2x_free_rx_sge(bp, fp, i);
 }
 
-static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
-				       struct bnx2x_fastpath *fp, int last)
-{
-	int i;
-
-	for (i = 0; i < last; i++) {
-		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
-		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-		u8 *data = first_buf->data;
-
-		if (data == NULL) {
-			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
-			continue;
-		}
-		if (tpa_info->tpa_state == BNX2X_TPA_START)
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(first_buf, mapping),
-					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		kfree(data);
-		first_buf->data = NULL;
-	}
-}
-
-static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
-{
-	int i;
-
-	for (i = 1; i <= NUM_TX_RINGS; i++) {
-		struct eth_tx_next_bd *tx_next_bd =
-			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
-
-		tx_next_bd->addr_hi =
-			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-		tx_next_bd->addr_lo =
-			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-	}
-
-	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
-	txdata->tx_db.data.zero_fill1 = 0;
-	txdata->tx_db.data.prod = 0;
-
-	txdata->tx_pkt_prod = 0;
-	txdata->tx_pkt_cons = 0;
-	txdata->tx_bd_prod = 0;
-	txdata->tx_bd_cons = 0;
-	txdata->tx_pkt = 0;
-}
-
-static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
-{
-	int i;
-	u8 cos;
-
-	for_each_tx_queue(bp, i)
-		for_each_cos_in_tx_queue(&bp->fp[i], cos)
-			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
-}
-
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 {
 	int i;
@@ -1119,80 +952,6 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 	}
 }
 
-static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
-{
-	int i;
-
-	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-		struct eth_rx_sge *sge;
-
-		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-		sge->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-
-		sge->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-	}
-}
-
-static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
-{
-	int i;
-	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-		struct eth_rx_cqe_next_page *nextpg;
-
-		nextpg = (struct eth_rx_cqe_next_page *)
-			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-		nextpg->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-		nextpg->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-	}
-}
-
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
-				     int rx_ring_size)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 ring_prod, cqe_ring_prod;
-	int i, failure_cnt = 0;
-
-	fp->rx_comp_cons = 0;
-	cqe_ring_prod = ring_prod = 0;
-
-	/* This routine is called only during fo init so
-	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
-	 */
-	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
-			failure_cnt++;
-			continue;
-		}
-		ring_prod = NEXT_RX_IDX(ring_prod);
-		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-		WARN_ON(ring_prod <= (i - failure_cnt));
-	}
-
-	if (failure_cnt)
-		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
-			  i - failure_cnt, fp->index);
-
-	fp->rx_bd_prod = ring_prod;
-	/* Limit the CQE producer by the CQE ring size */
-	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-			       cqe_ring_prod);
-	fp->rx_pkt = fp->rx_calls = 0;
-
-	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
-
-	return i - failure_cnt;
-}
-
 /* Statistics ID are global per chip/path, while Client IDs for E1x are per
  * port.
  */
@@ -1421,47 +1180,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
-				struct tstorm_eth_function_common_config *tcfg,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct tstorm_eth_function_common_config);
-
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
-}
-
-static inline void storm_memset_cmng(struct bnx2x *bp,
-				struct cmng_init *cmng,
-				u8 port)
-{
-	int vn;
-	size_t size = sizeof(struct cmng_struct_per_port);
-
-	u32 addr = BAR_XSTRORM_INTMEM +
-			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
-
-	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-		int func = func_by_vn(bp, vn);
-
-		addr = BAR_XSTRORM_INTMEM +
-			XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
-		size = sizeof(struct rate_shaping_vars_per_vn);
-		__storm_memset_struct(bp, addr, size,
-				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
-
-		addr = BAR_XSTRORM_INTMEM +
-			XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
-		size = sizeof(struct fairness_vars_per_vn);
-		__storm_memset_struct(bp, addr, size,
-				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
-	}
-}
-
 /**
  * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
  *
@@ -1544,86 +1262,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
 	 */
 	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
-/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp:		driver handle
- * @index:	fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-	struct napi_struct orig_napi = fp->napi;
-	/* bzero bnx2x_fastpath contents */
-	if (bp->stats_init)
-		memset(fp, 0, sizeof(*fp));
-	else {
-		/* Keep Queue statistics */
-		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
-		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
-		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
-					  GFP_KERNEL);
-		if (tmp_eth_q_stats)
-			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
-			       sizeof(struct bnx2x_eth_q_stats));
-
-		tmp_eth_q_stats_old =
-			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
-				GFP_KERNEL);
-		if (tmp_eth_q_stats_old)
-			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-
-		memset(fp, 0, sizeof(*fp));
-
-		if (tmp_eth_q_stats) {
-			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
-			       sizeof(struct bnx2x_eth_q_stats));
-			kfree(tmp_eth_q_stats);
-		}
-
-		if (tmp_eth_q_stats_old) {
-			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-			kfree(tmp_eth_q_stats_old);
-		}
-
-	}
-
-	/* Restore the NAPI object as it has been already initialized */
-	fp->napi = orig_napi;
-
-	fp->bp = bp;
-	fp->index = index;
-	if (IS_ETH_FP(fp))
-		fp->max_cos = bp->max_cos;
-	else
-		/* Special queues support only one CoS */
-		fp->max_cos = 1;
-
-	/*
-	 * set the tpa flag for each queue. The tpa flag determines the queue
-	 * minimal size so it must be set prior to queue memory allocation
-	 */
-	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-				  (bp->flags & GRO_ENABLE_FLAG &&
-				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
-	if (bp->flags & TPA_ENABLE_FLAG)
-		fp->mode = TPA_MODE_LRO;
-	else if (bp->flags & GRO_ENABLE_FLAG)
-		fp->mode = TPA_MODE_GRO;
-
-#ifdef BCM_CNIC
-	/* We don't want TPA on an FCoE L2 ring */
-	if (IS_FCOE_FP(fp))
-		fp->disable_tpa = 1;
-#endif
-}
-
 #ifdef BCM_CNIC
 /**
  * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index faf8abd0b7eb..ddc18ee5c5ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -592,8 +592,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 #define IS_E3_ONLINE(info)	(((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
 #define IS_E3B0_ONLINE(info)	(((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
 
-static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
-				       const struct reg_addr *reg_info)
+static bool bnx2x_is_reg_online(struct bnx2x *bp,
+				const struct reg_addr *reg_info)
 {
 	if (CHIP_IS_E1(bp))
 		return IS_E1_ONLINE(reg_info->info);
@@ -610,7 +610,7 @@ static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
 }
 
 /******* Paged registers info selectors ********/
-static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_vals_e2;
@@ -620,7 +620,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_MODE_VALUES_E2;
@@ -630,7 +630,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_write_regs_e2;
@@ -640,7 +640,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_WRITE_REGS_E2;
@@ -650,7 +650,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_read_regs_e2;
@@ -660,7 +660,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_READ_REGS_E2;
@@ -670,7 +670,7 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
 {
 	int num_pages = __bnx2x_get_page_reg_num(bp);
 	int page_write_num = __bnx2x_get_page_write_num(bp);
@@ -715,7 +715,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
715 * ("read address"). There may be more than one write address per "page" and 715 * ("read address"). There may be more than one write address per "page" and
716 * more than one read address per write address. 716 * more than one read address per write address.
717 */ 717 */
718static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) 718static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
719{ 719{
720 u32 i, j, k, n; 720 u32 i, j, k, n;
721 /* addresses of the paged registers */ 721 /* addresses of the paged registers */
@@ -744,7 +744,7 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 	}
 }
 
-static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j;
 
@@ -2209,7 +2209,7 @@ static void bnx2x_self_test(struct net_device *dev,
 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
  */
-static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+static int bnx2x_num_stat_queues(struct bnx2x *bp)
 {
 	return BNX2X_NUM_ETH_QUEUES(bp);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0708cb803335..35b82e00d052 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -226,15 +226,15 @@ static LIST_HEAD(bnx2x_prev_list);
 * General service functions
 ****************************************************************************/
 
-static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
-				       u32 addr, dma_addr_t mapping)
+static void __storm_memset_dma_mapping(struct bnx2x *bp,
+				       u32 addr, dma_addr_t mapping)
 {
 	REG_WR(bp, addr, U64_LO(mapping));
 	REG_WR(bp, addr + 4, U64_HI(mapping));
 }
 
-static inline void storm_memset_spq_addr(struct bnx2x *bp,
-					 dma_addr_t mapping, u16 abs_fid)
+static void storm_memset_spq_addr(struct bnx2x *bp,
+				  dma_addr_t mapping, u16 abs_fid)
 {
 	u32 addr = XSEM_REG_FAST_MEMORY +
 			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
@@ -242,8 +242,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp,
 	__storm_memset_dma_mapping(bp, addr, mapping);
 }
 
-static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
-					 u16 pf_id)
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+				  u16 pf_id)
 {
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
 		pf_id);
@@ -255,8 +255,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
 		pf_id);
 }
 
-static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
-					u8 enable)
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+				 u8 enable)
 {
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
 		enable);
@@ -268,8 +268,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
 		enable);
 }
 
-static inline void storm_memset_eq_data(struct bnx2x *bp,
-				struct event_ring_data *eq_data,
-				u16 pfid)
+static void storm_memset_eq_data(struct bnx2x *bp,
+				 struct event_ring_data *eq_data,
+				 u16 pfid)
 {
 	size_t size = sizeof(struct event_ring_data);
@@ -279,8 +279,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp,
 	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
 }
 
-static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
-					u16 pfid)
+static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+				 u16 pfid)
 {
 	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
 	REG_WR16(bp, addr, eq_prod);
@@ -676,7 +676,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 	printk("%s" "end of fw dump\n", lvl);
 }
 
-static inline void bnx2x_fw_dump(struct bnx2x *bp)
+static void bnx2x_fw_dump(struct bnx2x *bp)
 {
 	bnx2x_fw_dump_lvl(bp, KERN_ERR);
 }
@@ -996,8 +996,8 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
 	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
-static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
-					   u32 expected, u32 poll_count)
+static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+				    u32 expected, u32 poll_count)
 {
 	u32 cur_cnt = poll_count;
 	u32 val;
@@ -1008,8 +1008,8 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
 	return val;
 }
 
-static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
-						  char *msg, u32 poll_cnt)
+static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+					   char *msg, u32 poll_cnt)
 {
 	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
 	if (val != 0) {
@@ -1106,7 +1106,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
 	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 
 
-static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
-					 u32 poll_cnt)
+static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+				  u32 poll_cnt)
 {
 	struct sdm_op_gen op_gen = {0};
@@ -1140,7 +1140,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
 	return ret;
 }
 
-static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 {
 	int pos;
 	u16 status;
@@ -1550,7 +1550,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
  * Returns the recovery leader resource id according to the engine this function
  * belongs to. Currently only only 2 engines is supported.
  */
-static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
 {
 	if (BP_PATH(bp))
 		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
@@ -1563,9 +1563,9 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
  *
  * @bp: driver handle
  *
- * Tries to aquire a leader lock for cuurent engine.
+ * Tries to aquire a leader lock for current engine.
  */
-static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 {
 	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 }
@@ -2331,6 +2331,35 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2331 "rate shaping and fairness are disabled\n"); 2331 "rate shaping and fairness are disabled\n");
2332} 2332}
2333 2333
2334static void storm_memset_cmng(struct bnx2x *bp,
2335 struct cmng_init *cmng,
2336 u8 port)
2337{
2338 int vn;
2339 size_t size = sizeof(struct cmng_struct_per_port);
2340
2341 u32 addr = BAR_XSTRORM_INTMEM +
2342 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2343
2344 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2345
2346 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2347 int func = func_by_vn(bp, vn);
2348
2349 addr = BAR_XSTRORM_INTMEM +
2350 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2351 size = sizeof(struct rate_shaping_vars_per_vn);
2352 __storm_memset_struct(bp, addr, size,
2353 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2354
2355 addr = BAR_XSTRORM_INTMEM +
2356 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2357 size = sizeof(struct fairness_vars_per_vn);
2358 __storm_memset_struct(bp, addr, size,
2359 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2360 }
2361}
2362
2334/* This function is called upon link interrupt */ 2363/* This function is called upon link interrupt */
2335static void bnx2x_link_attn(struct bnx2x *bp) 2364static void bnx2x_link_attn(struct bnx2x *bp)
2336{ 2365{
@@ -2671,6 +2700,18 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 }
 
 
+static void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
 	if (CHIP_IS_E1x(bp)) {
@@ -2700,9 +2741,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
  *
  * Return the flags that are common for the Tx-only and not normal connections.
  */
-static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
-						   struct bnx2x_fastpath *fp,
-						   bool zero_stats)
+static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+					    struct bnx2x_fastpath *fp,
+					    bool zero_stats)
 {
 	unsigned long flags = 0;
 
@@ -2722,9 +2763,9 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
 	return flags;
 }
 
-static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
-					      struct bnx2x_fastpath *fp,
-					      bool leading)
+static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp,
+				       bool leading)
 {
 	unsigned long flags = 0;
 
@@ -3117,7 +3158,7 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
  * configure FW
  * notify others function about the change
  */
-static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+static void bnx2x_config_mf_bw(struct bnx2x *bp)
 {
 	if (bp->link_vars.link_up) {
 		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
@@ -3126,7 +3167,7 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
 	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 }
 
-static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+static void bnx2x_set_mf_bw(struct bnx2x *bp)
 {
 	bnx2x_config_mf_bw(bp);
 	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
@@ -3213,7 +3254,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 }
 
 /* must be called under the spq lock */
-static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 {
 	struct eth_spe *next_spe = bp->spq_prod_bd;
 
@@ -3229,7 +3270,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 }
 
 /* must be called under the spq lock */
-static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+static void bnx2x_sp_prod_update(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 
@@ -3251,7 +3292,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
  * @cmd:	command to check
  * @cmd_type:	command type
  */
-static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 {
 	if ((cmd_type == NONE_CONNECTION_TYPE) ||
 	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
@@ -3385,7 +3426,7 @@ static void bnx2x_release_alr(struct bnx2x *bp)
 #define BNX2X_DEF_SB_ATT_IDX	0x0001
 #define BNX2X_DEF_SB_IDX	0x0002
 
-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 {
 	struct host_sp_status_block *def_sb = bp->def_status_blk;
 	u16 rc = 0;
@@ -3517,7 +3558,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 	}
 }
 
-static inline void bnx2x_fan_failure(struct bnx2x *bp)
+static void bnx2x_fan_failure(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
 	u32 ext_phy_config;
@@ -3547,7 +3588,7 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
 
 }
 
-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 {
 	int port = BP_PORT(bp);
 	int reg_offset;
@@ -3587,7 +3628,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3618,7 +3659,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3662,7 +3703,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3792,7 +3833,7 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+static void bnx2x_clear_reset_global(struct bnx2x *bp)
 {
 	u32 val;
 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
@@ -3806,7 +3847,7 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
  *
  * should be run under rtnl lock
  */
-static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+static bool bnx2x_reset_is_global(struct bnx2x *bp)
 {
 	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
@@ -3819,7 +3860,7 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+static void bnx2x_set_reset_done(struct bnx2x *bp)
 {
 	u32 val;
 	u32 bit = BP_PATH(bp) ?
@@ -3944,7 +3985,7 @@ bool bnx2x_clear_pf_load(struct bnx2x *bp)
  *
  * should be run under rtnl lock
  */
-static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
+static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 {
 	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
 		    BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3965,7 +4006,7 @@ static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 /*
  * Reset the load status for the current engine.
  */
-static inline void bnx2x_clear_load_status(struct bnx2x *bp)
+static void bnx2x_clear_load_status(struct bnx2x *bp)
 {
 	u32 val;
 	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
@@ -3976,13 +4017,13 @@ static inline void bnx2x_clear_load_status(struct bnx2x *bp)
 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
-static inline void _print_next_block(int idx, const char *blk)
+static void _print_next_block(int idx, const char *blk)
 {
 	pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4029,8 +4070,8 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
-						  bool *global, bool print)
+static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+					   bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4115,8 +4156,8 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4167,8 +4208,8 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-						  bool *global, bool print)
+static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+					   bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4209,8 +4250,8 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4236,8 +4277,8 @@ static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
 	return par_num;
 }
 
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
-				     u32 *sig)
+static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+			      u32 *sig)
 {
 	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
 	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
@@ -4308,7 +4349,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
 }
 
 
-static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
@@ -4500,7 +4541,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
 		      igu_addr);
 }
 
-static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 {
 	/* No memory barriers */
 	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
@@ -4531,7 +4572,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 }
 #endif
 
-static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 {
 	struct bnx2x_mcast_ramrod_params rparam;
 	int rc;
@@ -4556,8 +4597,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 	netif_addr_unlock_bh(bp->dev);
 }
 
-static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 				union event_ring_elem *elem)
 {
 	unsigned long ramrod_flags = 0;
 	int rc = 0;
@@ -4604,7 +4645,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 #endif
 
-static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 {
 	netif_addr_lock_bh(bp->dev);
 
@@ -4625,7 +4666,7 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 	netif_addr_unlock_bh(bp->dev);
 }
 
-static inline void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
 				union event_ring_elem *elem)
 {
 	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
@@ -4642,7 +4683,7 @@ static inline void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
 }
 
 /* called with rtnl_lock */
-static inline void bnx2x_after_function_update(struct bnx2x *bp)
+static void bnx2x_after_function_update(struct bnx2x *bp)
 {
 	int q, rc;
 	struct bnx2x_fastpath *fp;
@@ -4712,7 +4753,7 @@ static inline void bnx2x_after_function_update(struct bnx2x *bp)
 #endif /* BCM_CNIC */
 }
 
-static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 	struct bnx2x *bp, u32 cid)
 {
 	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
@@ -5056,7 +5097,7 @@ static void bnx2x_timer(unsigned long data)
  * nic init service functions
  */
 
-static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 {
 	u32 i;
 	if (!(len%4) && !(addr%4))
@@ -5069,10 +5110,10 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 }
 
 /* helper: writes FP SP data to FW - data_size in dwords */
-static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
 				int fw_sb_id,
 				u32 *sb_data_p,
 				u32 data_size)
 {
 	int index;
 	for (index = 0; index < data_size; index++)
@@ -5082,7 +5123,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
 		       *(sb_data_p + index));
 }
 
-static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 {
 	u32 *sb_data_p;
 	u32 data_size = 0;
@@ -5115,7 +5156,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 }
 
 /* helper: writes SP SB data to FW */
-static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
 		struct hc_sp_status_block_data *sp_sb_data)
 {
 	int func = BP_FUNC(bp);
@@ -5127,7 +5168,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
 		       *((u32 *)sp_sb_data + i));
 }
 
-static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+static void bnx2x_zero_sp_sb(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 	struct hc_sp_status_block_data sp_sb_data;
@@ -5148,8 +5189,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
 }
 
 
-static inline
-void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 				int igu_sb_id, int igu_seg_id)
 {
 	hc_sm->igu_sb_id = igu_sb_id;
@@ -5160,8 +5200,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 
 
 /* allocates state machine ids. */
-static inline
-void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 {
 	/* zero out state machine indices */
 	/* rx indices */
@@ -5569,7 +5608,7 @@ static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
 	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
 }
 
-static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
 {
 	if (CHIP_IS_E1x(fp->bp))
 		return BP_L_ID(fp->bp) + fp->index;
@@ -5630,6 +5669,43 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 	bnx2x_update_fpsb_idx(fp);
 }
 
+static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+	int i;
+
+	for (i = 1; i <= NUM_TX_RINGS; i++) {
+		struct eth_tx_next_bd *tx_next_bd =
+			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+		tx_next_bd->addr_hi =
+			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+		tx_next_bd->addr_lo =
+			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+	}
+
+	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+	txdata->tx_db.data.zero_fill1 = 0;
+	txdata->tx_db.data.prod = 0;
+
+	txdata->tx_pkt_prod = 0;
+	txdata->tx_pkt_cons = 0;
+	txdata->tx_bd_prod = 0;
+	txdata->tx_bd_cons = 0;
+	txdata->tx_pkt = 0;
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+	int i;
+	u8 cos;
+
+	for_each_tx_queue(bp, i)
+		for_each_cos_in_tx_queue(&bp->fp[i], cos)
+			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+}
+
 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
 	int i;
@@ -6154,7 +6230,7 @@ void bnx2x_pf_disable(struct bnx2x *bp)
 	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 }
 
-static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+static void bnx2x__common_init_phy(struct bnx2x *bp)
 {
 	u32 shmem_base[2], shmem2_base[2];
 	shmem_base[0] = bp->common.shmem_base;
@@ -6882,12 +6958,59 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 	REG_WR_DMAE(bp, reg, wb_write, 2);
 }
 
-static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
+				   u8 idu_sb_id, bool is_Pf)
+{
+	u32 data, ctl, cnt = 100;
+	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+	u32 sb_bit = 1 << (idu_sb_id%32);
+	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+	/* Not supported in BC mode */
+	if (CHIP_INT_MODE_IS_BC(bp))
+		return;
+
+	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
+		IGU_REGULAR_CLEANUP_SET |
+		IGU_REGULAR_BCLEANUP;
+
+	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+	      func_encode << IGU_CTRL_REG_FID_SHIFT |
+	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			 data, igu_addr_data);
+	REG_WR(bp, igu_addr_data, data);
+	mmiowb();
+	barrier();
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			 ctl, igu_addr_ctl);
+	REG_WR(bp, igu_addr_ctl, ctl);
+	mmiowb();
+	barrier();
+
+	/* wait for clean up to finish */
+	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+		msleep(20);
+
+
+	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+		DP(NETIF_MSG_HW,
+		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
+		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+	}
+}
+
+static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 {
 	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 }
 
-static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
 {
 	u32 i, base = FUNC_ILT_BASE(func);
 	for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7238,7 +7361,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 }
 
-static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
 	int num_groups;
 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
@@ -7604,7 +7727,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
  * - HC configuration
  * - Queue's CDU context
  */
-static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 {
 
@@ -7954,7 +8077,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
 	/* TODO: Close Doorbell port? */
 }
 
-static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 
@@ -7969,7 +8092,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 	return bnx2x_func_state_change(bp, &func_params);
 }
 
-static inline int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 	int rc;
@@ -8084,7 +8207,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp)
 	bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 }
 
-static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+static int bnx2x_func_wait_started(struct bnx2x *bp)
 {
 	int tout = 50;
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -8394,7 +8517,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
  *
  * @bp: driver handle
  */
-static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+static void bnx2x_mcp_wait_one(struct bnx2x *bp)
 {
 	/* special handling for emulation and FPGA,
 	   wait 10 times longer */
@@ -8730,7 +8853,7 @@ exit_leader_reset:
 	return rc;
 }
 
-static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+static void bnx2x_recovery_failed(struct bnx2x *bp)
 {
 	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
 
@@ -10803,8 +10926,8 @@ static int bnx2x_close(struct net_device *dev)
 	return 0;
 }
 
-static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 				struct bnx2x_mcast_ramrod_params *p)
 {
 	int mc_count = netdev_mc_count(bp->dev);
 	struct bnx2x_mcast_list_elem *mc_mac =
@@ -10827,7 +10950,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 	return 0;
 }
 
-static inline void bnx2x_free_mcast_macs_list(
+static void bnx2x_free_mcast_macs_list(
 	struct bnx2x_mcast_ramrod_params *p)
 {
 	struct bnx2x_mcast_list_elem *mc_mac =
@@ -10845,7 +10968,7 @@ static inline void bnx2x_free_mcast_macs_list(
  *
  * We will use zero (0) as a MAC type for these MACs.
  */
-static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+static int bnx2x_set_uc_list(struct bnx2x *bp)
 {
 	int rc;
 	struct net_device *dev = bp->dev;
@@ -10876,7 +10999,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
 				 BNX2X_UC_LIST_MAC, &ramrod_flags);
 }
 
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+static int bnx2x_set_mc_list(struct bnx2x *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
@@ -11062,7 +11185,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
 };
 
-static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
 	struct device *dev = &bp->pdev->dev;
 
@@ -11328,7 +11451,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
 	return 0;
 }
 
-static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	u32 *target = (u32 *)_target;
@@ -11342,7 +11465,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
    Ops array is stored in the following format:
    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
  */
-static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	struct raw_op *target = (struct raw_op *)_target;
@@ -11360,7 +11483,7 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
  * IRO array is stored in the following format:
  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
  */
-static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	struct iro *target = (struct iro *)_target;
@@ -11380,7 +11503,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 	}
 }
 
-static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be16 *source = (const __be16 *)_source;
 	u16 *target = (u16 *)_target;
@@ -11523,7 +11646,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
 }
 
 /* must be called after sriov-enable */
-static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 {
 	int cid_count = BNX2X_L2_CID_COUNT(bp);
 
@@ -11539,7 +11662,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
  * @dev: pci device
  *
  */
-static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 {
 	int pos;
 	u16 control;
@@ -12015,7 +12138,7 @@ module_exit(bnx2x_cleanup);
  * This function will wait until the ramrod completion returns.
  * Return 0 if success, -ENODEV if ramrod doesn't return.
  */
-static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 {
 	unsigned long ramrod_flags = 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 7366e92c3fa7..1e2785cd11d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1316,7 +1316,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
  *
  * @param bp
  */
-static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
 	int i;
 	int first_queue_query_index;