author	Dmitry Kravkov <dmitry@broadcom.com>	2010-10-05 23:34:21 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-06 17:10:41 -0400
commit	f85582f8c48addd8166727ef692d88b0ff618c5e
tree	5ea8ef71ae9ca5e67793350b3533f146116bd177 /drivers/net/bnx2x/bnx2x_cmn.c
parent	c2bff63fad94eeecf59e4ba8e4cb51688ccae1ec
bnx2x: code beautify
This patch does not include any functional changes. The changes are:
empty lines, indentation and comments.

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
-rw-r--r--	drivers/net/bnx2x/bnx2x_cmn.c	60
1 file changed, 33 insertions(+), 27 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 68181cdd2096..97ef674dcc34 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -15,7 +15,6 @@
  *
  */
 
-
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
@@ -136,7 +135,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
 	 */
 	smp_mb();
 
-	/* TBD need a thresh? */
 	if (unlikely(netif_tx_queue_stopped(txq))) {
 		/* Taking tx_lock() is needed to prevent reenabling the queue
 		 * while it's empty. This could have happen if rx_action() gets
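The comment kept by this hunk describes the classic netdev stop/wake protocol. A minimal sketch of that protocol, assuming only generic netdev helpers (example_tx_complete and its freed argument are hypothetical, not bnx2x code):

#include <linux/netdevice.h>

/* Hedged sketch: the completion-side half of the TX stop/wake
 * protocol.  The smp_mb() pairs with a barrier in the xmit path. */
static void example_tx_complete(struct netdev_queue *txq, int freed)
{
	/* Make the consumer-index update visible before reading the
	 * queue state. */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* The tx lock prevents re-enabling the queue while the
		 * xmit path is concurrently stopping it. */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) && freed > 0)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}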
@@ -623,6 +621,7 @@ reuse_rx:
 	bnx2x_set_skb_rxhash(bp, cqe, skb);
 
 	skb_checksum_none_assert(skb);
+
 	if (bp->rx_csum) {
 		if (likely(BNX2X_RX_CSUM_OK(cqe)))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
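The hunk above only inserts a blank line, but the surrounding logic is the standard RX checksum-offload decision. A compact sketch with the bnx2x-specific BNX2X_RX_CSUM_OK() predicate replaced by a hypothetical hw_csum_ok flag:

#include <linux/skbuff.h>

/* Illustrative only: mark an skb as checksummed when the NIC already
 * validated it; otherwise leave CHECKSUM_NONE so the stack verifies
 * the checksum in software. */
static void example_rx_csum(struct sk_buff *skb, bool rx_csum_enabled,
			    bool hw_csum_ok)
{
	skb_checksum_none_assert(skb); /* ip_summed must still be NONE */

	if (rx_csum_enabled && hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}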
@@ -704,7 +703,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	return IRQ_HANDLED;
 }
 
-
 /* HW Lock for shared dual port PHYs */
 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
@@ -916,6 +914,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 }
+
 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
 	int i;
@@ -1185,6 +1184,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
 	case ETH_RSS_MODE_REGULAR:
 		bp->num_queues = bnx2x_calc_num_queues(bp);
 		break;
+
 	default:
 		bp->num_queues = 1;
 		break;
@@ -1354,6 +1354,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* Enable Timer scan */
 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 #endif
+
 	for_each_nondefault_queue(bp, i) {
 		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
 		if (rc)
@@ -1473,11 +1474,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
+
 	del_timer_sync(&bp->timer);
+
 	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
-	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
 	/* Cleanup the chip if needed */
 	if (unload_mode != UNLOAD_RECOVERY)
@@ -1514,6 +1517,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 	return 0;
 }
+
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 {
 	u16 pmcsr;
@@ -1560,12 +1564,9 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 	return 0;
 }
 
-
-
 /*
  * net_device service functions
  */
-
 int bnx2x_poll(struct napi_struct *napi, int budget)
 {
 	int work_done = 0;
@@ -1595,19 +1596,19 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 		/* Fall out from the NAPI loop if needed */
 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 			bnx2x_update_fpsb_idx(fp);
 			/* bnx2x_has_rx_work() reads the status block,
 			 * thus we need to ensure that status block indices
 			 * have been actually read (bnx2x_update_fpsb_idx)
 			 * prior to this check (bnx2x_has_rx_work) so that
 			 * we won't write the "newer" value of the status block
 			 * to IGU (if there was a DMA right after
 			 * bnx2x_has_rx_work and if there is no rmb, the memory
 			 * reading (bnx2x_update_fpsb_idx) may be postponed
 			 * to right before bnx2x_ack_sb). In this case there
 			 * will never be another interrupt until there is
 			 * another update of the status block, while there
 			 * is still unhandled work.
 			 */
 			rmb();
 
 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
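The comment in this hunk carries the real reasoning of the poll loop. A condensed, self-contained sketch of the ordering it demands; every example_* name below is a stand-in for a bnx2x fastpath type or helper, not a real API:

#include <linux/netdevice.h>

struct example_fp {
	struct napi_struct napi;
	/* status-block indices written by the device via DMA */
};

static void example_read_sb_indices(struct example_fp *fp) { }
static bool example_has_work(struct example_fp *fp) { return false; }
static void example_ack_sb_enable_irq(struct example_fp *fp) { }

static void example_poll_tail(struct example_fp *fp)
{
	example_read_sb_indices(fp);	/* bnx2x_update_fpsb_idx() */

	/* Without this rmb() the index read above could be postponed
	 * past the recheck below; we would then ack a "newer" index
	 * without having handled its work, and no further interrupt
	 * would arrive. */
	rmb();

	if (!example_has_work(fp)) {
		napi_complete(&fp->napi);
		example_ack_sb_enable_irq(fp);	/* bnx2x_ack_sb() */
	}
}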
@@ -1626,7 +1627,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
@@ -1842,6 +1842,7 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 
 	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
 }
+
 /**
  *
  * @param skb
@@ -1914,6 +1915,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 
 	return hlen;
 }
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -2003,13 +2005,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	SET_FLAG(tx_start_bd->general_data,
-		 ETH_TX_START_BD_ETH_ADDR_TYPE,
-		 mac_type);
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+		 mac_type);
+
 	/* header nbd */
-	SET_FLAG(tx_start_bd->general_data,
-		 ETH_TX_START_BD_HDR_NBDS,
-		 1);
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = fp->tx_bd_prod;
@@ -2065,9 +2065,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	}
 
+	/* Map skb linear data for DMA */
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 skb_headlen(skb), DMA_TO_DEVICE);
 
+	/* Setup the data pointer of the first BD of the packet */
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
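The two addr_hi/addr_lo stores above split one 64-bit DMA handle into little-endian halves. A small sketch of that split; U64_HI and U64_LO are bnx2x macros, reproduced here under their usual shift/mask definitions:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

#define EX_U64_HI(x)	((u32)(((u64)(x)) >> 32))
#define EX_U64_LO(x)	((u32)(((u64)(x)) & 0xffffffff))

/* Store a DMA handle into a descriptor's hi/lo address words. */
static void example_set_bd_addr(__le32 *addr_hi, __le32 *addr_lo,
				dma_addr_t mapping)
{
	*addr_hi = cpu_to_le32(EX_U64_HI(mapping));
	*addr_lo = cpu_to_le32(EX_U64_LO(mapping));
}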
@@ -2101,6 +2103,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 
+	/* Handle fragmented skb */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -2165,6 +2168,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	fp->tx_db.data.prod += nbd;
 	barrier();
+
 	DOORBELL(bp, fp->cid, fp->tx_db.raw);
 
 	mmiowb();
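This hunk merely spaces out the doorbell sequence, which is the usual producer-publish ordering. A sketch with the bnx2x DOORBELL() macro replaced by a plain writel(); db_addr is a hypothetical ioremap'ed doorbell register:

#include <linux/io.h>

/* Publish new TX BDs, then ring the device doorbell. */
static void example_ring_doorbell(void __iomem *db_addr, u32 raw_db)
{
	/* Keep the compiler from reordering the BD and producer
	 * stores past the doorbell write. */
	barrier();

	writel(raw_db, db_addr);	/* DOORBELL() in bnx2x terms */

	/* Order the MMIO write against a later spin_unlock on another
	 * CPU; explicit mmiowb() was required in kernels of this era. */
	mmiowb();
}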
@@ -2187,6 +2191,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	return NETDEV_TX_OK;
 }
+
 /* called with rtnl_lock */
 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 {
@@ -2319,6 +2324,7 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
 }
 
 #endif
+
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);