author	Alexander Duyck <alexander.h.duyck@intel.com>	2009-02-12 13:16:59 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-16 02:55:59 -0500
commit	73cd78f1d36da244b8b3b81d3a0f32372a636e5c (patch)
tree	c969da7a7d00783f3034a5c7a989c7254a614c5e /drivers
parent	eaf5d59092dbed853bfab956ce123293832998f5 (diff)
igb: misc whitespace/formatting cleanups
This patch is intended to hold several whitespace, formatting, and comment
cleanups that have been found while cleaning up the igb driver.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/igb/igb_main.c	58
1 file changed, 20 insertions(+), 38 deletions(-)
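
A condensed before/after sketch of the recurring cleanup patterns in this patch, abbreviated here from the hunks below purely for illustration (it is not a separate change):

	/* before: redundant parentheses and a needlessly wrapped call */
	struct igb_ring *ring = &(adapter->tx_ring[i]);
	wr32(ring->itr_register,
	     ring->itr_val |
	     0x80000000);

	/* after: simplified address expression, arguments joined */
	struct igb_ring *ring = &adapter->tx_ring[i];
	wr32(ring->itr_register, ring->itr_val |
	     0x80000000);

The remaining hunks drop stray blank lines, reflow over-wrapped statements, and touch up kernel-doc comments.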
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index f9d576bfef90..53e580bdfe29 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1691,7 +1691,6 @@ static int igb_close(struct net_device *netdev)
  *
  * Return 0 on success, negative on failure
  **/
-
 int igb_setup_tx_resources(struct igb_adapter *adapter,
 			   struct igb_ring *tx_ring)
 {
@@ -1771,13 +1770,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 	int i, j;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
+		struct igb_ring *ring = &adapter->tx_ring[i];
 		j = ring->reg_idx;
 		wr32(E1000_TDLEN(j),
 		     ring->count * sizeof(struct e1000_tx_desc));
 		tdba = ring->dma;
 		wr32(E1000_TDBAL(j),
 		     tdba & 0x00000000ffffffffULL);
 		wr32(E1000_TDBAH(j), tdba >> 32);
 
 		ring->head = E1000_TDH(j);
@@ -1797,8 +1796,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 		wr32(E1000_DCA_TXCTRL(j), txctrl);
 	}
 
-
-
 	/* Use the default values for the Tx Inter Packet Gap (IPG) timer */
 
 	/* Program the Transmit Control Register */
@@ -1826,7 +1823,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
  *
  * Returns 0 on success, negative on failure
  **/
-
 int igb_setup_rx_resources(struct igb_adapter *adapter,
 			   struct igb_ring *rx_ring)
 {
@@ -1913,7 +1909,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 * enable stripping of CRC. It's unlikely this will break BMC
 	 * redirection as it did with e1000. Newer features require
 	 * that the HW strips the CRC.
-	*/
+	 */
 	rctl |= E1000_RCTL_SECRC;
 
 	/*
@@ -1991,14 +1987,14 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		struct igb_ring *ring = &adapter->rx_ring[i];
 		j = ring->reg_idx;
 		rdba = ring->dma;
 		wr32(E1000_RDBAL(j),
 		     rdba & 0x00000000ffffffffULL);
 		wr32(E1000_RDBAH(j), rdba >> 32);
 		wr32(E1000_RDLEN(j),
 		     ring->count * sizeof(union e1000_adv_rx_desc));
 
 		ring->head = E1000_RDH(j);
 		ring->tail = E1000_RDT(j);
@@ -2136,6 +2132,7 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
+	buffer_info->next_to_watch = 0;
 	/* buffer_info must be completely set up in the transmit path */
 }
 
2141 2138
@@ -2701,15 +2698,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
 				    adapter->tx_itr,
 				    adapter->tx_ring->total_packets,
 				    adapter->tx_ring->total_bytes);
-
 		current_itr = max(adapter->rx_itr, adapter->tx_itr);
 	} else {
 		current_itr = adapter->rx_itr;
 	}
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
-	if (adapter->itr_setting == 3 &&
-	    current_itr == lowest_latency)
+	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
 		current_itr = low_latency;
 
 	switch (current_itr) {
@@ -2827,7 +2822,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
-	/* Context index must be unique per ring. */
+	/* For 82575, context index must be unique per ring. */
 	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
 		mss_l4len_idx |= tx_ring->queue_index << 4;
 
@@ -2911,8 +2906,6 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		return true;
 	}
-
-
 	return false;
 }
 
@@ -3136,7 +3129,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
 	first = tx_ring->next_to_use;
-
 	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
 					      &hdr_len) : 0;
 
@@ -3210,8 +3202,7 @@ static void igb_reset_task(struct work_struct *work)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *
-igb_get_stats(struct net_device *netdev)
+static struct net_device_stats *igb_get_stats(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
@@ -3245,6 +3236,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
 		msleep(1);
+
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
 	if (netif_running(netdev))
@@ -3414,8 +3406,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	/* Phy Stats */
 	if (hw->phy.media_type == e1000_media_type_copper) {
 		if ((adapter->link_speed == SPEED_1000) &&
-		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
-				      &phy_tmp))) {
+		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
 			adapter->phy_stats.idle_errors += phy_tmp;
 		}
@@ -3427,7 +3418,6 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
 }
 
-
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
 	struct net_device *netdev = data;
@@ -3465,6 +3455,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
 	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 		igb_update_tx_dca(tx_ring);
 #endif
+
 	tx_ring->total_bytes = 0;
 	tx_ring->total_packets = 0;
 
@@ -3485,13 +3476,11 @@ static void igb_write_itr(struct igb_ring *ring)
 	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
 		switch (hw->mac.type) {
 		case e1000_82576:
-			wr32(ring->itr_register,
-			     ring->itr_val |
+			wr32(ring->itr_register, ring->itr_val |
 			     0x80000000);
 			break;
 		default:
-			wr32(ring->itr_register,
-			     ring->itr_val |
+			wr32(ring->itr_register, ring->itr_val |
 			     (ring->itr_val << 16));
 			break;
 		}
@@ -3762,7 +3751,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 #endif
 	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
 
-
 	/* If not enough Rx work done, exit the polling mode */
 	if ((work_done == 0) || !netif_running(netdev)) {
 		napi_complete(napi);
@@ -3773,7 +3761,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 		else
 			igb_update_ring_itr(rx_ring);
 	}
-
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		wr32(E1000_EIMS, rx_ring->eims_value);
 
@@ -3869,7 +3856,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 			if (i == tx_ring->count)
 				i = 0;
 		}
-
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
 	}
@@ -3938,7 +3924,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
  * igb_receive_skb - helper function to handle rx indications
  * @ring: pointer to receive ring receving this packet
  * @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @rx_desc: receive descriptor containing vlan and type information.
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void igb_receive_skb(struct igb_ring *ring, u8 status,
@@ -3965,7 +3951,6 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
 	}
 }
 
-
 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 				       u32 status_err, struct sk_buff *skb)
 {
@@ -3998,11 +3983,11 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i;
-	u32 length, hlen, staterr;
 	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int i;
+	u32 length, hlen, staterr;
 
 	i = rx_ring->next_to_clean;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4050,8 +4035,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 
 		if (!skb_shinfo(skb)->nr_frags) {
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_ps_hdr_size +
-					 NET_IP_ALIGN,
+					 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
 					 PCI_DMA_FROMDEVICE);
 			skb_put(skb, hlen);
 		}
@@ -4171,7 +4155,6 @@ next_desc:
 	return cleaned;
 }
 
-
 /**
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -4619,7 +4602,6 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 	return 0;
 }
 
-
 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);