path: root/drivers/net/ixgbe/ixgbe_main.c
author	Alexander Duyck <alexander.h.duyck@intel.com>	2010-11-16 22:26:56 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2010-11-16 22:26:56 -0500
commit	7d637bcc8f461f19e1d018078792ec0cd9b07b1d (patch)
tree	05b890e7747abfdc0f4f60d88aa84676af39bb48 /drivers/net/ixgbe/ixgbe_main.c
parent	33cf09c9586a0dce472ecd2aac13e8140c9ed1a1 (diff)
ixgbe: add state flags to ring
This change adds a set of state flags to the rings so that each ring can function independently, allowing features like RSC, packet split, and TX hang detection to be handled per ring instead of for the entire device. This is accomplished by re-purposing the flow director reinit_state member and making it a general per-ring state bitmap, since spending an entire long on a single bit flag is wasteful.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	| 67
1 file changed, 39 insertions(+), 28 deletions(-)
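The check_for_tx_hang(), ring_is_*(), and set/clear_ring_*() helpers used below are introduced in ixgbe.h, which falls outside this diffstat (limited to ixgbe_main.c). As a rough sketch of what this patch assumes, the helpers would wrap the kernel's atomic bitops (test_bit/set_bit/clear_bit) around a new 'unsigned long state' member of struct ixgbe_ring, with one bit per feature; the exact bit names and ordering here are illustrative:

/* Sketch (not part of this diff): per-ring state bits and accessors
 * as they would appear in ixgbe.h, assuming the helpers are thin
 * wrappers over atomic bitops on ring->state.
 */
enum ixgbe_ring_state_t {
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_RX_PS_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_ps_enabled(ring) \
	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define set_ring_ps_enabled(ring) \
	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)

Packing the flags into one unsigned long bitmap gives atomic per-bit access and room for more per-ring features, where the old layout spent a whole long (reinit_state) on a single flag and kept hang detection in a device-global detect_tx_hung bool.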
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index dc78736d3052..b798501500e6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -687,7 +687,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 
 	/* Detect a transmit hang in hardware, this serializes the
 	 * check with the clearing of time_stamp and movement of eop */
-	adapter->detect_tx_hung = false;
+	clear_check_for_tx_hang(tx_ring);
 	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    ixgbe_tx_xon_state(adapter, tx_ring)) {
@@ -786,13 +786,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	if (adapter->detect_tx_hung) {
-		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-			/* schedule immediate reset if we believe we hung */
-			e_info(probe, "tx hang %d detected, resetting "
-			       "adapter\n", adapter->tx_timeout_count + 1);
-			ixgbe_tx_timeout(adapter->netdev);
-		}
+	if (check_for_tx_hang(tx_ring) &&
+	    ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+		/* schedule immediate reset if we believe we hung */
+		e_info(probe, "tx hang %d detected, resetting "
+		       "adapter\n", adapter->tx_timeout_count + 1);
+		ixgbe_tx_timeout(adapter->netdev);
 	}
 
 	/* re-arm the interrupt */
@@ -1084,7 +1083,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			}
 		}
 
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			if (!bi->page) {
 				bi->page = netdev_alloc_page(rx_ring->netdev);
 				if (!bi->page) {
@@ -1214,7 +1213,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		(*work_done)++;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			      IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -1284,7 +1283,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+		if (ring_is_rsc_enabled(rx_ring))
 			rsc_count = ixgbe_get_rsc_count(rx_desc);
 
 		if (rsc_count) {
@@ -1299,7 +1298,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			if (skb->prev)
 				skb = ixgbe_transform_rsc_queue(skb,
 						&(rx_ring->rx_stats.rsc_count));
-			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			if (ring_is_rsc_enabled(rx_ring)) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(rx_ring->dev,
 							 IXGBE_RSC_CB(skb)->dma,
@@ -1308,7 +1307,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					IXGBE_RSC_CB(skb)->dma = 0;
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
-				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+				if (ring_is_ps_enabled(rx_ring))
 					rx_ring->rx_stats.rsc_count +=
 						skb_shinfo(skb)->nr_frags;
 				else
@@ -1320,7 +1319,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			rx_ring->stats.bytes += skb->len;
 			u64_stats_update_end(&rx_ring->syncp);
 		} else {
-			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
 				next_buffer->skb = skb;
@@ -1782,8 +1781,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct ixgbe_ring *tx_ring =
 						adapter->tx_ring[i];
-			if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-					       &tx_ring->reinit_state))
+			if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+					       &tx_ring->state))
 				schedule_work(&adapter->fdir_reinit_task);
 		}
 	}
@@ -2522,7 +2521,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	}
 
 	/* reinitialize flowdirector state */
-	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+	set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 
 	/* enable queue */
 	txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2632,7 +2631,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 		  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2727,7 +2726,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	int rx_buf_len;
 	u16 reg_idx = ring->reg_idx;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+	if (!ring_is_rsc_enabled(ring))
 		return;
 
 	rx_buf_len = ring->rx_buf_len;
@@ -2738,7 +2737,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	 * total size of max desc * buf_len is not greater
 	 * than 65535
 	 */
-	if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(ring)) {
 #if (MAX_SKB_FRAGS > 16)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 #elif (MAX_SKB_FRAGS > 8)
@@ -2976,19 +2975,28 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		rx_ring->rx_buf_len = rx_buf_len;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+			set_ring_ps_enabled(rx_ring);
+		else
+			clear_ring_ps_enabled(rx_ring);
+
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			set_ring_rsc_enabled(rx_ring);
 		else
-			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+			clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
 		if (netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((i >= f->mask) && (i < f->mask + f->indices)) {
-				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+				clear_ring_ps_enabled(rx_ring);
 				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
 					rx_ring->rx_buf_len =
 						IXGBE_FCOE_JUMBO_FRAME_SIZE;
+			} else if (!ring_is_rsc_enabled(rx_ring) &&
+				   !ring_is_ps_enabled(rx_ring)) {
+				rx_ring->rx_buf_len =
+						IXGBE_FCOE_JUMBO_FRAME_SIZE;
 			}
 		}
 #endif /* IXGBE_FCOE */
@@ -5729,8 +5737,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			set_bit(__IXGBE_FDIR_INIT_DONE,
-				&(adapter->tx_ring[i]->reinit_state));
+			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+				&(adapter->tx_ring[i]->state));
 	} else {
 		e_err(probe, "failed to finish FDIR re-initialization, "
 		      "ignored adding FDIR ATR filters\n");
@@ -5816,7 +5824,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 			netif_carrier_on(netdev);
 		} else {
 			/* Force detection of hung controller */
-			adapter->detect_tx_hung = true;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				tx_ring = adapter->tx_ring[i];
+				set_check_for_tx_hang(tx_ring);
+			}
 		}
 	} else {
 		adapter->link_up = false;
@@ -6434,8 +6445,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (tx_ring->atr_sample_rate) {
 		++tx_ring->atr_count;
 		if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-		    test_bit(__IXGBE_FDIR_INIT_DONE,
-			     &tx_ring->reinit_state)) {
+		    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+			     &tx_ring->state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
 				  tx_flags, protocol);
 			tx_ring->atr_count = 0;