author		Alexander Duyck <alexander.h.duyck@intel.com>	2010-11-16 22:26:56 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2010-11-16 22:26:56 -0500
commit		7d637bcc8f461f19e1d018078792ec0cd9b07b1d (patch)
tree		05b890e7747abfdc0f4f60d88aa84676af39bb48 /drivers/net
parent		33cf09c9586a0dce472ecd2aac13e8140c9ed1a1 (diff)
ixgbe: add state flags to ring
This change adds a set of state flags to the rings, allowing each ring to operate independently so that features like RSC, packet split, and Tx hang detection can be handled per ring instead of for the entire device.

This is accomplished by re-purposing the flow director reinit_state member into a general ring state field, since dedicating an entire long to a single bit flag is wasteful.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
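The pattern at the heart of the patch: ring state lives in an unsigned long and is manipulated with the kernel's atomic bit helpers, wrapped in per-feature macros. A minimal before/after sketch, condensed from the hunks below purely for illustration (not itself part of the patch):

	/* Each ring carries its own state word; one bit per feature. */
	enum ixbge_ring_state_t {
		__IXGBE_TX_FDIR_INIT_DONE,
		__IXGBE_TX_DETECT_HANG,
		__IXGBE_RX_PS_ENABLED,
		__IXGBE_RX_RSC_ENABLED,
	};

	#define ring_is_rsc_enabled(ring) \
		test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)

	/* Before: one adapter-wide flag gated every queue at once. */
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		rsc_count = ixgbe_get_rsc_count(rx_desc);

	/* After: each ring is tested on its own, atomically. */
	if (ring_is_rsc_enabled(rx_ring))
		rsc_count = ixgbe_get_rsc_count(rx_desc);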
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h	| 44
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	| 67
2 files changed, 72 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e87b0ffd5832..160ce9234546 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -159,6 +159,31 @@ struct ixgbe_rx_queue_stats {
 	u64 alloc_rx_buff_failed;
 };
 
+enum ixbge_ring_state_t {
+	__IXGBE_TX_FDIR_INIT_DONE,
+	__IXGBE_TX_DETECT_HANG,
+	__IXGBE_RX_PS_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	void *desc;		/* descriptor ring memory */
 	struct device *dev;	/* device for DMA mapping */
@@ -167,6 +192,7 @@ struct ixgbe_ring {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 atr_sample_rate;
 	u8 atr_count;
 	u16 count;		/* amount of descriptors */
@@ -175,28 +201,25 @@ struct ixgbe_ring {
 	u16 next_to_clean;
 
 	u8 queue_index;		/* needed for multiqueue queue management */
+	u8 reg_idx;		/* holds the special value that gets
+				 * the hardware register offset
+				 * associated with this ring, which is
+				 * different for DCB and RSS modes
+				 */
+
+	u16 work_limit;		/* max work per interrupt */
 
-#define IXGBE_RING_RX_PS_ENABLED	(u8)(1)
-	u8 flags;		/* per ring feature flags */
 	u8 __iomem *tail;
 
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
-	u16 work_limit;		/* max work per interrupt */
-	u16 reg_idx;		/* holds the special value that gets
-				 * the hardware register offset
-				 * associated with this ring, which is
-				 * different for DCB and RSS modes
-				 */
-
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
 	union {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
-	unsigned long reinit_state;
 	int numa_node;
 	unsigned int size;	/* length in bytes */
 	dma_addr_t dma;		/* phys. address of descriptor ring */
@@ -441,7 +464,6 @@ enum ixbge_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
-	__IXGBE_FDIR_INIT_DONE,
 	__IXGBE_SFP_MODULE_NOT_FOUND
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index dc78736d3052..b798501500e6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -687,7 +687,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 
 	/* Detect a transmit hang in hardware, this serializes the
 	 * check with the clearing of time_stamp and movement of eop */
-	adapter->detect_tx_hung = false;
+	clear_check_for_tx_hang(tx_ring);
 	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    ixgbe_tx_xon_state(adapter, tx_ring)) {
@@ -786,13 +786,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	if (adapter->detect_tx_hung) {
-		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+	if (check_for_tx_hang(tx_ring) &&
+	    ixgbe_check_tx_hang(adapter, tx_ring, i)) {
 		/* schedule immediate reset if we believe we hung */
 		e_info(probe, "tx hang %d detected, resetting "
 		       "adapter\n", adapter->tx_timeout_count + 1);
 		ixgbe_tx_timeout(adapter->netdev);
-		}
 	}
 
 	/* re-arm the interrupt */
@@ -1084,7 +1083,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			}
 		}
 
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			if (!bi->page) {
 				bi->page = netdev_alloc_page(rx_ring->netdev);
 				if (!bi->page) {
@@ -1214,7 +1213,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		(*work_done)++;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -1284,7 +1283,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+		if (ring_is_rsc_enabled(rx_ring))
 			rsc_count = ixgbe_get_rsc_count(rx_desc);
 
 		if (rsc_count) {
@@ -1299,7 +1298,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			if (skb->prev)
 				skb = ixgbe_transform_rsc_queue(skb,
 						&(rx_ring->rx_stats.rsc_count));
-			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			if (ring_is_rsc_enabled(rx_ring)) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(rx_ring->dev,
 							 IXGBE_RSC_CB(skb)->dma,
@@ -1308,7 +1307,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					IXGBE_RSC_CB(skb)->dma = 0;
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
-				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+				if (ring_is_ps_enabled(rx_ring))
 					rx_ring->rx_stats.rsc_count +=
 						 skb_shinfo(skb)->nr_frags;
 				else
@@ -1320,7 +1319,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			rx_ring->stats.bytes += skb->len;
 			u64_stats_update_end(&rx_ring->syncp);
 		} else {
-			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
 				next_buffer->skb = skb;
@@ -1782,8 +1781,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct ixgbe_ring *tx_ring =
 						adapter->tx_ring[i];
-			if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-					       &tx_ring->reinit_state))
+			if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+					       &tx_ring->state))
 				schedule_work(&adapter->fdir_reinit_task);
 		}
 	}
@@ -2522,7 +2521,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	}
 
 	/* reinitialize flowdirector state */
-	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+	set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 
 	/* enable queue */
 	txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2632,7 +2631,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 		  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2727,7 +2726,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	int rx_buf_len;
 	u16 reg_idx = ring->reg_idx;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+	if (!ring_is_rsc_enabled(ring))
 		return;
 
 	rx_buf_len = ring->rx_buf_len;
@@ -2738,7 +2737,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	 * total size of max desc * buf_len is not greater
 	 * than 65535
 	 */
-	if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2976,19 +2975,28 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		rx_ring->rx_buf_len = rx_buf_len;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+			set_ring_ps_enabled(rx_ring);
+		else
+			clear_ring_ps_enabled(rx_ring);
+
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			set_ring_rsc_enabled(rx_ring);
 		else
-			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+			clear_ring_rsc_enabled(rx_ring);
 
#ifdef IXGBE_FCOE
 		if (netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((i >= f->mask) && (i < f->mask + f->indices)) {
-				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+				clear_ring_ps_enabled(rx_ring);
 				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
 					rx_ring->rx_buf_len =
 						IXGBE_FCOE_JUMBO_FRAME_SIZE;
+			} else if (!ring_is_rsc_enabled(rx_ring) &&
+				   !ring_is_ps_enabled(rx_ring)) {
+				rx_ring->rx_buf_len =
+					IXGBE_FCOE_JUMBO_FRAME_SIZE;
 			}
 		}
#endif /* IXGBE_FCOE */
@@ -5729,8 +5737,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			set_bit(__IXGBE_FDIR_INIT_DONE,
-				&(adapter->tx_ring[i]->reinit_state));
+			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+				&(adapter->tx_ring[i]->state));
 	} else {
 		e_err(probe, "failed to finish FDIR re-initialization, "
 		      "ignored adding FDIR ATR filters\n");
@@ -5816,7 +5824,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 				netif_carrier_on(netdev);
 		} else {
 			/* Force detection of hung controller */
-			adapter->detect_tx_hung = true;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				tx_ring = adapter->tx_ring[i];
+				set_check_for_tx_hang(tx_ring);
+			}
 		}
 	} else {
 		adapter->link_up = false;
@@ -6434,8 +6445,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (tx_ring->atr_sample_rate) {
 		++tx_ring->atr_count;
 		if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-		    test_bit(__IXGBE_FDIR_INIT_DONE,
-			     &tx_ring->reinit_state)) {
+		    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+			     &tx_ring->state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
 				  tx_flags, protocol);
 			tx_ring->atr_count = 0;