about summary refs log tree commit diff stats
path: root/drivers/net/ixgbe/ixgbe.h
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-11-16 22:26:56 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2010-11-16 22:26:56 -0500
commit7d637bcc8f461f19e1d018078792ec0cd9b07b1d (patch)
tree05b890e7747abfdc0f4f60d88aa84676af39bb48 /drivers/net/ixgbe/ixgbe.h
parent33cf09c9586a0dce472ecd2aac13e8140c9ed1a1 (diff)
ixgbe: add a state flags to ring
This change adds a set of state flags to the rings that allow them to independently function allowing for features like RSC, packet split, and TX hang detection to be done per ring instead of for the entire device. This is accomplished by re-purposing the flow director reinit_state member and making it a global state instead since a long for a single bit flag is a bit wasteful.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe.h')
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 44
1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e87b0ffd5832..160ce9234546 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -159,6 +159,31 @@ struct ixgbe_rx_queue_stats {
159 u64 alloc_rx_buff_failed; 159 u64 alloc_rx_buff_failed;
160}; 160};
161 161
162enum ixbge_ring_state_t {
163 __IXGBE_TX_FDIR_INIT_DONE,
164 __IXGBE_TX_DETECT_HANG,
165 __IXGBE_RX_PS_ENABLED,
166 __IXGBE_RX_RSC_ENABLED,
167};
168
169#define ring_is_ps_enabled(ring) \
170 test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
171#define set_ring_ps_enabled(ring) \
172 set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
173#define clear_ring_ps_enabled(ring) \
174 clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
175#define check_for_tx_hang(ring) \
176 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
177#define set_check_for_tx_hang(ring) \
178 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
179#define clear_check_for_tx_hang(ring) \
180 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
181#define ring_is_rsc_enabled(ring) \
182 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
183#define set_ring_rsc_enabled(ring) \
184 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
185#define clear_ring_rsc_enabled(ring) \
186 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
162struct ixgbe_ring { 187struct ixgbe_ring {
163 void *desc; /* descriptor ring memory */ 188 void *desc; /* descriptor ring memory */
164 struct device *dev; /* device for DMA mapping */ 189 struct device *dev; /* device for DMA mapping */
@@ -167,6 +192,7 @@ struct ixgbe_ring {
167 struct ixgbe_tx_buffer *tx_buffer_info; 192 struct ixgbe_tx_buffer *tx_buffer_info;
168 struct ixgbe_rx_buffer *rx_buffer_info; 193 struct ixgbe_rx_buffer *rx_buffer_info;
169 }; 194 };
195 unsigned long state;
170 u8 atr_sample_rate; 196 u8 atr_sample_rate;
171 u8 atr_count; 197 u8 atr_count;
172 u16 count; /* amount of descriptors */ 198 u16 count; /* amount of descriptors */
@@ -175,28 +201,25 @@ struct ixgbe_ring {
175 u16 next_to_clean; 201 u16 next_to_clean;
176 202
177 u8 queue_index; /* needed for multiqueue queue management */ 203 u8 queue_index; /* needed for multiqueue queue management */
204 u8 reg_idx; /* holds the special value that gets
205 * the hardware register offset
206 * associated with this ring, which is
207 * different for DCB and RSS modes
208 */
209
210 u16 work_limit; /* max work per interrupt */
178 211
179#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
180 u8 flags; /* per ring feature flags */
181 u8 __iomem *tail; 212 u8 __iomem *tail;
182 213
183 unsigned int total_bytes; 214 unsigned int total_bytes;
184 unsigned int total_packets; 215 unsigned int total_packets;
185 216
186 u16 work_limit; /* max work per interrupt */
187 u16 reg_idx; /* holds the special value that gets
188 * the hardware register offset
189 * associated with this ring, which is
190 * different for DCB and RSS modes
191 */
192
193 struct ixgbe_queue_stats stats; 217 struct ixgbe_queue_stats stats;
194 struct u64_stats_sync syncp; 218 struct u64_stats_sync syncp;
195 union { 219 union {
196 struct ixgbe_tx_queue_stats tx_stats; 220 struct ixgbe_tx_queue_stats tx_stats;
197 struct ixgbe_rx_queue_stats rx_stats; 221 struct ixgbe_rx_queue_stats rx_stats;
198 }; 222 };
199 unsigned long reinit_state;
200 int numa_node; 223 int numa_node;
201 unsigned int size; /* length in bytes */ 224 unsigned int size; /* length in bytes */
202 dma_addr_t dma; /* phys. address of descriptor ring */ 225 dma_addr_t dma; /* phys. address of descriptor ring */
@@ -441,7 +464,6 @@ enum ixbge_state_t {
441 __IXGBE_TESTING, 464 __IXGBE_TESTING,
442 __IXGBE_RESETTING, 465 __IXGBE_RESETTING,
443 __IXGBE_DOWN, 466 __IXGBE_DOWN,
444 __IXGBE_FDIR_INIT_DONE,
445 __IXGBE_SFP_MODULE_NOT_FOUND 467 __IXGBE_SFP_MODULE_NOT_FOUND
446}; 468};
447 469