path: root/drivers/net/ixgbe/ixgbe.h
author     Ayyappan Veeraiyan <ayyappan.veeraiyan@intel.com>  2008-03-03 18:03:45 -0500
committer  Jeff Garzik <jeff@garzik.org>  2008-03-17 07:49:28 -0400
commit     021230d40ae0e6508d6c717b6e0d6d81cd77ac25 (patch)
tree       44670c729adad8c4def18e4ff417542b5ad75b13 /drivers/net/ixgbe/ixgbe.h
parent     53e7c46b0680ccc3ac67a2b8cd7f050569836e44 (diff)
ixgbe: Introduce MSI-X queue vector code
This code abstracts the per-queue MSI-X interrupt vector into a queue vector layer. This abstraction is needed because a machine can have many more queues than available MSI-X vectors. The MSI-X irq vectors are remapped to a shared queue vector, which can point to several hardware queues (both RX and TX). The NAPI algorithm then cleans the appropriate rings/queues on interrupt or poll.

The remapping is a delicate and complex calculation: it avoids unbalancing the irq load, spreads the irqs as widely as possible, and may combine RX and TX flows onto the same queue vector. This effectively enables receive flow hashing across vectors and helps balance irq load across CPUs.

Signed-off-by: Ayyappan Veeraiyan <ayyappan.veeraiyan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Acked-by: Waskiewicz Jr, Peter P <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
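For a feel of the "spread the irqs as much as possible" remapping described above, the following is a minimal stand-alone sketch of an even ring-to-vector split. The function name and output are illustrative only, not the driver's actual code, which differs in detail.

/*
 * Hypothetical user-space sketch of an even ring-to-vector split;
 * not the ixgbe driver's real remapping code.
 */
#include <stdio.h>

static void map_rings_to_vectors(int num_rings, int num_vectors)
{
	int v, ring = 0;

	for (v = 0; v < num_vectors; v++) {
		/* the first (num_rings % num_vectors) vectors take one
		 * extra ring so the remainder is spread, not lumped */
		int count = num_rings / num_vectors +
			    (v < num_rings % num_vectors ? 1 : 0);

		while (count--)
			printf("rx ring %d -> q_vector %d\n", ring++, v);
	}
}

int main(void)
{
	/* e.g. 16 RX rings shared across 5 MSI-X queue vectors */
	map_rings_to_vectors(16, 5);
	return 0;
}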
Diffstat (limited to 'drivers/net/ixgbe/ixgbe.h')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h  69
1 file changed, 58 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d0bf206632ca..20774772b608 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -120,7 +120,6 @@ struct ixgbe_queue_stats {
 };
 
 struct ixgbe_ring {
-	struct ixgbe_adapter *adapter; /* backlink */
 	void *desc; /* descriptor ring memory */
 	dma_addr_t dma; /* phys. address of descriptor ring */
 	unsigned int size; /* length in bytes */
@@ -128,6 +127,7 @@ struct ixgbe_ring {
 	unsigned int next_to_use;
 	unsigned int next_to_clean;
 
+	int queue_index; /* needed for multiqueue queue management */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
@@ -137,7 +137,13 @@ struct ixgbe_ring {
 	u16 tail;
 
 
+	u16 reg_idx; /* holds the special value that gets the hardware register
+		      * offset associated with this ring, which is different
+		      * for DCE and RSS modes */
 	struct ixgbe_queue_stats stats;
+	u8 v_idx; /* maps directly to the index for this ring in the hardware
+		   * vector array, can also be used for finding the bit in EICR
+		   * and friends that represents the vector for this ring */
 
 	u32 eims_value;
 	u16 itr_register;
@@ -146,6 +152,31 @@ struct ixgbe_ring {
 	u16 work_limit; /* max work per interrupt */
 };
 
+#define RING_F_VMDQ 1
+#define RING_F_RSS 2
+#define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 16
+struct ixgbe_ring_feature {
+	int indices;
+	int mask;
+};
+
+#define MAX_RX_QUEUES 64
+#define MAX_TX_QUEUES 32
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbe_q_vector {
+	struct ixgbe_adapter *adapter;
+	struct napi_struct napi;
+	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+	u8 rxr_count; /* Rx ring count assigned to this vector */
+	u8 txr_count; /* Tx ring count assigned to this vector */
+	u32 eitr;
+};
+
 /* Helper macros to switch between ints/sec and what the register uses.
  * And yes, it's the same math going both ways.
  */
@@ -166,6 +197,14 @@ struct ixgbe_ring {
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
 
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 16
+#define MIN_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
 /* board specific private data structure */
 struct ixgbe_adapter {
 	struct timer_list watchdog_timer;
@@ -173,10 +212,11 @@ struct ixgbe_adapter {
 	u16 bd_number;
 	u16 rx_buf_len;
 	struct work_struct reset_task;
+	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
 
 	/* TX */
 	struct ixgbe_ring *tx_ring; /* One per active queue */
-	struct napi_struct napi;
 	u64 restart_queue;
 	u64 lsc_int;
 	u64 hw_tso_ctxt;
@@ -192,22 +232,26 @@ struct ixgbe_adapter {
 	u64 non_eop_descs;
 	int num_tx_queues;
 	int num_rx_queues;
+	int num_msix_vectors;
+	struct ixgbe_ring_feature ring_feature[3];
 	struct msix_entry *msix_entries;
 
 	u64 rx_hdr_split;
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
 	u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0)
 #define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
 #define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
 #define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
 #define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
-
-	/* Interrupt Throttle Rate */
-	u32 rx_eitr;
-	u32 tx_eitr;
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -218,7 +262,10 @@ struct ixgbe_adapter {
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbe_hw_stats stats;
-	char lsc_name[IFNAMSIZ + 5];
+
+	/* Interrupt Throttle Rate */
+	u32 rx_eitr;
+	u32 tx_eitr;
 
 	unsigned long state;
 	u64 tx_busy;
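
A note on the new ixgbe_q_vector fields above: rxr_idx/txr_idx are bitmaps of the ring indices a vector owns, and rxr_count/txr_count record how many bits are set, so the poll routine only visits its own rings. The stand-alone sketch below assumes a single 64-bit word in place of DECLARE_BITMAP() (MAX_RX_QUEUES is 64) and uses hypothetical names; it is not the driver's actual poll/clean code.

/*
 * Stand-alone sketch, not driver code: a 64-bit word stands in for the
 * DECLARE_BITMAP() used in ixgbe_q_vector, and the helper names below
 * are hypothetical.
 */
#include <stdio.h>

struct q_vector_sketch {
	unsigned long long rxr_bitmap; /* one bit per owned Rx ring index */
	unsigned int rxr_count;        /* number of bits set above */
};

static void poll_vector(const struct q_vector_sketch *qv)
{
	unsigned int cleaned = 0, ring;

	for (ring = 0; ring < 64 && cleaned < qv->rxr_count; ring++) {
		if (!(qv->rxr_bitmap & (1ULL << ring)))
			continue;
		/* a real poll routine would clean this Rx ring here,
		 * splitting the NAPI budget across the owned rings */
		printf("q_vector cleans rx ring %u\n", ring);
		cleaned++;
	}
}

int main(void)
{
	/* a vector that ended up owning Rx rings 2, 5 and 9 */
	struct q_vector_sketch qv = {
		.rxr_bitmap = (1ULL << 2) | (1ULL << 5) | (1ULL << 9),
		.rxr_count  = 3,
	};

	poll_vector(&qv);
	return 0;
}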