author | PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> | 2010-02-03 09:19:12 -0500
committer | David S. Miller <davem@davemloft.net> | 2010-02-03 22:48:35 -0500
commit | 4a0b9ca015bae64df7d97c9e0a1d33159b36e69f (patch)
tree | 5289dc2752eaeec08282a94008ff76c980a3f645 /drivers/net/ixgbe/ixgbe.h
parent | 1a6c14a2c7c313c584f26730e67f062f474bb744 (diff)
ixgbe: Make descriptor ring allocations NUMA-aware
This patch allocates the ring structures themselves on each
NUMA node along with the buffer_info structures. This way we
don't allocate the entire ring memory on a single node in one
big block, thus reducing NUMA node memory crosstalk.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe.h')
-rw-r--r-- | drivers/net/ixgbe/ixgbe.h | 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 33b79e812b4d..bffbe0d52d33 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -175,6 +175,7 @@ struct ixgbe_ring {
 
 	struct ixgbe_queue_stats stats;
 	unsigned long reinit_state;
+	int numa_node;
 	u64 rsc_count;			/* stat for coalesced packets */
 	u64 rsc_flush;			/* stats for flushed packets */
 	u32 restart_queue;		/* track tx queue restarts */
@@ -293,7 +294,7 @@ struct ixgbe_adapter {
 	u16 eitr_high;
 
 	/* TX */
-	struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
 	u32 tx_timeout_count;
 	bool detect_tx_hung;
@@ -302,7 +303,7 @@ struct ixgbe_adapter {
 	u64 lsc_int;
 
 	/* RX */
-	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_rx_queues;
 	int num_rx_pools;		/* == num_rx_queues in 82598 */
 	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
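The hunks above only show the data-structure side of the change: a per-ring numa_node field and per-queue arrays of ring pointers (tx_ring[MAX_TX_QUEUES], rx_ring[MAX_RX_QUEUES]) in place of a single pointer to one contiguous block. As a rough illustration of how an allocation path could use them, here is a minimal sketch; it is not code from this patch, and the round-robin node policy and the example_alloc_tx_rings() helper name are assumptions made for illustration only.

```c
/*
 * Illustrative sketch only -- not code from this commit. It shows the
 * idea the commit message describes: allocate each ring structure on a
 * chosen NUMA node instead of carving every ring out of one large
 * allocation on a single node. The round-robin node choice and the
 * helper name are assumptions for this example.
 */
#include <linux/slab.h>
#include <linux/nodemask.h>

#include "ixgbe.h"

static int example_alloc_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		int node = i % num_online_nodes();	/* assumed spread policy */
		struct ixgbe_ring *ring;

		/* Per-ring, per-node allocation instead of one big block. */
		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
		if (!ring)
			ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err;

		ring->numa_node = node;		/* later buffer_info allocs can follow it */
		adapter->tx_ring[i] = ring;	/* per-queue pointer added by this patch */
	}
	return 0;

err:
	while (i--) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	return -ENOMEM;
}
```

The point of the pointer-array layout is that each ring (and, by recording numa_node, its buffer_info array) can live on the memory node closest to the CPU servicing that queue, which is what reduces the cross-node traffic the commit message mentions.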