author    Alexander Duyck <alexander.h.duyck@intel.com>    2009-02-19 23:39:23 -0500
committer David S. Miller <davem@davemloft.net>            2009-02-20 03:22:53 -0500
commit    1bfaf07bb1d7201d3c6cb984bccd9c2416e19b6c (patch)
tree      7ec3c152351adcdac3afa62cbcbd5a31b710db94 /drivers/net
parent    46544258de71d7e32342ee71a25146ec6e2e6e47 (diff)
igb: add vfs_allocated_count as placeholder for number of vfs
This is the first step in supporting SR-IOV. The vfs_allocated_count value will be 0 until we actually have VFs present. In the meantime it represents an offset value for the start of the queues.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
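As a rough, self-contained sketch of the offset idea described above (not the driver's code: struct ring, cache_ring_registers() and the trivial Q_IDX_82576() stand-in are invented for illustration): when N VFs own the first N queue slots, the PF adds vfs_allocated_count to each ring's register index, and with no VFs the offset is 0 so the mapping is unchanged.

/* Illustrative sketch only; simplified stand-ins, not the igb driver's code. */
#include <stdio.h>

#define Q_IDX_82576(i)	(i)	/* placeholder index mapping */

struct ring {
	unsigned int reg_idx;	/* hardware register index backing this ring */
};

static void cache_ring_registers(struct ring *rings, unsigned int count,
				 unsigned int vfs_allocated_count)
{
	/* vfs_allocated_count is 0 with no VFs, so nothing changes;
	 * otherwise the PF's rings start after the VF-owned queues. */
	for (unsigned int i = 0; i < count; i++)
		rings[i].reg_idx = vfs_allocated_count + Q_IDX_82576(i);
}

int main(void)
{
	struct ring rx[2];

	cache_ring_registers(rx, 2, 4);	/* pretend 4 VFs are allocated */
	for (unsigned int i = 0; i < 2; i++)
		printf("rx[%u] -> reg_idx %u\n", i, rx[i].reg_idx);
	return 0;
}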
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/igb/igb.h       |  9
-rw-r--r--  drivers/net/igb/igb_main.c  | 17
2 files changed, 17 insertions, 9 deletions
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 49fc0daf45af..3d3e5f6cd313 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -57,8 +57,10 @@ struct igb_adapter;
 #define IGB_MIN_ITR_USECS 10
 
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES 4
-#define IGB_MAX_TX_QUEUES 4
+#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
+                           (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
+#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
+#define IGB_ABS_MAX_TX_QUEUES 4
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -267,9 +269,10 @@ struct igb_adapter {
 	unsigned int flags;
 	u32 eeprom_wol;
 
-	struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
+	struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
+	unsigned int vfs_allocated_count;
 };
 
 #define IGB_FLAG_HAS_MSI (1 << 0)
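For reference, a tiny standalone sketch of how the new IGB_MAX_RX_QUEUES expression behaves for a few VF counts; the struct below is a reduced stand-in for struct igb_adapter and the program is illustrative only.

/* Illustrative sketch: evaluate the queue-count expression from the hunk above. */
#include <stdio.h>

struct igb_adapter {
	unsigned int vfs_allocated_count;
};

#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
			   (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)

int main(void)
{
	struct igb_adapter a = { 0 }, *adapter = &a;
	unsigned int counts[] = { 0, 4, 7 };

	for (unsigned int i = 0; i < 3; i++) {
		adapter->vfs_allocated_count = counts[i];
		/* 0 VFs -> 4 queues, 1..6 VFs -> 2 queues, more -> 1 queue */
		printf("%u VFs -> %d RX queues\n", counts[i], IGB_MAX_RX_QUEUES);
	}
	return 0;
}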
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5a6fff622e4f..0dcc0c109b9d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -312,6 +312,7 @@ module_exit(igb_exit_module);
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
 	int i;
+	unsigned int rbase_offset = adapter->vfs_allocated_count;
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
@@ -321,9 +322,11 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 * and continue consuming queues in the same sequence
 		 */
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+			adapter->rx_ring[i].reg_idx = rbase_offset +
+			                              Q_IDX_82576(i);
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+			adapter->tx_ring[i].reg_idx = rbase_offset +
+			                              Q_IDX_82576(i);
 		break;
 	case e1000_82575:
 	default:
@@ -423,7 +426,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 	   a vector number along with a "valid" bit.  Sadly, the layout
 	   of the table is somewhat counterintuitive. */
 	if (rx_queue > IGB_N0_QUEUE) {
-		index = (rx_queue >> 1);
+		index = (rx_queue >> 1) + adapter->vfs_allocated_count;
 		ivar = array_rd32(E1000_IVAR0, index);
 		if (rx_queue & 0x1) {
 			/* vector goes into third byte of register */
@@ -438,7 +441,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		array_wr32(E1000_IVAR0, index, ivar);
 	}
 	if (tx_queue > IGB_N0_QUEUE) {
-		index = (tx_queue >> 1);
+		index = (tx_queue >> 1) + adapter->vfs_allocated_count;
 		ivar = array_rd32(E1000_IVAR0, index);
 		if (tx_queue & 0x1) {
 			/* vector goes into high byte of register */
@@ -1157,7 +1160,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	pci_save_state(pdev);
 
 	err = -ENOMEM;
-	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
+	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+	                           IGB_ABS_MAX_TX_QUEUES);
 	if (!netdev)
 		goto err_alloc_etherdev;
 
@@ -2029,6 +2033,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 			writel(reta.dword,
 			       hw->hw_addr + E1000_RETA(0) + (j & ~3));
 		}
+
 		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
 
 		/* Fill out hash function seeds */
@@ -3150,7 +3155,7 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
 	struct igb_ring *tx_ring;
 
 	int r_idx = 0;
-	r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
+	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
 	tx_ring = adapter->multi_tx_table[r_idx];
 
 	/* This goes back to the question of how to logically map a tx queue