aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/igb/igb_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2011-08-26 03:44:10 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-09-20 02:59:34 -0400
commit1cc3bd879288c7f2f47eaebdde38ac5db4bfd082 (patch)
treee25e92ded629735f516b25ba934ad70d1536d89e /drivers/net/ethernet/intel/igb/igb_main.c
parent6013690699dd8316f4018324a6c2d90377d50d2c (diff)
igb: Remove multi_tx_table and simplify igb_xmit_frame
Instead of using the multi_tx_table to map possible Tx queues to Tx rings we can just do simple subtraction for the unlikely event that the Tx queue provided exceeds the number of Tx rings.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c36
1 file changed, 22 insertions, 14 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55d643180bfc..7ad25e867add 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1875,7 +1875,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1875 1875
1876 err = -ENOMEM; 1876 err = -ENOMEM;
1877 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), 1877 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1878 IGB_ABS_MAX_TX_QUEUES); 1878 IGB_MAX_TX_QUEUES);
1879 if (!netdev) 1879 if (!netdev)
1880 goto err_alloc_etherdev; 1880 goto err_alloc_etherdev;
1881 1881
@@ -2620,10 +2620,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2620 } 2620 }
2621 } 2621 }
2622 2622
2623 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2624 int r_idx = i % adapter->num_tx_queues;
2625 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
2626 }
2627 return err; 2623 return err;
2628} 2624}
2629 2625
@@ -4363,12 +4359,21 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4363 return NETDEV_TX_OK; 4359 return NETDEV_TX_OK;
4364} 4360}
4365 4361
4362static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4363 struct sk_buff *skb)
4364{
4365 unsigned int r_idx = skb->queue_mapping;
4366
4367 if (r_idx >= adapter->num_tx_queues)
4368 r_idx = r_idx % adapter->num_tx_queues;
4369
4370 return adapter->tx_ring[r_idx];
4371}
4372
4366static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, 4373static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4367 struct net_device *netdev) 4374 struct net_device *netdev)
4368{ 4375{
4369 struct igb_adapter *adapter = netdev_priv(netdev); 4376 struct igb_adapter *adapter = netdev_priv(netdev);
4370 struct igb_ring *tx_ring;
4371 int r_idx = 0;
4372 4377
4373 if (test_bit(__IGB_DOWN, &adapter->state)) { 4378 if (test_bit(__IGB_DOWN, &adapter->state)) {
4374 dev_kfree_skb_any(skb); 4379 dev_kfree_skb_any(skb);
@@ -4380,14 +4385,17 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4380 return NETDEV_TX_OK; 4385 return NETDEV_TX_OK;
4381 } 4386 }
4382 4387
4383 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 4388 /*
4384 tx_ring = adapter->multi_tx_table[r_idx]; 4389 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4390 * in order to meet this minimum size requirement.
4391 */
4392 if (skb->len < 17) {
4393 if (skb_padto(skb, 17))
4394 return NETDEV_TX_OK;
4395 skb->len = 17;
4396 }
4385 4397
4386 /* This goes back to the question of how to logically map a tx queue 4398 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
4387 * to a flow. Right now, performance is impacted slightly negatively
4388 * if using multiple tx queues. If the stack breaks away from a
4389 * single qdisc implementation, we can look at this again. */
4390 return igb_xmit_frame_ring(skb, tx_ring);
4391} 4399}
4392 4400
4393/** 4401/**