diff options
author | Yi Zou <yi.zou@intel.com> | 2009-12-03 06:32:44 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-12-03 18:43:21 -0500 |
commit | 5f715823a0f54ed010af9a27677ff8e992d745e8 (patch) | |
tree | 7e70ebe8edc51560c9b2c6399f1a12a0a1343059 /drivers/net/ixgbe | |
parent | 61a0f421ceccd52d0d893daf743b04c225ba5ef0 (diff) |
ixgbe: select FCoE Tx queue in ndo_select_queue
This removes the Tx queue selection for FCoE traffic from ixgbe_xmit_frame()
and does it in the ndo_select_queue() call, moving all Tx queue selection
into a single routine.
Signed-off-by: Yi Zou <yi.zou@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 24 |
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 09990ed105f..e3dc68ba4b7 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -5286,10 +5286,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
5286 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 5286 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) |
5287 | { | 5287 | { |
5288 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 5288 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
5289 | int txq = smp_processor_id(); | ||
5289 | 5290 | ||
5290 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | 5291 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) |
5291 | return smp_processor_id(); | 5292 | return txq; |
5292 | 5293 | ||
5294 | #ifdef IXGBE_FCOE | ||
5295 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | ||
5296 | (skb->protocol == htons(ETH_P_FCOE))) { | ||
5297 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | ||
5298 | txq += adapter->ring_feature[RING_F_FCOE].mask; | ||
5299 | return txq; | ||
5300 | } | ||
5301 | #endif | ||
5293 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 5302 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
5294 | return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; | 5303 | return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; |
5295 | 5304 | ||
@@ -5304,7 +5313,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5304 | unsigned int first; | 5313 | unsigned int first; |
5305 | unsigned int tx_flags = 0; | 5314 | unsigned int tx_flags = 0; |
5306 | u8 hdr_len = 0; | 5315 | u8 hdr_len = 0; |
5307 | int r_idx = 0, tso; | 5316 | int tso; |
5308 | int count = 0; | 5317 | int count = 0; |
5309 | unsigned int f; | 5318 | unsigned int f; |
5310 | 5319 | ||
@@ -5312,13 +5321,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5312 | tx_flags |= vlan_tx_tag_get(skb); | 5321 | tx_flags |= vlan_tx_tag_get(skb); |
5313 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5322 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5314 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; | 5323 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; |
5315 | tx_flags |= (skb->queue_mapping << 13); | 5324 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
5316 | } | 5325 | } |
5317 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5326 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5318 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5327 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5319 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5328 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5320 | if (skb->priority != TC_PRIO_CONTROL) { | 5329 | if (skb->priority != TC_PRIO_CONTROL) { |
5321 | tx_flags |= (skb->queue_mapping << 13); | 5330 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
5322 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5331 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5323 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5332 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5324 | } else { | 5333 | } else { |
@@ -5327,8 +5336,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5327 | } | 5336 | } |
5328 | } | 5337 | } |
5329 | 5338 | ||
5330 | r_idx = skb->queue_mapping; | 5339 | tx_ring = &adapter->tx_ring[skb->queue_mapping]; |
5331 | tx_ring = &adapter->tx_ring[r_idx]; | ||
5332 | 5340 | ||
5333 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5341 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
5334 | (skb->protocol == htons(ETH_P_FCOE))) { | 5342 | (skb->protocol == htons(ETH_P_FCOE))) { |
@@ -5340,10 +5348,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5340 | tx_flags |= ((adapter->fcoe.up << 13) | 5348 | tx_flags |= ((adapter->fcoe.up << 13) |
5341 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | 5349 | << IXGBE_TX_FLAGS_VLAN_SHIFT); |
5342 | #endif | 5350 | #endif |
5343 | r_idx = smp_processor_id(); | ||
5344 | r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | ||
5345 | r_idx += adapter->ring_feature[RING_F_FCOE].mask; | ||
5346 | tx_ring = &adapter->tx_ring[r_idx]; | ||
5347 | #endif | 5351 | #endif |
5348 | } | 5352 | } |
5349 | /* four things can cause us to need a context descriptor */ | 5353 | /* four things can cause us to need a context descriptor */ |