author    Neil Horman <nhorman@tuxdriver.com>    2011-06-03 06:35:52 -0400
committer David S. Miller <davem@davemloft.net>  2011-06-05 17:31:25 -0400
commit    374eeb5a9d77ea719c5c46f4d70226623f4528ce
tree      2f9fdad71c04da1fe5cdb12433238d24d0a0b738 /drivers/net/bonding
parent    5b446c6a7179513edcb34706088c4ce901b9a039
bonding: reset queue mapping prior to transmission to physical device (v5)
The bonding driver is multiqueue enabled, in which each queue represents a slave to enable optional steering of output frames to given slaves against the default output policy. However, it needs to reset the skb->queue_mapping prior to queuing to the physical device or the physical slave (if it is multiqueue) could wind up transmitting on an unintended tx queue Change Notes: v2) Based on first pass review, updated the patch to restore the origional queue mapping that was found in bond_select_queue, rather than simply resetting to zero. This preserves the value of queue_mapping when it was set on receive in the forwarding case which is desireable. v3) Fixed spelling an casting error in skb->cb v4) fixed to store raw queue_mapping to avoid double decrement v5) Eric D requested that ->cb access be wrapped in a macro. Signed-off-by: Neil Horman <nhorman@tuxdriver.com> CC: Jay Vosburgh <fubar@us.ibm.com> CC: Andy Gospodarek <andy@greyhouse.net> CC: "David S. Miller" <davem@davemloft.net> Signed-off-by: Jay Vosburgh <fubar@us.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bonding')
-rw-r--r--  drivers/net/bonding/bond_main.c  11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 17b4dd94da90..652b30e525d0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -388,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
 	return next;
 }
 
+#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -400,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
 	skb->dev = slave_dev;
 	skb->priority = 1;
+
+	skb->queue_mapping = bond_queue_mapping(skb);
+
 	if (unlikely(netpoll_tx_running(slave_dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 	else
@@ -4206,6 +4211,7 @@ static inline int bond_slave_override(struct bonding *bond,
 	return res;
 }
 
+
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	/*
@@ -4216,6 +4222,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 	 */
 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
 
+	/*
+	 * Save the original txq to restore before passing to the driver
+	 */
+	bond_queue_mapping(skb) = skb->queue_mapping;
+
 	if (unlikely(txq >= dev->real_num_tx_queues)) {
 		do {
 			txq -= dev->real_num_tx_queues;
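The last hunk is cut off above; the visible do/while folds a recorded rx queue number back into the device's tx queue range by repeated subtraction. A rough standalone sketch of that reduction follows; the function name clamp_txq is invented for this illustration and does not appear in the patch.

/* Fold txq into [0, real_num_tx_queues) by repeated subtraction, which
 * is equivalent to txq % real_num_tx_queues for unsigned txq.
 */
#include <stdint.h>

static uint16_t clamp_txq(uint16_t txq, uint16_t real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;
}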