path: root/drivers/net/bonding/bond_alb.c
author	Mahesh Bandewar <maheshb@google.com>	2014-04-22 19:30:22 -0400
committer	David S. Miller <davem@davemloft.net>	2014-04-24 13:04:34 -0400
commit	e9f0fb88493570200b8dc1cc02d3e676412d25bc (patch)
tree	edd781d8c00772c53e0736a95cf26f0ef2e620dc /drivers/net/bonding/bond_alb.c
parent	f05b42eaa22cd7c6736d31316e6046c5127f8721 (diff)
bonding: Add tlb_dynamic_lb parameter for tlb mode
The aggressive load balancing causes packet re-ordering as active flows are moved from one slave to another within the group. Sometimes this aggressive lb is not necessary if the preference is for less re-ordering. This parameter, if used with value "0", disables the dynamic flow shuffling, minimizing packet re-ordering. The side effect, of course, is that it has to live with the static load balancing that the hashing distribution provides. This impact is less severe if the correct xmit-hashing-policy is used for the tlb setup. The default value of the parameter is "1", mimicking the earlier behavior.

Ran the netperf test with 200 streams for 1 min between two hosts with a 4x1G trunk (xmit-lb mode with xmit-policy L3+4) before and after these changes. The following command was used for each of the 200 instances:

    netperf -t TCP_RR -l 60 -s 5 -H <host> -- -r81920,81920

Transactions per second:
    Before change: 1,367.11
    After change:  1,470.65

Change-Id: Ie3f75c77282cf602e83a6e833c6eb164e72a0990
Signed-off-by: Mahesh Bandewar <maheshb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
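To make the trade-off concrete, below is a minimal user-space sketch (not the kernel code) of the static path this patch adds: with tlb_dynamic_lb set to "0", a flow's transmit hash picks a slave by simple modulo, so the flow-to-slave mapping never changes and packets within a flow are not reordered; how evenly load spreads then depends entirely on the hash. The flow_hash() helper is a hypothetical stand-in for bond_xmit_hash() with an L3+4 policy, and the slave count and port values are illustrative.

#include <stdio.h>

/* hypothetical stand-in for bond_xmit_hash() with an L3+4 policy */
static unsigned int flow_hash(unsigned int src_port, unsigned int dst_port)
{
	return (src_port * 31u + dst_port) * 2654435761u;
}

int main(void)
{
	const unsigned int slave_cnt = 4;	/* e.g. the 4x1G trunk from the test */
	unsigned int src_port;

	for (src_port = 40000; src_port < 40008; src_port++) {
		unsigned int hash = flow_hash(src_port, 80);

		/* static pick: the same flow maps to the same slave every
		 * time, so its packets are never reordered across slaves */
		printf("flow src port %u -> slave %u\n",
		       src_port, hash % slave_cnt);
	}
	return 0;
}

A poor hash policy would cluster many flows on one slave with no dynamic shuffling to correct it, which is why the commit message recommends choosing the xmit-hashing-policy carefully for this mode.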
Diffstat (limited to 'drivers/net/bonding/bond_alb.c')
-rw-r--r--	drivers/net/bonding/bond_alb.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 153232ed4b3f..70de039dad2e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1356,7 +1356,8 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 	if (!tx_slave) {
 		/* unbalanced or unassigned, send through primary */
 		tx_slave = rcu_dereference(bond->curr_active_slave);
-		bond_info->unbalanced_load += skb->len;
+		if (bond->params.tlb_dynamic_lb)
+			bond_info->unbalanced_load += skb->len;
 	}
 
 	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
@@ -1369,7 +1370,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 		goto out;
 	}
 
-	if (tx_slave) {
+	if (tx_slave && bond->params.tlb_dynamic_lb) {
 		_lock_tx_hashtbl(bond);
 		__tlb_clear_slave(bond, tx_slave, 0);
 		_unlock_tx_hashtbl(bond);
@@ -1399,11 +1400,21 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		/* In case of IPX, it will falback to L2 hash */
 		case htons(ETH_P_IPV6):
 			hash_index = bond_xmit_hash(bond, skb);
-			tx_slave = tlb_choose_channel(bond, hash_index & 0xFF, skb->len);
+			if (bond->params.tlb_dynamic_lb) {
+				tx_slave = tlb_choose_channel(bond,
+							      hash_index & 0xFF,
+							      skb->len);
+			} else {
+				struct list_head *iter;
+				int idx = hash_index % bond->slave_cnt;
+
+				bond_for_each_slave_rcu(bond, tx_slave, iter)
+					if (--idx < 0)
+						break;
+			}
 			break;
 		}
 	}
-
 	return bond_do_alb_xmit(skb, bond, tx_slave);
 }
 
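For reference, the selection idiom in the new else branch walks the slave list and stops at entry (hash_index % bond->slave_cnt): decrementing idx on each slave and breaking when it goes negative leaves tx_slave pointing at the idx-th entry. A minimal stand-alone C sketch of the same idiom follows; the plain singly linked list and the pick_nth() helper are stand-ins of mine for the kernel's list_head and bond_for_each_slave_rcu() machinery, not kernel APIs.

#include <stdio.h>

struct slave { const char *name; struct slave *next; };

/* walk the list, stopping at entry idx; mirrors the patch's
 * bond_for_each_slave_rcu() loop with the "--idx < 0" test */
static struct slave *pick_nth(struct slave *head, int idx)
{
	struct slave *s;

	for (s = head; s; s = s->next)
		if (--idx < 0)
			break;
	return s;
}

int main(void)
{
	struct slave s2 = { "eth2", NULL };
	struct slave s1 = { "eth1", &s2 };
	struct slave s0 = { "eth0", &s1 };
	unsigned int hash = 0xBEEF;	/* stands in for hash_index */

	/* hash % slave count gives a stable index into the list */
	printf("hash 0x%X -> %s\n", hash, pick_nth(&s0, hash % 3)->name);
	return 0;
}

Because the index depends only on the hash and the slave count, the chosen slave stays fixed for a given flow, which is exactly the static, reorder-free behavior tlb_dynamic_lb=0 trades balance for.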