about summary refs log tree commit diff stats
path: root/drivers/net/bonding/bond_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/bonding/bond_main.c')
-rw-r--r--	drivers/net/bonding/bond_main.c	66
1 file changed, 46 insertions(+), 20 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c02267d7e..0075514bf32f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 			write_lock_bh(&bond->curr_slave_lock);
 		}
 	}
+
+	/* resend IGMP joins since all were sent on curr_active_slave */
+	if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+		bond_resend_igmp_join_requests(bond);
+	}
 }
 
 /**
@@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *start_at;
 	int i, slave_no, res = 1;
+	struct iphdr *iph = ip_hdr(skb);
 
 	read_lock(&bond->lock);
 
 	if (!BOND_IS_OK(bond))
 		goto out;
-
 	/*
-	 * Concurrent TX may collide on rr_tx_counter; we accept that
-	 * as being rare enough not to justify using an atomic op here
+	 * Start with the curr_active_slave that joined the bond as the
+	 * default for sending IGMP traffic. For failover purposes one
+	 * needs to maintain some consistency for the interface that will
+	 * send the join/membership reports. The curr_active_slave found
+	 * will send all of this type of traffic.
 	 */
-	slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+	if ((iph->protocol == IPPROTO_IGMP) &&
+	    (skb->protocol == htons(ETH_P_IP))) {
 
-	bond_for_each_slave(bond, slave, i) {
-		slave_no--;
-		if (slave_no < 0)
-			break;
+		read_lock(&bond->curr_slave_lock);
+		slave = bond->curr_active_slave;
+		read_unlock(&bond->curr_slave_lock);
+
+		if (!slave)
+			goto out;
+	} else {
+		/*
+		 * Concurrent TX may collide on rr_tx_counter; we accept
+		 * that as being rare enough not to justify using an
+		 * atomic op here.
+		 */
+		slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+		bond_for_each_slave(bond, slave, i) {
+			slave_no--;
+			if (slave_no < 0)
+				break;
+		}
 	}
 
 	start_at = slave;
@@ -4426,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = {
 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
 };
 
+static void bond_destructor(struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	if (bond->wq)
+		destroy_workqueue(bond->wq);
+	free_netdev(bond_dev);
+}
+
 static void bond_setup(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -4446,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev)
 	bond_dev->ethtool_ops = &bond_ethtool_ops;
 	bond_set_mode_ops(bond, bond->params.mode);
 
-	bond_dev->destructor = free_netdev;
+	bond_dev->destructor = bond_destructor;
 
 	/* Initialize the device options */
 	bond_dev->tx_queue_len = 0;
@@ -4518,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
 	bond_remove_proc_entry(bond);
 
-	if (bond->wq)
-		destroy_workqueue(bond->wq);
-
 	netif_addr_lock_bh(bond_dev);
 	bond_mc_list_destroy(bond);
 	netif_addr_unlock_bh(bond_dev);
@@ -4932,8 +4961,8 @@ int bond_create(struct net *net, const char *name)
 			bond_setup);
 	if (!bond_dev) {
 		pr_err("%s: eek! can't alloc netdev!\n", name);
-		res = -ENOMEM;
-		goto out;
+		rtnl_unlock();
+		return -ENOMEM;
 	}
 
 	dev_net_set(bond_dev, net);
@@ -4942,19 +4971,16 @@ int bond_create(struct net *net, const char *name)
 	if (!name) {
 		res = dev_alloc_name(bond_dev, "bond%d");
 		if (res < 0)
-			goto out_netdev;
+			goto out;
 	}
 
 	res = register_netdevice(bond_dev);
-	if (res < 0)
-		goto out_netdev;
 
 out:
 	rtnl_unlock();
+	if (res < 0)
+		bond_destructor(bond_dev);
 	return res;
-out_netdev:
-	free_netdev(bond_dev);
-	goto out;
 }
 
 static int __net_init bond_net_init(struct net *net)