author    Jiri Pirko <jiri@resnulli.us>    2013-07-20 06:13:52 -0400
committer David S. Miller <davem@davemloft.net>    2013-07-23 19:52:47 -0400
commit    fc423ff00df3a19554414eed80aef9de9b50313e (patch)
tree      f3c9c5997d4aa19afee173c384da88cbde2036ad
parent    ab2cfbb2bddb7c7bc4394e52e91044d5ff645cb4 (diff)
team: add peer notification
When a port is enabled or disabled, allow peers to be notified by
unsolicited NAs or gratuitous ARPs. Disabled by default.

Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
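The mechanism is a self-rearming delayed work item: team_notify_peers() latches
notify_peers.count into count_pending and schedules the work immediately; each
pass emits NETDEV_NOTIFY_PEERS under RTNL and reschedules itself after
notify_peers.interval milliseconds until the pending count drops to zero. The
handler uses rtnl_trylock() with an immediate requeue rather than rtnl_lock()
so it never sleeps on RTNL; team_notify_peers_fini() calls
cancel_delayed_work_sync() from paths that may already hold RTNL, and a handler
blocked on rtnl_lock() there could deadlock. Both knobs are exposed as u32 team
options ("notify_peers_count" and "notify_peers_interval"); a count of zero
keeps the feature disabled, which is the default. The standalone sketch below
restates that pattern outside the team driver; the names notify_ctx,
notify_work, notify_start and so on are illustrative only and are not part of
this patch:

/* Illustrative sketch only -- not part of the patch. It restates the
 * self-rearming delayed-work pattern: raise NETDEV_NOTIFY_PEERS 'count'
 * times, 'interval' ms apart, without ever blocking on RTNL.
 */
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct notify_ctx {
	struct net_device *dev;
	unsigned int count;		/* number of notifications, 0 == disabled */
	unsigned int interval;		/* ms between notifications */
	atomic_t pending;		/* notifications still to send */
	struct delayed_work dw;
};

static void notify_work(struct work_struct *work)
{
	struct notify_ctx *ctx = container_of(work, struct notify_ctx, dw.work);

	/* Never sleep on RTNL here; requeue instead, so that
	 * cancel_delayed_work_sync() called under RTNL cannot deadlock
	 * against this handler.
	 */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ctx->dw, 0);
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ctx->dev);
	rtnl_unlock();

	/* Re-arm until the requested number of notifications has gone out. */
	if (!atomic_dec_and_test(&ctx->pending))
		schedule_delayed_work(&ctx->dw, msecs_to_jiffies(ctx->interval));
}

static void notify_init(struct notify_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dw, notify_work);
}

static void notify_start(struct notify_ctx *ctx)
{
	if (!ctx->count || !netif_running(ctx->dev))
		return;
	atomic_set(&ctx->pending, ctx->count);
	schedule_delayed_work(&ctx->dw, 0);
}

static void notify_fini(struct notify_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->dw);
}

Note that the driver only raises the notifier event; the protocol handlers that
listen for NETDEV_NOTIFY_PEERS are what actually put the gratuitous ARP or
unsolicited NA on the wire.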
-rw-r--r--    drivers/net/team/team.c    87
-rw-r--r--    include/linux/if_team.h     8
2 files changed, 94 insertions(+), 1 deletion(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bff7e0b0b4e7..0433ee994f8c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -622,6 +622,46 @@ static int team_change_mode(struct team *team, const char *kind)
 }
 
 
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+	struct team *team;
+
+	team = container_of(work, struct team, notify_peers.dw.work);
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->notify_peers.dw, 0);
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+	rtnl_unlock();
+	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+		schedule_delayed_work(&team->notify_peers.dw,
+				      msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+	if (!team->notify_peers.count || !netif_running(team->dev))
+		return;
+	atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+	schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
 /************************
  * Rx path frame handler
  ************************/
@@ -846,6 +886,7 @@ static void team_port_enable(struct team *team,
 	team_queue_override_port_add(team, port);
 	if (team->ops.port_enabled)
 		team->ops.port_enabled(team, port);
+	team_notify_peers(team);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +916,7 @@ static void team_port_disable(struct team *team,
 	team->en_port_count--;
 	team_queue_override_port_del(team, port);
 	team_adjust_ops(team);
+	team_notify_peers(team);
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -1205,6 +1247,34 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
 	return team_change_mode(team, ctx->data.str_val);
 }
 
+static int team_notify_peers_count_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.count;
+	return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.interval;
+	return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.interval = ctx->data.u32_val;
+	return 0;
+}
+
 static int team_port_en_option_get(struct team *team,
 				   struct team_gsetter_ctx *ctx)
 {
@@ -1317,6 +1387,18 @@ static const struct team_option team_options[] = {
 		.setter = team_mode_option_set,
 	},
 	{
+		.name = "notify_peers_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_count_get,
+		.setter = team_notify_peers_count_set,
+	},
+	{
+		.name = "notify_peers_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_interval_get,
+		.setter = team_notify_peers_interval_set,
+	},
+	{
 		.name = "enabled",
 		.type = TEAM_OPTION_TYPE_BOOL,
 		.per_port = true,
@@ -1396,6 +1478,9 @@ static int team_init(struct net_device *dev)
 
 	INIT_LIST_HEAD(&team->option_list);
 	INIT_LIST_HEAD(&team->option_inst_list);
+
+	team_notify_peers_init(team);
+
 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
 	if (err)
 		goto err_options_register;
@@ -1406,6 +1491,7 @@ static int team_init(struct net_device *dev)
 	return 0;
 
 err_options_register:
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 err_team_queue_override_init:
 	free_percpu(team->pcpu_stats);
@@ -1425,6 +1511,7 @@ static void team_uninit(struct net_device *dev)
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
 }
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index f6156f91eb1c..b0b83683461e 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -10,9 +10,9 @@
 #ifndef _LINUX_IF_TEAM_H_
 #define _LINUX_IF_TEAM_H_
 
-
 #include <linux/netpoll.h>
 #include <net/sch_generic.h>
+#include <linux/types.h>
 #include <uapi/linux/if_team.h>
 
 struct team_pcpu_stats {
@@ -194,6 +194,12 @@ struct team {
 	bool user_carrier_enabled;
 	bool queue_override_enabled;
 	struct list_head *qom_lists; /* array of queue override mapping lists */
+	struct {
+		unsigned int count;
+		unsigned int interval; /* in ms */
+		atomic_t count_pending;
+		struct delayed_work dw;
+	} notify_peers;
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 