author	Jiri Pirko <jiri@resnulli.us>	2012-07-27 02:28:55 -0400
committer	David S. Miller <davem@davemloft.net>	2012-08-03 23:40:12 -0400
commit	8ff5105a2b9dd0ba596719b165c1827d101e5f1a (patch)
tree	6d9ddf4e40cea45eefb14dd680c0f14b7db78eb8 /drivers/net/team/team.c
parent	a86fc6b7d603992070c04bd7a8c217d55688b077 (diff)
team: add support for queue override by setting queue_id for port
Similar to what bonding has. This allows setting queue_id for a port so that the port is used when an skb with a matching skb->queue_mapping is transmitted.

Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
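For orientation before the diff, the decision this patch adds to the transmit path can be sketched as follows. This is a simplified illustration only: the fields mirror the ones the patch introduces, but the types and the xmit callback are hypothetical stand-ins, not the kernel's real structures.

/*
 * Simplified sketch of the queue override logic added by this patch.
 * Types and the xmit callback are illustrative stand-ins.
 */
#include <stdbool.h>

struct port {
	int priority;          /* per-port "priority" option */
	unsigned int queue_id; /* per-port "queue_id" option; 0 = no override */
	struct port *next;     /* next port bound to the same queue_id */
};

struct team_sketch {
	bool queue_override_enabled; /* true iff any enabled port has queue_id set */
	struct port **qom_lists;     /* one port list per TX queue, excluding queue 0 */
};

/*
 * Mirrors team_queue_override_transmit(): for an skb mapped to a nonzero
 * TX queue, try the ports bound to that queue in priority order.  A false
 * return means no override port took the packet, and the caller falls back
 * to the team mode's normal transmit op, as team_xmit() does after this patch.
 */
static bool queue_override_xmit(struct team_sketch *team,
				unsigned int queue_mapping,
				bool (*xmit)(struct port *port))
{
	struct port *port;

	if (!team->queue_override_enabled || !queue_mapping)
		return false;
	for (port = team->qom_lists[queue_mapping - 1]; port; port = port->next)
		if (xmit(port))
			return true;
	return false;
}

In the patch itself, team_xmit() first calls team_queue_override_transmit(), which walks a per-queue struct list_head under RCU with ports kept sorted by priority and then port index, and it falls back to team->ops.transmit() only when no override port accepted the skb. skb->queue_mapping is typically chosen by the stack's TX queue selection or set explicitly, for example via tc's skbedit action.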
Diffstat (limited to 'drivers/net/team/team.c')
 drivers/net/team/team.c | 163 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 161 insertions(+), 2 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a30b7c1bd9f6..ba10c469b02b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 }
 
 
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+	struct list_head *listarr;
+	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+	unsigned int i;
+
+	if (!queue_cnt)
+		return 0;
+	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+	if (!listarr)
+		return -ENOMEM;
+	team->qom_lists = listarr;
+	for (i = 0; i < queue_cnt; i++)
+		INIT_LIST_HEAD(listarr++);
+	return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+	kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+	return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct list_head *qom_list;
+	struct team_port *port;
+
+	if (!team->queue_override_enabled || !skb->queue_mapping)
+		return false;
+	qom_list = __team_get_qom_list(team, skb->queue_mapping);
+	list_for_each_entry_rcu(port, qom_list, qom_list) {
+		if (!team_dev_queue_xmit(team, port, skb))
+			return true;
+	}
+	return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+					   struct team_port *port)
+{
+	list_del_rcu(&port->qom_list);
+	synchronize_rcu();
+	INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+						      struct team_port *cur)
+{
+	if (port->priority < cur->priority)
+		return true;
+	if (port->priority > cur->priority)
+		return false;
+	if (port->index < cur->index)
+		return true;
+	return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+					   struct team_port *port)
+{
+	struct team_port *cur;
+	struct list_head *qom_list;
+	struct list_head *node;
+
+	if (!port->queue_id || !team_port_enabled(port))
+		return;
+
+	qom_list = __team_get_qom_list(team, port->queue_id);
+	node = qom_list;
+	list_for_each_entry(cur, qom_list, qom_list) {
+		if (team_queue_override_port_has_gt_prio_than(port, cur))
+			break;
+		node = &cur->qom_list;
+	}
+	list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+	struct team_port *port;
+	bool enabled = false;
+
+	list_for_each_entry(port, &team->port_list, list) {
+		if (!list_empty(&port->qom_list)) {
+			enabled = true;
+			break;
+		}
+	}
+	if (enabled == team->queue_override_enabled)
+		return;
+	netdev_dbg(team->dev, "%s queue override\n",
+		   enabled ? "Enabling" : "Disabling");
+	team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+					     struct team_port *port)
+{
+	__team_queue_override_port_del(team, port);
+	__team_queue_override_port_add(team, port);
+	__team_queue_override_enabled_check(team);
+}
+
+
 /****************
  * Port handling
  ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
 	hlist_add_head_rcu(&port->hlist,
 			   team_port_index_hash(team, port->index));
 	team_adjust_ops(team);
+	team_queue_override_port_refresh(team, port);
 	if (team->ops.port_enabled)
 		team->ops.port_enabled(team, port);
 }
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
 	hlist_del_rcu(&port->hlist);
 	__reconstruct_port_hlist(team, port->index);
 	port->index = -1;
+	team_queue_override_port_refresh(team, port);
 	__team_adjust_ops(team, team->en_port_count - 1);
 	/*
 	 * Wait until readers see adjusted ops. This ensures that
@@ -881,6 +999,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 
 	port->dev = port_dev;
 	port->team = team;
+	INIT_LIST_HEAD(&port->qom_list);
 
 	port->orig.mtu = port_dev->mtu;
 	err = dev_set_mtu(port_dev, dev->mtu);
@@ -1107,9 +1226,34 @@ static int team_priority_option_set(struct team *team,
 	struct team_port *port = ctx->info->port;
 
 	port->priority = ctx->data.s32_val;
+	team_queue_override_port_refresh(team, port);
 	return 0;
 }
 
+static int team_queue_id_option_get(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	ctx->data.u32_val = port->queue_id;
+	return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	if (port->queue_id == ctx->data.u32_val)
+		return 0;
+	if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+		return -EINVAL;
+	port->queue_id = ctx->data.u32_val;
+	team_queue_override_port_refresh(team, port);
+	return 0;
+}
+
+
 static const struct team_option team_options[] = {
 	{
 		.name = "mode",
@@ -1145,6 +1289,13 @@ static const struct team_option team_options[] = {
 		.getter = team_priority_option_get,
 		.setter = team_priority_option_set,
 	},
+	{
+		.name = "queue_id",
+		.type = TEAM_OPTION_TYPE_U32,
+		.per_port = true,
+		.getter = team_queue_id_option_get,
+		.setter = team_queue_id_option_set,
+	},
 };
 
 static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1180,6 +1331,9 @@ static int team_init(struct net_device *dev)
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);
+	err = team_queue_override_init(team);
+	if (err)
+		goto err_team_queue_override_init;
 
 	team_adjust_ops(team);
 
@@ -1195,6 +1349,8 @@ static int team_init(struct net_device *dev)
 	return 0;
 
 err_options_register:
+	team_queue_override_fini(team);
+err_team_queue_override_init:
 	free_percpu(team->pcpu_stats);
 
 	return err;
@@ -1212,6 +1368,7 @@ static void team_uninit(struct net_device *dev)
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
 }
 
@@ -1241,10 +1398,12 @@ static int team_close(struct net_device *dev)
 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct team *team = netdev_priv(dev);
-	bool tx_success = false;
+	bool tx_success;
 	unsigned int len = skb->len;
 
-	tx_success = team->ops.transmit(team, skb);
+	tx_success = team_queue_override_transmit(team, skb);
+	if (!tx_success)
+		tx_success = team->ops.transmit(team, skb);
 	if (tx_success) {
 		struct team_pcpu_stats *pcpu_stats;
 