aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2016-10-28 11:46:49 -0400
committerDavid S. Miller <davem@davemloft.net>2016-10-31 15:00:48 -0400
commit6234f87407cb2c02a5828e161225e5a84163dc85 (patch)
tree18b414bc9795bb4356938bbff7c31debd4cfbd14 /net/core/dev.c
parent8d059b0f6f5b1d3acf829454e1087818ad660058 (diff)
net: Refactor removal of queues from XPS map and apply on num_tc changes
This patch updates the code for removing queues from the XPS map and makes it so that we can apply the code any time we change either the number of traffic classes or the mapping of a given block of queues. This way we avoid having queues pulling traffic from a foreign traffic class.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c73
1 file changed, 50 insertions, 23 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index db0fdbbcd9b8..108a6adce185 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1970,32 +1970,50 @@ static DEFINE_MUTEX(xps_map_mutex);
1970#define xmap_dereference(P) \ 1970#define xmap_dereference(P) \
1971 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 1971 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1972 1972
1973static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps, 1973static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
1974 int cpu, u16 index) 1974 int tci, u16 index)
1975{ 1975{
1976 struct xps_map *map = NULL; 1976 struct xps_map *map = NULL;
1977 int pos; 1977 int pos;
1978 1978
1979 if (dev_maps) 1979 if (dev_maps)
1980 map = xmap_dereference(dev_maps->cpu_map[cpu]); 1980 map = xmap_dereference(dev_maps->cpu_map[tci]);
1981 if (!map)
1982 return false;
1981 1983
1982 for (pos = 0; map && pos < map->len; pos++) { 1984 for (pos = map->len; pos--;) {
1983 if (map->queues[pos] == index) { 1985 if (map->queues[pos] != index)
1984 if (map->len > 1) { 1986 continue;
1985 map->queues[pos] = map->queues[--map->len]; 1987
1986 } else { 1988 if (map->len > 1) {
1987 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL); 1989 map->queues[pos] = map->queues[--map->len];
1988 kfree_rcu(map, rcu);
1989 map = NULL;
1990 }
1991 break; 1990 break;
1992 } 1991 }
1992
1993 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
1994 kfree_rcu(map, rcu);
1995 return false;
1993 } 1996 }
1994 1997
1995 return map; 1998 return true;
1996} 1999}
1997 2000
1998static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2001static bool remove_xps_queue_cpu(struct net_device *dev,
2002 struct xps_dev_maps *dev_maps,
2003 int cpu, u16 offset, u16 count)
2004{
2005 int i, j;
2006
2007 for (i = count, j = offset; i--; j++) {
2008 if (!remove_xps_queue(dev_maps, cpu, j))
2009 break;
2010 }
2011
2012 return i < 0;
2013}
2014
2015static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2016 u16 count)
1999{ 2017{
2000 struct xps_dev_maps *dev_maps; 2018 struct xps_dev_maps *dev_maps;
2001 int cpu, i; 2019 int cpu, i;
@@ -2007,21 +2025,16 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2007 if (!dev_maps) 2025 if (!dev_maps)
2008 goto out_no_maps; 2026 goto out_no_maps;
2009 2027
2010 for_each_possible_cpu(cpu) { 2028 for_each_possible_cpu(cpu)
2011 for (i = index; i < dev->num_tx_queues; i++) { 2029 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2012 if (!remove_xps_queue(dev_maps, cpu, i)) 2030 offset, count);
2013 break;
2014 }
2015 if (i == dev->num_tx_queues)
2016 active = true;
2017 }
2018 2031
2019 if (!active) { 2032 if (!active) {
2020 RCU_INIT_POINTER(dev->xps_maps, NULL); 2033 RCU_INIT_POINTER(dev->xps_maps, NULL);
2021 kfree_rcu(dev_maps, rcu); 2034 kfree_rcu(dev_maps, rcu);
2022 } 2035 }
2023 2036
2024 for (i = index; i < dev->num_tx_queues; i++) 2037 for (i = offset + (count - 1); count--; i--)
2025 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), 2038 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2026 NUMA_NO_NODE); 2039 NUMA_NO_NODE);
2027 2040
@@ -2029,6 +2042,11 @@ out_no_maps:
2029 mutex_unlock(&xps_map_mutex); 2042 mutex_unlock(&xps_map_mutex);
2030} 2043}
2031 2044
2045static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2046{
2047 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2048}
2049
2032static struct xps_map *expand_xps_map(struct xps_map *map, 2050static struct xps_map *expand_xps_map(struct xps_map *map,
2033 int cpu, u16 index) 2051 int cpu, u16 index)
2034{ 2052{
@@ -2192,6 +2210,9 @@ EXPORT_SYMBOL(netif_set_xps_queue);
2192#endif 2210#endif
2193void netdev_reset_tc(struct net_device *dev) 2211void netdev_reset_tc(struct net_device *dev)
2194{ 2212{
2213#ifdef CONFIG_XPS
2214 netif_reset_xps_queues_gt(dev, 0);
2215#endif
2195 dev->num_tc = 0; 2216 dev->num_tc = 0;
2196 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2217 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2197 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2218 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2203,6 +2224,9 @@ int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2203 if (tc >= dev->num_tc) 2224 if (tc >= dev->num_tc)
2204 return -EINVAL; 2225 return -EINVAL;
2205 2226
2227#ifdef CONFIG_XPS
2228 netif_reset_xps_queues(dev, offset, count);
2229#endif
2206 dev->tc_to_txq[tc].count = count; 2230 dev->tc_to_txq[tc].count = count;
2207 dev->tc_to_txq[tc].offset = offset; 2231 dev->tc_to_txq[tc].offset = offset;
2208 return 0; 2232 return 0;
@@ -2214,6 +2238,9 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2214 if (num_tc > TC_MAX_QUEUE) 2238 if (num_tc > TC_MAX_QUEUE)
2215 return -EINVAL; 2239 return -EINVAL;
2216 2240
2241#ifdef CONFIG_XPS
2242 netif_reset_xps_queues_gt(dev, 0);
2243#endif
2217 dev->num_tc = num_tc; 2244 dev->num_tc = num_tc;
2218 return 0; 2245 return 0;
2219} 2246}