author		Alexander Duyck <alexander.h.duyck@intel.com>	2013-01-10 03:57:02 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-11 01:47:03 -0500
commit		537c00de1c9ba9876b91d869e84caceefe2b8bf9 (patch)
tree		f808a5221aec07b3a0a01513e5de9d5cdc2a6b54 /net/core/dev.c
parent		416186fbf8c5b4e4465a10c6ac7a45b6c47144b2 (diff)
net: Add functions netif_reset_xps_queue and netif_set_xps_queue
This patch adds two functions, netif_reset_xps_queue and
netif_set_xps_queue. The main idea behind them is to provide a
mechanism through which drivers can update their XPS defaults.
Currently no such mechanism exists, and as a result we cannot use XPS
for features such as ATR, which require a basic starting configuration
in which the Tx queues are mapped to CPUs 1:1. With this change,
drivers such as ixgbe can use the XPS feature by controlling the
default configuration.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
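For illustration, a minimal sketch of how a driver might install the 1:1
Tx-queue-to-CPU default described above using the new helper. The function
name my_driver_set_default_xps and the num_tx_queues parameter are
hypothetical and not part of this patch; only netif_set_xps_queue comes from
the change below.

/* Hypothetical driver init path: give each Tx queue a single-CPU mask
 * so features like ATR start from a known 1:1 default.
 */
static void my_driver_set_default_xps(struct net_device *netdev,
				      unsigned int num_tx_queues)
{
	cpumask_t mask;
	unsigned int i;

	for (i = 0; i < num_tx_queues; i++) {
		cpumask_clear(&mask);
		cpumask_set_cpu(i % nr_cpu_ids, &mask);
		/* netif_set_xps_queue() can fail with -ENOMEM; error
		 * handling is omitted for brevity in this sketch.
		 */
		netif_set_xps_queue(netdev, &mask, i);
	}
}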
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	155
1 files changed, 155 insertions, 0 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 81ff67149f62..257b29516f69 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1857,6 +1857,161 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 	}
 }
 
+#ifdef CONFIG_XPS
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+void netif_reset_xps_queue(struct net_device *dev, u16 index)
+{
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int i, pos, nonempty = 0;
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (!dev_maps)
+		goto out_no_maps;
+
+	for_each_possible_cpu(i) {
+		map = xmap_dereference(dev_maps->cpu_map[i]);
+		if (!map)
+			continue;
+
+		for (pos = 0; pos < map->len; pos++)
+			if (map->queues[pos] == index)
+				break;
+
+		if (pos < map->len) {
+			if (map->len > 1) {
+				map->queues[pos] = map->queues[--map->len];
+			} else {
+				RCU_INIT_POINTER(dev_maps->cpu_map[i], NULL);
+				kfree_rcu(map, rcu);
+				map = NULL;
+			}
+		}
+		if (map)
+			nonempty = 1;
+	}
+
+	if (!nonempty) {
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
+		kfree_rcu(dev_maps, rcu);
+	}
+
+out_no_maps:
+	mutex_unlock(&xps_map_mutex);
+}
+
+int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+{
+	int i, cpu, pos, map_len, alloc_len, need_set;
+	struct xps_map *map, *new_map;
+	struct xps_dev_maps *dev_maps, *new_dev_maps;
+	int nonempty = 0;
+	int numa_node_id = -2;
+	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
+
+	new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
+	if (!new_dev_maps)
+		return -ENOMEM;
+
+	mutex_lock(&xps_map_mutex);
+
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		new_map = map;
+		if (map) {
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+			map_len = map->len;
+			alloc_len = map->alloc_len;
+		} else
+			pos = map_len = alloc_len = 0;
+
+		need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
+#ifdef CONFIG_NUMA
+		if (need_set) {
+			if (numa_node_id == -2)
+				numa_node_id = cpu_to_node(cpu);
+			else if (numa_node_id != cpu_to_node(cpu))
+				numa_node_id = -1;
+		}
+#endif
+		if (need_set && pos >= map_len) {
+			/* Need to add queue to this CPU's map */
+			if (map_len >= alloc_len) {
+				alloc_len = alloc_len ?
+				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
+				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+						       GFP_KERNEL,
+						       cpu_to_node(cpu));
+				if (!new_map)
+					goto error;
+				new_map->alloc_len = alloc_len;
+				for (i = 0; i < map_len; i++)
+					new_map->queues[i] = map->queues[i];
+				new_map->len = map_len;
+			}
+			new_map->queues[new_map->len++] = index;
+		} else if (!need_set && pos < map_len) {
+			/* Need to remove queue from this CPU's map */
+			if (map_len > 1)
+				new_map->queues[pos] =
+					new_map->queues[--new_map->len];
+			else
+				new_map = NULL;
+		}
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+	}
+
+	/* Cleanup old maps */
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+			kfree_rcu(map, rcu);
+		if (new_dev_maps->cpu_map[cpu])
+			nonempty = 1;
+	}
+
+	if (nonempty) {
+		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+	} else {
+		kfree(new_dev_maps);
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
+	}
+
+	if (dev_maps)
+		kfree_rcu(dev_maps, rcu);
+
+	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+				     (numa_node_id >= 0) ? numa_node_id :
+				     NUMA_NO_NODE);
+
+	mutex_unlock(&xps_map_mutex);
+
+	return 0;
+error:
+	mutex_unlock(&xps_map_mutex);
+
+	if (new_dev_maps)
+		for_each_possible_cpu(i)
+			kfree(rcu_dereference_protected(
+					new_dev_maps->cpu_map[i],
+					1));
+	kfree(new_dev_maps);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(netif_set_xps_queue);
+
+#endif
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
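A complementary sketch of the reset path, assuming a driver clears its
per-queue CPU hints before reconfiguring its Tx queues. The function name
my_driver_clear_xps is hypothetical; note that only netif_set_xps_queue
receives an EXPORT_SYMBOL in the hunk above, so a built-in caller (or a later
export) is assumed for netif_reset_xps_queue here.

/* Hypothetical teardown path: drop the XPS mapping of every active
 * Tx queue; netif_reset_xps_queue() frees the per-CPU maps (and
 * dev->xps_maps itself once it becomes empty) via RCU.
 */
static void my_driver_clear_xps(struct net_device *netdev)
{
	u16 i;

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		netif_reset_xps_queue(netdev, i);
}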