path: root/net/core
author     Eric Dumazet <eric.dumazet@gmail.com>    2010-11-28 16:43:02 -0500
committer  David S. Miller <davem@davemloft.net>    2010-11-29 12:43:13 -0500
commit     a41778694806ac1ccd4b1dafed1abef8d5ba98ac (patch)
tree       499550eb704ba7811960d633161f5c386e4332cb /net/core
parent     b02038a17b271e0f70616c54e4eccb5cc33d1b74 (diff)
xps: add __rcu annotations
Avoid sparse warnings: add __rcu annotations and use
rcu_dereference_protected() where necessary.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
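For context, the pattern this patch applies can be illustrated with a minimal, self-contained sketch. The struct foo_maps, foo_mutex and foo_* helpers below are hypothetical names chosen for illustration, not identifiers from the patch: the shared pointer is annotated __rcu so sparse checks every access, readers dereference it under rcu_read_lock() with rcu_dereference(), and update paths that hold the protecting mutex use rcu_dereference_protected() with a lockdep expression, which is what the xmap_dereference() macro added below does for xps_map_mutex.

	/*
	 * Minimal sketch of the __rcu annotation pattern, assuming a
	 * hypothetical struct foo_maps protected by foo_mutex; it mirrors
	 * the xmap_dereference() approach but is not part of the patch.
	 */
	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo_maps {
		struct rcu_head rcu;
		int value;
	};

	/* __rcu tells sparse this pointer must go through the RCU accessors. */
	static struct foo_maps __rcu *foo_maps;
	static DEFINE_MUTEX(foo_mutex);

	/* Update side: foo_mutex is held, so lockdep can verify the access. */
	#define foo_dereference(P) \
		rcu_dereference_protected((P), lockdep_is_held(&foo_mutex))

	static void foo_release(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct foo_maps, rcu));
	}

	static void foo_replace(struct foo_maps *new)
	{
		struct foo_maps *old;

		mutex_lock(&foo_mutex);
		old = foo_dereference(foo_maps);
		rcu_assign_pointer(foo_maps, new);
		mutex_unlock(&foo_mutex);
		if (old)
			call_rcu(&old->rcu, foo_release);
	}

	/* Read side: no mutex, just an RCU read-side critical section. */
	static int foo_read_value(void)
	{
		struct foo_maps *maps;
		int ret = -1;

		rcu_read_lock();
		maps = rcu_dereference(foo_maps);
		if (maps)
			ret = maps->value;
		rcu_read_unlock();
		return ret;
	}

The same division of labor appears in the diff: store_xps_map() and netdev_queue_release() hold xps_map_mutex and therefore use xmap_dereference(), while the error path passes 1 to rcu_dereference_protected() because the map has not yet been published to readers.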
Diffstat (limited to 'net/core')
 net/core/net-sysfs.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 35ef42fa0cf3..f85cee3d869e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -899,6 +899,8 @@ static void xps_dev_maps_release(struct rcu_head *rcu)
 }
 
 static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
 static ssize_t store_xps_map(struct netdev_queue *queue,
 			     struct netdev_queue_attribute *attribute,
@@ -935,11 +937,12 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 
 	mutex_lock(&xps_map_mutex);
 
-	dev_maps = dev->xps_maps;
+	dev_maps = xmap_dereference(dev->xps_maps);
 
 	for_each_possible_cpu(cpu) {
-		new_map = map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
-
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		new_map = map;
 		if (map) {
 			for (pos = 0; pos < map->len; pos++)
 				if (map->queues[pos] == index)
@@ -975,13 +978,14 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 			else
 				new_map = NULL;
 		}
-		new_dev_maps->cpu_map[cpu] = new_map;
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
 	}
 
 	/* Cleanup old maps */
 	for_each_possible_cpu(cpu) {
-		map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
-		if (map && new_dev_maps->cpu_map[cpu] != map)
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
 			call_rcu(&map->rcu, xps_map_release);
 		if (new_dev_maps->cpu_map[cpu])
 			nonempty = 1;
@@ -1007,7 +1011,9 @@ error:
 
 	if (new_dev_maps)
 		for_each_possible_cpu(i)
-			kfree(new_dev_maps->cpu_map[i]);
+			kfree(rcu_dereference_protected(
+				new_dev_maps->cpu_map[i],
+				1));
 	kfree(new_dev_maps);
 	free_cpumask_var(mask);
 	return -ENOMEM;
@@ -1033,11 +1039,11 @@ static void netdev_queue_release(struct kobject *kobj)
 	index = get_netdev_queue_index(queue);
 
 	mutex_lock(&xps_map_mutex);
-	dev_maps = dev->xps_maps;
+	dev_maps = xmap_dereference(dev->xps_maps);
 
 	if (dev_maps) {
 		for_each_possible_cpu(i) {
-			map = dev_maps->cpu_map[i];
+			map = xmap_dereference(dev_maps->cpu_map[i]);
 			if (!map)
 				continue;
 