aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2011-11-28 11:33:02 -0500
committerDavid S. Miller <davem@davemloft.net>2011-11-29 12:46:19 -0500
commit927fbec13e40648d3c87cbb1daaac5b1fb9c8775 (patch)
tree04fa0c2153a8875a77d7b2505bc8b228aa6d442b /net
parentc5d67bd78c5dc540e3461c36fb3d389fbe0de4c3 (diff)
xps: Add xps_queue_release function
This patch moves the xps-specific parts of netdev_queue_release into their own function, which netdev_queue_release can call. This allows netdev_queue_release to be more generic (for adding new attributes to tx queues). Signed-off-by: Tom Herbert <therbert@google.com> Acked-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/net-sysfs.c89
1 file changed, 47 insertions, 42 deletions
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index db6c2f83633f..b17c14a0fce9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -910,6 +910,52 @@ static DEFINE_MUTEX(xps_map_mutex);
910#define xmap_dereference(P) \ 910#define xmap_dereference(P) \
911 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 911 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
912 912
/*
 * xps_queue_release - remove a tx queue from the device's XPS maps.
 *
 * Drops @queue's index from every per-possible-CPU xps_map on queue->dev.
 * A per-CPU map that becomes empty is unhooked and freed after an RCU
 * grace period; if no CPU retains any map entry afterwards, the whole
 * xps_dev_maps structure is released as well.  All map updates are
 * serialized by xps_map_mutex (see xmap_dereference above).
 */
913static void xps_queue_release(struct netdev_queue *queue)
914{
915 struct net_device *dev = queue->dev;
916 struct xps_dev_maps *dev_maps;
917 struct xps_map *map;
918 unsigned long index;
919 int i, pos, nonempty = 0;
920
921 index = get_netdev_queue_index(queue);
922
923 mutex_lock(&xps_map_mutex);
924 dev_maps = xmap_dereference(dev->xps_maps);
925
926 if (dev_maps) {
927 for_each_possible_cpu(i) {
928 map = xmap_dereference(dev_maps->cpu_map[i]);
929 if (!map)
930 continue;
931
/* Linear scan for this queue's index in the CPU's map. */
932 for (pos = 0; pos < map->len; pos++)
933 if (map->queues[pos] == index)
934 break;
935
936 if (pos < map->len) {
/*
 * Found: remove by swapping in the last entry
 * (order within a map is not significant).  If
 * this was the only entry, unhook the map and
 * free it after readers are done.
 */
937 if (map->len > 1)
938 map->queues[pos] =
939 map->queues[--map->len];
940 else {
941 RCU_INIT_POINTER(dev_maps->cpu_map[i],
942 NULL);
943 kfree_rcu(map, rcu);
944 map = NULL;
945 }
946 }
/* Any surviving map keeps dev_maps alive. */
947 if (map)
948 nonempty = 1;
949 }
950
/* No CPU references any map: release the whole table. */
951 if (!nonempty) {
952 RCU_INIT_POINTER(dev->xps_maps, NULL);
953 kfree_rcu(dev_maps, rcu);
954 }
955 }
956 mutex_unlock(&xps_map_mutex);
957}
958
913static ssize_t store_xps_map(struct netdev_queue *queue, 959static ssize_t store_xps_map(struct netdev_queue *queue,
914 struct netdev_queue_attribute *attribute, 960 struct netdev_queue_attribute *attribute,
915 const char *buf, size_t len) 961 const char *buf, size_t len)
@@ -1054,49 +1100,8 @@ static struct attribute *netdev_queue_default_attrs[] = {
1054static void netdev_queue_release(struct kobject *kobj) 1100static void netdev_queue_release(struct kobject *kobj)
1055{ 1101{
1056 struct netdev_queue *queue = to_netdev_queue(kobj); 1102 struct netdev_queue *queue = to_netdev_queue(kobj);
1057 struct net_device *dev = queue->dev;
1058 struct xps_dev_maps *dev_maps;
1059 struct xps_map *map;
1060 unsigned long index;
1061 int i, pos, nonempty = 0;
1062
1063 index = get_netdev_queue_index(queue);
1064
1065 mutex_lock(&xps_map_mutex);
1066 dev_maps = xmap_dereference(dev->xps_maps);
1067 1103
1068 if (dev_maps) { 1104 xps_queue_release(queue);
1069 for_each_possible_cpu(i) {
1070 map = xmap_dereference(dev_maps->cpu_map[i]);
1071 if (!map)
1072 continue;
1073
1074 for (pos = 0; pos < map->len; pos++)
1075 if (map->queues[pos] == index)
1076 break;
1077
1078 if (pos < map->len) {
1079 if (map->len > 1)
1080 map->queues[pos] =
1081 map->queues[--map->len];
1082 else {
1083 RCU_INIT_POINTER(dev_maps->cpu_map[i],
1084 NULL);
1085 kfree_rcu(map, rcu);
1086 map = NULL;
1087 }
1088 }
1089 if (map)
1090 nonempty = 1;
1091 }
1092
1093 if (!nonempty) {
1094 RCU_INIT_POINTER(dev->xps_maps, NULL);
1095 kfree_rcu(dev_maps, rcu);
1096 }
1097 }
1098
1099 mutex_unlock(&xps_map_mutex);
1100 1105
1101 memset(kobj, 0, sizeof(*kobj)); 1106 memset(kobj, 0, sizeof(*kobj));
1102 dev_put(queue->dev); 1107 dev_put(queue->dev);