Diffstat (limited to 'net/core/net-sysfs.c')
-rw-r--r--	net/core/net-sysfs.c	184
1 file changed, 23 insertions(+), 161 deletions(-)
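
This is a rollup of several independent net-sysfs changes:

- the carrier attribute becomes writable (S_IWUSR) and is wired to a new store_carrier()/dev_change_carrier() path, so carrier state can be forced from userspace on a running interface;
- the open-coded XPS map bookkeeping in store_xps_map() and xps_queue_release() is replaced by a single call to the netif_set_xps_queue() helper in the net core;
- the CONFIG_HOTPLUG guards around netdev_uevent() are dropped, in line with the kernel-wide removal of that option;
- pm_runtime_set_memalloc_noio() is called on netdev kobject registration and unregistration so runtime-PM callbacks for network devices run under NOIO memory-allocation constraints.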
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 334efd5d67a9..7427ab5e27d8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
 
 #include "net-sysfs.h"
 
@@ -126,6 +127,19 @@ static ssize_t show_broadcast(struct device *dev,
 	return -EINVAL;
 }
 
+static int change_carrier(struct net_device *net, unsigned long new_carrier)
+{
+	if (!netif_running(net))
+		return -EINVAL;
+	return dev_change_carrier(net, (bool) new_carrier);
+}
+
+static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, change_carrier);
+}
+
 static ssize_t show_carrier(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
@@ -331,7 +345,7 @@ static struct device_attribute net_class_attributes[] = {
 	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
 	__ATTR(address, S_IRUGO, show_address, NULL),
 	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
-	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
+	__ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
 	__ATTR(speed, S_IRUGO, show_speed, NULL),
 	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
 	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
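
The two hunks above make carrier writable: S_IWUSR plus a store_carrier() handler that feeds netdev_store(), which in turn calls dev_change_carrier(). Writes fail with -EINVAL unless the interface is running, and in mainline the driver must implement ndo_change_carrier for the call to succeed (dev_change_carrier() returns -EOPNOTSUPP otherwise). A minimal userspace sketch, not part of the patch, with the interface name "eth0" assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path and interface name are illustrative. */
	int fd = open("/sys/class/net/eth0/carrier", O_WRONLY);

	if (fd < 0) {
		perror("open");	/* sysfs write permission is root-only (S_IWUSR) */
		return 1;
	}
	/* "0" forces carrier off, "1" forces it back on;
	 * expect -EINVAL if the interface is not running. */
	if (write(fd, "0", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}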
@@ -989,68 +1003,14 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
 	return len;
 }
 
-static DEFINE_MUTEX(xps_map_mutex);
-#define xmap_dereference(P)		\
-	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
-
-static void xps_queue_release(struct netdev_queue *queue)
-{
-	struct net_device *dev = queue->dev;
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	unsigned long index;
-	int i, pos, nonempty = 0;
-
-	index = get_netdev_queue_index(queue);
-
-	mutex_lock(&xps_map_mutex);
-	dev_maps = xmap_dereference(dev->xps_maps);
-
-	if (dev_maps) {
-		for_each_possible_cpu(i) {
-			map = xmap_dereference(dev_maps->cpu_map[i]);
-			if (!map)
-				continue;
-
-			for (pos = 0; pos < map->len; pos++)
-				if (map->queues[pos] == index)
-					break;
-
-			if (pos < map->len) {
-				if (map->len > 1)
-					map->queues[pos] =
-					    map->queues[--map->len];
-				else {
-					RCU_INIT_POINTER(dev_maps->cpu_map[i],
-					    NULL);
-					kfree_rcu(map, rcu);
-					map = NULL;
-				}
-			}
-			if (map)
-				nonempty = 1;
-		}
-
-		if (!nonempty) {
-			RCU_INIT_POINTER(dev->xps_maps, NULL);
-			kfree_rcu(dev_maps, rcu);
-		}
-	}
-	mutex_unlock(&xps_map_mutex);
-}
-
 static ssize_t store_xps_map(struct netdev_queue *queue,
 			     struct netdev_queue_attribute *attribute,
 			     const char *buf, size_t len)
 {
 	struct net_device *dev = queue->dev;
-	cpumask_var_t mask;
-	int err, i, cpu, pos, map_len, alloc_len, need_set;
 	unsigned long index;
-	struct xps_map *map, *new_map;
-	struct xps_dev_maps *dev_maps, *new_dev_maps;
-	int nonempty = 0;
-	int numa_node_id = -2;
+	cpumask_var_t mask;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -1066,105 +1026,11 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 		return err;
 	}
 
-	new_dev_maps = kzalloc(max_t(unsigned int,
-	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
-	if (!new_dev_maps) {
-		free_cpumask_var(mask);
-		return -ENOMEM;
-	}
-
-	mutex_lock(&xps_map_mutex);
-
-	dev_maps = xmap_dereference(dev->xps_maps);
-
-	for_each_possible_cpu(cpu) {
-		map = dev_maps ?
-			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
-		new_map = map;
-		if (map) {
-			for (pos = 0; pos < map->len; pos++)
-				if (map->queues[pos] == index)
-					break;
-			map_len = map->len;
-			alloc_len = map->alloc_len;
-		} else
-			pos = map_len = alloc_len = 0;
-
-		need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
-#ifdef CONFIG_NUMA
-		if (need_set) {
-			if (numa_node_id == -2)
-				numa_node_id = cpu_to_node(cpu);
-			else if (numa_node_id != cpu_to_node(cpu))
-				numa_node_id = -1;
-		}
-#endif
-		if (need_set && pos >= map_len) {
-			/* Need to add queue to this CPU's map */
-			if (map_len >= alloc_len) {
-				alloc_len = alloc_len ?
-				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
-				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
-						       GFP_KERNEL,
-						       cpu_to_node(cpu));
-				if (!new_map)
-					goto error;
-				new_map->alloc_len = alloc_len;
-				for (i = 0; i < map_len; i++)
-					new_map->queues[i] = map->queues[i];
-				new_map->len = map_len;
-			}
-			new_map->queues[new_map->len++] = index;
-		} else if (!need_set && pos < map_len) {
-			/* Need to remove queue from this CPU's map */
-			if (map_len > 1)
-				new_map->queues[pos] =
-				    new_map->queues[--new_map->len];
-			else
-				new_map = NULL;
-		}
-		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
-	}
-
-	/* Cleanup old maps */
-	for_each_possible_cpu(cpu) {
-		map = dev_maps ?
-			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
-		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
-			kfree_rcu(map, rcu);
-		if (new_dev_maps->cpu_map[cpu])
-			nonempty = 1;
-	}
-
-	if (nonempty) {
-		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
-	} else {
-		kfree(new_dev_maps);
-		RCU_INIT_POINTER(dev->xps_maps, NULL);
-	}
-
-	if (dev_maps)
-		kfree_rcu(dev_maps, rcu);
-
-	netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
-					    NUMA_NO_NODE);
-
-	mutex_unlock(&xps_map_mutex);
+	err = netif_set_xps_queue(dev, mask, index);
 
 	free_cpumask_var(mask);
-	return len;
 
-error:
-	mutex_unlock(&xps_map_mutex);
-
-	if (new_dev_maps)
-		for_each_possible_cpu(i)
-			kfree(rcu_dereference_protected(
-				new_dev_maps->cpu_map[i],
-				1));
-	kfree(new_dev_maps);
-	free_cpumask_var(mask);
-	return -ENOMEM;
+	return err ? : len;
 }
 
 static struct netdev_queue_attribute xps_cpus_attribute =
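
After the rewrite, store_xps_map() keeps only parsing and policy: bitmap_parse() converts the user-supplied hex string into a cpumask, netif_set_xps_queue() (a helper in the net core) performs the map rebuild, RCU pointer swap, and NUMA-node bookkeeping that previously lived here, and "err ? : len" is the GNU shorthand for "err ? err : len". The sysfs interface itself is unchanged; a minimal userspace sketch, with interface and queue names assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "eth0" and "tx-0" are illustrative. */
	const char *path = "/sys/class/net/eth0/queues/tx-0/xps_cpus";
	const char mask[] = "f";	/* hex cpumask: CPUs 0-3 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The store handler returns -EPERM without CAP_NET_ADMIN. */
	if (write(fd, mask, sizeof(mask) - 1) < 0)
		perror("write");
	close(fd);
	return 0;
}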
@@ -1183,10 +1049,6 @@ static void netdev_queue_release(struct kobject *kobj)
 {
 	struct netdev_queue *queue = to_netdev_queue(kobj);
 
-#ifdef CONFIG_XPS
-	xps_queue_release(queue);
-#endif
-
 	memset(kobj, 0, sizeof(*kobj));
 	dev_put(queue->dev);
 }
@@ -1334,7 +1196,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
 };
 EXPORT_SYMBOL_GPL(net_ns_type_operations);
 
-#ifdef CONFIG_HOTPLUG
 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 {
 	struct net_device *dev = to_net_dev(d);
@@ -1353,7 +1214,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 exit:
 	return retval;
 }
-#endif
 
 /*
  * netdev_release -- destroy and free a dead device.
@@ -1382,9 +1242,7 @@ static struct class net_class = {
 #ifdef CONFIG_SYSFS
 	.dev_attrs = net_class_attributes,
 #endif /* CONFIG_SYSFS */
-#ifdef CONFIG_HOTPLUG
 	.dev_uevent = netdev_uevent,
-#endif
 	.ns_type = &net_ns_type_operations,
 	.namespace = net_namespace,
 };
@@ -1400,6 +1258,8 @@ void netdev_unregister_kobject(struct net_device * net)
 
 	remove_queue_kobjects(net);
 
+	pm_runtime_set_memalloc_noio(dev, false);
+
 	device_del(dev);
 }
 
@@ -1444,6 +1304,8 @@ int netdev_register_kobject(struct net_device *net)
 		return error;
 	}
 
+	pm_runtime_set_memalloc_noio(dev, true);
+
 	return error;
 }
 
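
On the pm_runtime_set_memalloc_noio() pairing: registering the netdev marks the device (and its ancestors) so that runtime-PM callbacks allocate memory with GFP_NOIO, which avoids deadlocking a runtime resume issued from memory reclaim when the network device sits on the swap/IO path; unregistering clears the mark. The helper is provided by the runtime-PM core on kernels that have it:

/* include/linux/pm_runtime.h */
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);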