diff options
author | Stephen Hemminger <stephen@networkplumber.org> | 2017-08-18 16:46:28 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-08-19 01:38:47 -0400 |
commit | 6648c65e7ea72c3b19ea908d046e4a47e90fd907 (patch) | |
tree | f4423395dcffa85449e328be72586aaff3d3d10b | |
parent | 667e427bc356a43e130cfc03ea4273603487cc69 (diff) |
net: style cleanups
Make code closer to current style. Mostly whitespace changes.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/core/net-sysfs.c | 68 |
1 file changed, 36 insertions, 32 deletions
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ca82c4a72350..927a6dcbad96 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -97,7 +97,8 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, | |||
97 | return restart_syscall(); | 97 | return restart_syscall(); |
98 | 98 | ||
99 | if (dev_isalive(netdev)) { | 99 | if (dev_isalive(netdev)) { |
100 | if ((ret = (*set)(netdev, new)) == 0) | 100 | ret = (*set)(netdev, new); |
101 | if (ret == 0) | ||
101 | ret = len; | 102 | ret = len; |
102 | } | 103 | } |
103 | rtnl_unlock(); | 104 | rtnl_unlock(); |
@@ -160,6 +161,7 @@ static ssize_t broadcast_show(struct device *dev, | |||
160 | struct device_attribute *attr, char *buf) | 161 | struct device_attribute *attr, char *buf) |
161 | { | 162 | { |
162 | struct net_device *ndev = to_net_dev(dev); | 163 | struct net_device *ndev = to_net_dev(dev); |
164 | |||
163 | if (dev_isalive(ndev)) | 165 | if (dev_isalive(ndev)) |
164 | return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); | 166 | return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); |
165 | return -EINVAL; | 167 | return -EINVAL; |
@@ -170,7 +172,7 @@ static int change_carrier(struct net_device *dev, unsigned long new_carrier) | |||
170 | { | 172 | { |
171 | if (!netif_running(dev)) | 173 | if (!netif_running(dev)) |
172 | return -EINVAL; | 174 | return -EINVAL; |
173 | return dev_change_carrier(dev, (bool) new_carrier); | 175 | return dev_change_carrier(dev, (bool)new_carrier); |
174 | } | 176 | } |
175 | 177 | ||
176 | static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, | 178 | static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, |
@@ -183,9 +185,10 @@ static ssize_t carrier_show(struct device *dev, | |||
183 | struct device_attribute *attr, char *buf) | 185 | struct device_attribute *attr, char *buf) |
184 | { | 186 | { |
185 | struct net_device *netdev = to_net_dev(dev); | 187 | struct net_device *netdev = to_net_dev(dev); |
186 | if (netif_running(netdev)) { | 188 | |
189 | if (netif_running(netdev)) | ||
187 | return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); | 190 | return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); |
188 | } | 191 | |
189 | return -EINVAL; | 192 | return -EINVAL; |
190 | } | 193 | } |
191 | static DEVICE_ATTR_RW(carrier); | 194 | static DEVICE_ATTR_RW(carrier); |
@@ -290,6 +293,7 @@ static ssize_t carrier_changes_show(struct device *dev, | |||
290 | char *buf) | 293 | char *buf) |
291 | { | 294 | { |
292 | struct net_device *netdev = to_net_dev(dev); | 295 | struct net_device *netdev = to_net_dev(dev); |
296 | |||
293 | return sprintf(buf, fmt_dec, | 297 | return sprintf(buf, fmt_dec, |
294 | atomic_read(&netdev->carrier_changes)); | 298 | atomic_read(&netdev->carrier_changes)); |
295 | } | 299 | } |
@@ -299,7 +303,7 @@ static DEVICE_ATTR_RO(carrier_changes); | |||
299 | 303 | ||
300 | static int change_mtu(struct net_device *dev, unsigned long new_mtu) | 304 | static int change_mtu(struct net_device *dev, unsigned long new_mtu) |
301 | { | 305 | { |
302 | return dev_set_mtu(dev, (int) new_mtu); | 306 | return dev_set_mtu(dev, (int)new_mtu); |
303 | } | 307 | } |
304 | 308 | ||
305 | static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, | 309 | static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, |
@@ -311,7 +315,7 @@ NETDEVICE_SHOW_RW(mtu, fmt_dec); | |||
311 | 315 | ||
312 | static int change_flags(struct net_device *dev, unsigned long new_flags) | 316 | static int change_flags(struct net_device *dev, unsigned long new_flags) |
313 | { | 317 | { |
314 | return dev_change_flags(dev, (unsigned int) new_flags); | 318 | return dev_change_flags(dev, (unsigned int)new_flags); |
315 | } | 319 | } |
316 | 320 | ||
317 | static ssize_t flags_store(struct device *dev, struct device_attribute *attr, | 321 | static ssize_t flags_store(struct device *dev, struct device_attribute *attr, |
@@ -362,8 +366,8 @@ static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) | |||
362 | } | 366 | } |
363 | 367 | ||
364 | static ssize_t gro_flush_timeout_store(struct device *dev, | 368 | static ssize_t gro_flush_timeout_store(struct device *dev, |
365 | struct device_attribute *attr, | 369 | struct device_attribute *attr, |
366 | const char *buf, size_t len) | 370 | const char *buf, size_t len) |
367 | { | 371 | { |
368 | if (!capable(CAP_NET_ADMIN)) | 372 | if (!capable(CAP_NET_ADMIN)) |
369 | return -EPERM; | 373 | return -EPERM; |
@@ -412,7 +416,7 @@ static DEVICE_ATTR_RW(ifalias); | |||
412 | 416 | ||
413 | static int change_group(struct net_device *dev, unsigned long new_group) | 417 | static int change_group(struct net_device *dev, unsigned long new_group) |
414 | { | 418 | { |
415 | dev_set_group(dev, (int) new_group); | 419 | dev_set_group(dev, (int)new_group); |
416 | return 0; | 420 | return 0; |
417 | } | 421 | } |
418 | 422 | ||
@@ -426,7 +430,7 @@ static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store); | |||
426 | 430 | ||
427 | static int change_proto_down(struct net_device *dev, unsigned long proto_down) | 431 | static int change_proto_down(struct net_device *dev, unsigned long proto_down) |
428 | { | 432 | { |
429 | return dev_change_proto_down(dev, (bool) proto_down); | 433 | return dev_change_proto_down(dev, (bool)proto_down); |
430 | } | 434 | } |
431 | 435 | ||
432 | static ssize_t proto_down_store(struct device *dev, | 436 | static ssize_t proto_down_store(struct device *dev, |
@@ -549,14 +553,14 @@ static ssize_t netstat_show(const struct device *d, | |||
549 | ssize_t ret = -EINVAL; | 553 | ssize_t ret = -EINVAL; |
550 | 554 | ||
551 | WARN_ON(offset > sizeof(struct rtnl_link_stats64) || | 555 | WARN_ON(offset > sizeof(struct rtnl_link_stats64) || |
552 | offset % sizeof(u64) != 0); | 556 | offset % sizeof(u64) != 0); |
553 | 557 | ||
554 | read_lock(&dev_base_lock); | 558 | read_lock(&dev_base_lock); |
555 | if (dev_isalive(dev)) { | 559 | if (dev_isalive(dev)) { |
556 | struct rtnl_link_stats64 temp; | 560 | struct rtnl_link_stats64 temp; |
557 | const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); | 561 | const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); |
558 | 562 | ||
559 | ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); | 563 | ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset)); |
560 | } | 564 | } |
561 | read_unlock(&dev_base_lock); | 565 | read_unlock(&dev_base_lock); |
562 | return ret; | 566 | return ret; |
@@ -565,7 +569,7 @@ static ssize_t netstat_show(const struct device *d, | |||
565 | /* generate a read-only statistics attribute */ | 569 | /* generate a read-only statistics attribute */ |
566 | #define NETSTAT_ENTRY(name) \ | 570 | #define NETSTAT_ENTRY(name) \ |
567 | static ssize_t name##_show(struct device *d, \ | 571 | static ssize_t name##_show(struct device *d, \ |
568 | struct device_attribute *attr, char *buf) \ | 572 | struct device_attribute *attr, char *buf) \ |
569 | { \ | 573 | { \ |
570 | return netstat_show(d, attr, buf, \ | 574 | return netstat_show(d, attr, buf, \ |
571 | offsetof(struct rtnl_link_stats64, name)); \ | 575 | offsetof(struct rtnl_link_stats64, name)); \ |
@@ -625,7 +629,6 @@ static struct attribute *netstat_attrs[] __ro_after_init = { | |||
625 | NULL | 629 | NULL |
626 | }; | 630 | }; |
627 | 631 | ||
628 | |||
629 | static const struct attribute_group netstat_group = { | 632 | static const struct attribute_group netstat_group = { |
630 | .name = "statistics", | 633 | .name = "statistics", |
631 | .attrs = netstat_attrs, | 634 | .attrs = netstat_attrs, |
@@ -647,8 +650,8 @@ static const struct attribute_group wireless_group = { | |||
647 | #endif /* CONFIG_SYSFS */ | 650 | #endif /* CONFIG_SYSFS */ |
648 | 651 | ||
649 | #ifdef CONFIG_SYSFS | 652 | #ifdef CONFIG_SYSFS |
650 | #define to_rx_queue_attr(_attr) container_of(_attr, \ | 653 | #define to_rx_queue_attr(_attr) \ |
651 | struct rx_queue_attribute, attr) | 654 | container_of(_attr, struct rx_queue_attribute, attr) |
652 | 655 | ||
653 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) | 656 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) |
654 | 657 | ||
@@ -725,8 +728,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, | |||
725 | } | 728 | } |
726 | 729 | ||
727 | map = kzalloc(max_t(unsigned int, | 730 | map = kzalloc(max_t(unsigned int, |
728 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), | 731 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), |
729 | GFP_KERNEL); | 732 | GFP_KERNEL); |
730 | if (!map) { | 733 | if (!map) { |
731 | free_cpumask_var(mask); | 734 | free_cpumask_var(mask); |
732 | return -ENOMEM; | 735 | return -ENOMEM; |
@@ -736,9 +739,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, | |||
736 | for_each_cpu_and(cpu, mask, cpu_online_mask) | 739 | for_each_cpu_and(cpu, mask, cpu_online_mask) |
737 | map->cpus[i++] = cpu; | 740 | map->cpus[i++] = cpu; |
738 | 741 | ||
739 | if (i) | 742 | if (i) { |
740 | map->len = i; | 743 | map->len = i; |
741 | else { | 744 | } else { |
742 | kfree(map); | 745 | kfree(map); |
743 | map = NULL; | 746 | map = NULL; |
744 | } | 747 | } |
@@ -827,8 +830,9 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | |||
827 | table->mask = mask; | 830 | table->mask = mask; |
828 | for (count = 0; count <= mask; count++) | 831 | for (count = 0; count <= mask; count++) |
829 | table->flows[count].cpu = RPS_NO_CPU; | 832 | table->flows[count].cpu = RPS_NO_CPU; |
830 | } else | 833 | } else { |
831 | table = NULL; | 834 | table = NULL; |
835 | } | ||
832 | 836 | ||
833 | spin_lock(&rps_dev_flow_lock); | 837 | spin_lock(&rps_dev_flow_lock); |
834 | old_table = rcu_dereference_protected(queue->rps_flow_table, | 838 | old_table = rcu_dereference_protected(queue->rps_flow_table, |
@@ -865,7 +869,6 @@ static void rx_queue_release(struct kobject *kobj) | |||
865 | struct rps_map *map; | 869 | struct rps_map *map; |
866 | struct rps_dev_flow_table *flow_table; | 870 | struct rps_dev_flow_table *flow_table; |
867 | 871 | ||
868 | |||
869 | map = rcu_dereference_protected(queue->rps_map, 1); | 872 | map = rcu_dereference_protected(queue->rps_map, 1); |
870 | if (map) { | 873 | if (map) { |
871 | RCU_INIT_POINTER(queue->rps_map, NULL); | 874 | RCU_INIT_POINTER(queue->rps_map, NULL); |
@@ -910,7 +913,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) | |||
910 | 913 | ||
911 | kobj->kset = dev->queues_kset; | 914 | kobj->kset = dev->queues_kset; |
912 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, | 915 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, |
913 | "rx-%u", index); | 916 | "rx-%u", index); |
914 | if (error) | 917 | if (error) |
915 | return error; | 918 | return error; |
916 | 919 | ||
@@ -974,8 +977,8 @@ struct netdev_queue_attribute { | |||
974 | ssize_t (*store)(struct netdev_queue *queue, | 977 | ssize_t (*store)(struct netdev_queue *queue, |
975 | const char *buf, size_t len); | 978 | const char *buf, size_t len); |
976 | }; | 979 | }; |
977 | #define to_netdev_queue_attr(_attr) container_of(_attr, \ | 980 | #define to_netdev_queue_attr(_attr) \ |
978 | struct netdev_queue_attribute, attr) | 981 | container_of(_attr, struct netdev_queue_attribute, attr) |
979 | 982 | ||
980 | #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) | 983 | #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) |
981 | 984 | ||
@@ -1104,9 +1107,9 @@ static ssize_t bql_set(const char *buf, const size_t count, | |||
1104 | unsigned int value; | 1107 | unsigned int value; |
1105 | int err; | 1108 | int err; |
1106 | 1109 | ||
1107 | if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) | 1110 | if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { |
1108 | value = DQL_MAX_LIMIT; | 1111 | value = DQL_MAX_LIMIT; |
1109 | else { | 1112 | } else { |
1110 | err = kstrtouint(buf, 10, &value); | 1113 | err = kstrtouint(buf, 10, &value); |
1111 | if (err < 0) | 1114 | if (err < 0) |
1112 | return err; | 1115 | return err; |
@@ -1320,7 +1323,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) | |||
1320 | 1323 | ||
1321 | kobj->kset = dev->queues_kset; | 1324 | kobj->kset = dev->queues_kset; |
1322 | error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, | 1325 | error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, |
1323 | "tx-%u", index); | 1326 | "tx-%u", index); |
1324 | if (error) | 1327 | if (error) |
1325 | return error; | 1328 | return error; |
1326 | 1329 | ||
@@ -1377,7 +1380,7 @@ static int register_queue_kobjects(struct net_device *dev) | |||
1377 | 1380 | ||
1378 | #ifdef CONFIG_SYSFS | 1381 | #ifdef CONFIG_SYSFS |
1379 | dev->queues_kset = kset_create_and_add("queues", | 1382 | dev->queues_kset = kset_create_and_add("queues", |
1380 | NULL, &dev->dev.kobj); | 1383 | NULL, &dev->dev.kobj); |
1381 | if (!dev->queues_kset) | 1384 | if (!dev->queues_kset) |
1382 | return -ENOMEM; | 1385 | return -ENOMEM; |
1383 | real_rx = dev->real_num_rx_queues; | 1386 | real_rx = dev->real_num_rx_queues; |
@@ -1467,7 +1470,8 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) | |||
1467 | 1470 | ||
1468 | /* pass ifindex to uevent. | 1471 | /* pass ifindex to uevent. |
1469 | * ifindex is useful as it won't change (interface name may change) | 1472 | * ifindex is useful as it won't change (interface name may change) |
1470 | * and is what RtNetlink uses natively. */ | 1473 | * and is what RtNetlink uses natively. |
1474 | */ | ||
1471 | retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); | 1475 | retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); |
1472 | 1476 | ||
1473 | exit: | 1477 | exit: |
@@ -1542,7 +1546,7 @@ EXPORT_SYMBOL(of_find_net_device_by_node); | |||
1542 | */ | 1546 | */ |
1543 | void netdev_unregister_kobject(struct net_device *ndev) | 1547 | void netdev_unregister_kobject(struct net_device *ndev) |
1544 | { | 1548 | { |
1545 | struct device *dev = &(ndev->dev); | 1549 | struct device *dev = &ndev->dev; |
1546 | 1550 | ||
1547 | if (!atomic_read(&dev_net(ndev)->count)) | 1551 | if (!atomic_read(&dev_net(ndev)->count)) |
1548 | dev_set_uevent_suppress(dev, 1); | 1552 | dev_set_uevent_suppress(dev, 1); |
@@ -1559,7 +1563,7 @@ void netdev_unregister_kobject(struct net_device *ndev) | |||
1559 | /* Create sysfs entries for network device. */ | 1563 | /* Create sysfs entries for network device. */ |
1560 | int netdev_register_kobject(struct net_device *ndev) | 1564 | int netdev_register_kobject(struct net_device *ndev) |
1561 | { | 1565 | { |
1562 | struct device *dev = &(ndev->dev); | 1566 | struct device *dev = &ndev->dev; |
1563 | const struct attribute_group **groups = ndev->sysfs_groups; | 1567 | const struct attribute_group **groups = ndev->sysfs_groups; |
1564 | int error = 0; | 1568 | int error = 0; |
1565 | 1569 | ||