aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/net-sysfs.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-05-21 00:04:44 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-21 00:04:44 -0400
commitf8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree3706a9cd779859271ca61b85c63a1bc3f82d626e /net/core/net-sysfs.c
parenta26272e5200765691e67d6780e52b32498fdb659 (diff)
parent2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits) qlcnic: adding co maintainer ixgbe: add support for active DA cables ixgbe: dcb, do not tag tc_prio_control frames ixgbe: fix ixgbe_tx_is_paused logic ixgbe: always enable vlan strip/insert when DCB is enabled ixgbe: remove some redundant code in setting FCoE FIP filter ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp ixgbe: fix header len when unsplit packet overflows to data buffer ipv6: Never schedule DAD timer on dead address ipv6: Use POSTDAD state ipv6: Use state_lock to protect ifa state ipv6: Replace inet6_ifaddr->dead with state cxgb4: notify upper drivers if the device is already up when they load cxgb4: keep interrupts available when the ports are brought down cxgb4: fix initial addition of MAC address cnic: Return SPQ credit to bnx2x after ring setup and shutdown. cnic: Convert cnic_local_flags to atomic ops. can: Fix SJA1000 command register writes on SMP systems bridge: fix build for CONFIG_SYSFS disabled ARCNET: Limit com20020 PCI ID matches for SOHARD cards ... Fix up various conflicts with pcmcia tree drivers/net/ {pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and wireless/orinoco/spectrum_cs.c} and feature removal (Documentation/feature-removal-schedule.txt). Also fix a non-content conflict due to pm_qos_requirement getting renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'net/core/net-sysfs.c')
-rw-r--r--net/core/net-sysfs.c318
1 files changed, 317 insertions, 1 deletions
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 59cfc7d8fc45..c57c4b228bb5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -17,6 +17,7 @@
17#include <net/sock.h> 17#include <net/sock.h>
18#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
19#include <linux/wireless.h> 19#include <linux/wireless.h>
20#include <linux/vmalloc.h>
20#include <net/wext.h> 21#include <net/wext.h>
21 22
22#include "net-sysfs.h" 23#include "net-sysfs.h"
@@ -467,6 +468,304 @@ static struct attribute_group wireless_group = {
467}; 468};
468#endif 469#endif
469 470
471#ifdef CONFIG_RPS
472/*
473 * RX queue sysfs structures and functions.
474 */
/*
 * Per-file descriptor for an rx-queue sysfs attribute: wraps a generic
 * struct attribute with show/store callbacks that are handed the owning
 * netdev_rx_queue instead of a raw kobject.
 */
475struct rx_queue_attribute {
476 struct attribute attr;
477 ssize_t (*show)(struct netdev_rx_queue *queue,
478 struct rx_queue_attribute *attr, char *buf);
479 ssize_t (*store)(struct netdev_rx_queue *queue,
480 struct rx_queue_attribute *attr, const char *buf, size_t len);
481};
/* Recover the rx_queue_attribute wrapper from the embedded struct attribute. */
482#define to_rx_queue_attr(_attr) container_of(_attr, \
483 struct rx_queue_attribute, attr)
484
/* Recover the netdev_rx_queue from its embedded kobject. */
485#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
486
/*
 * Generic sysfs show dispatcher: translate the kobject/attribute pair into
 * the rx-queue-specific callback. -EIO if the attribute is write-only.
 */
487static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
488 char *buf)
489{
490 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
491 struct netdev_rx_queue *queue = to_rx_queue(kobj);
492
493 if (!attribute->show)
494 return -EIO;
495
496 return attribute->show(queue, attribute, buf);
497}
498
/*
 * Generic sysfs store dispatcher: mirror of rx_queue_attr_show for writes.
 * -EIO if the attribute is read-only.
 */
499static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
500 const char *buf, size_t count)
501{
502 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
503 struct netdev_rx_queue *queue = to_rx_queue(kobj);
504
505 if (!attribute->store)
506 return -EIO;
507
508 return attribute->store(queue, attribute, buf, count);
509}
510
/* sysfs_ops vtable wired into rx_queue_ktype below. */
511static struct sysfs_ops rx_queue_sysfs_ops = {
512 .show = rx_queue_attr_show,
513 .store = rx_queue_attr_store,
514};
515
/*
 * "rps_cpus" read handler: format the queue's current RPS CPU map as a
 * hex cpumask string. Returns bytes written, -ENOMEM if the temporary
 * mask cannot be allocated, -EINVAL if the formatted mask nearly fills
 * the page (no room left for the trailing newline).
 */
516static ssize_t show_rps_map(struct netdev_rx_queue *queue,
517 struct rx_queue_attribute *attribute, char *buf)
518{
519 struct rps_map *map;
520 cpumask_var_t mask;
521 size_t len = 0;
522 int i;
523
524 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
525 return -ENOMEM;
526
	/* rps_map is RCU-protected; rebuild a cpumask from its CPU array. */
527 rcu_read_lock();
528 map = rcu_dereference(queue->rps_map);
529 if (map)
530 for (i = 0; i < map->len; i++)
531 cpumask_set_cpu(map->cpus[i], mask);
532
	/* len is 0 here, so passing PAGE_SIZE as the bound is correct. */
533 len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
534 if (PAGE_SIZE - len < 3) {
535 rcu_read_unlock();
536 free_cpumask_var(mask);
537 return -EINVAL;
538 }
539 rcu_read_unlock();
540
541 free_cpumask_var(mask);
542 len += sprintf(buf + len, "\n");
543 return len;
544}
545
/* RCU callback: free an rps_map once no readers can still reference it. */
546static void rps_map_release(struct rcu_head *rcu)
547{
548 struct rps_map *map = container_of(rcu, struct rps_map, rcu);
549
550 kfree(map);
551}
552
/*
 * "rps_cpus" write handler: parse a hex cpumask from userspace, build a
 * new rps_map restricted to online CPUs, and publish it via RCU.
 * Requires CAP_NET_ADMIN. Returns len on success, -EPERM/-ENOMEM or the
 * bitmap_parse() error on failure. Writers are serialized by a
 * function-local static spinlock; readers are lockless under RCU.
 */
553static ssize_t store_rps_map(struct netdev_rx_queue *queue,
554 struct rx_queue_attribute *attribute,
555 const char *buf, size_t len)
556{
557 struct rps_map *old_map, *map;
558 cpumask_var_t mask;
559 int err, cpu, i;
560 static DEFINE_SPINLOCK(rps_map_lock);
561
562 if (!capable(CAP_NET_ADMIN))
563 return -EPERM;
564
565 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
566 return -ENOMEM;
567
568 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
569 if (err) {
570 free_cpumask_var(mask);
571 return err;
572 }
573
	/* Pad to a cache line so the map doesn't false-share with neighbours. */
574 map = kzalloc(max_t(unsigned,
575 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
576 GFP_KERNEL);
577 if (!map) {
578 free_cpumask_var(mask);
579 return -ENOMEM;
580 }
581
	/* Only online CPUs make it into the published map. */
582 i = 0;
583 for_each_cpu_and(cpu, mask, cpu_online_mask)
584 map->cpus[i++] = cpu;
585
	/* An empty mask disables RPS for this queue (NULL map). */
586 if (i)
587 map->len = i;
588 else {
589 kfree(map);
590 map = NULL;
591 }
592
593 spin_lock(&rps_map_lock);
594 old_map = queue->rps_map;
595 rcu_assign_pointer(queue->rps_map, map);
596 spin_unlock(&rps_map_lock);
597
	/* Defer freeing the old map until all RCU readers have drained. */
598 if (old_map)
599 call_rcu(&old_map->rcu, rps_map_release);
600
601 free_cpumask_var(mask);
602 return len;
603}
604
/*
 * "rps_flow_cnt" read handler: report the size (entry count) of the
 * queue's RPS flow table, or 0 when no table is installed. The table
 * size is stored as mask = size - 1 (size is a power of two).
 */
605static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
606 struct rx_queue_attribute *attr,
607 char *buf)
608{
609 struct rps_dev_flow_table *flow_table;
610 unsigned int val = 0;
611
612 rcu_read_lock();
613 flow_table = rcu_dereference(queue->rps_flow_table);
614 if (flow_table)
615 val = flow_table->mask + 1;
616 rcu_read_unlock();
617
618 return sprintf(buf, "%u\n", val);
619}
620
/*
 * Workqueue handler that actually vfree()s a retired flow table.
 * Runs in process context, as required by vfree (the RCU callback
 * below cannot vfree directly from softirq context).
 */
621static void rps_dev_flow_table_release_work(struct work_struct *work)
622{
623 struct rps_dev_flow_table *table = container_of(work,
624 struct rps_dev_flow_table, free_work);
625
626 vfree(table);
627}
628
/*
 * RCU callback for a retired flow table: bounce the free to a workqueue
 * so the vmalloc'd memory is released from process context.
 */
629static void rps_dev_flow_table_release(struct rcu_head *rcu)
630{
631 struct rps_dev_flow_table *table = container_of(rcu,
632 struct rps_dev_flow_table, rcu);
633
634 INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
635 schedule_work(&table->free_work);
636}
637
/*
 * "rps_flow_cnt" write handler: (re)allocate the queue's RPS flow table
 * to hold the requested number of entries (rounded up to a power of
 * two), or tear it down when 0 is written. Requires CAP_NET_ADMIN.
 * Returns len on success, -EPERM/-EINVAL/-ENOMEM on failure. As with
 * store_rps_map, a local static spinlock serializes writers and the old
 * table is reclaimed via RCU + workqueue.
 */
638static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
639 struct rx_queue_attribute *attr,
640 const char *buf, size_t len)
641{
642 unsigned int count;
643 char *endp;
644 struct rps_dev_flow_table *table, *old_table;
645 static DEFINE_SPINLOCK(rps_dev_flow_lock);
646
647 if (!capable(CAP_NET_ADMIN))
648 return -EPERM;
649
650 count = simple_strtoul(buf, &endp, 0);
651 if (endp == buf)
652 return -EINVAL;
653
654 if (count) {
655 int i;
656
		/* Cap at 2^30 so RPS_DEV_FLOW_TABLE_SIZE cannot overflow. */
657 if (count > 1<<30) {
658 /* Enforce a limit to prevent overflow */
659 return -EINVAL;
660 }
661 count = roundup_pow_of_two(count);
662 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
663 if (!table)
664 return -ENOMEM;
665
		/* Mark every slot unassigned until flows are steered. */
666 table->mask = count - 1;
667 for (i = 0; i < count; i++)
668 table->flows[i].cpu = RPS_NO_CPU;
669 } else
670 table = NULL;
671
672 spin_lock(&rps_dev_flow_lock);
673 old_table = queue->rps_flow_table;
674 rcu_assign_pointer(queue->rps_flow_table, table);
675 spin_unlock(&rps_dev_flow_lock);
676
677 if (old_table)
678 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
679
680 return len;
681}
682
/* Attribute files created under every /sys/class/net/<dev>/queues/rx-N/. */
683static struct rx_queue_attribute rps_cpus_attribute =
684 __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
685
686
687static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
688 __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
689 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
690
/* NULL-terminated list installed via rx_queue_ktype.default_attrs. */
691static struct attribute *rx_queue_default_attrs[] = {
692 &rps_cpus_attribute.attr,
693 &rps_dev_flow_table_cnt_attribute.attr,
694 NULL
695};
696
/*
 * kobject release: called when the last reference to a queue's kobject
 * drops. Retires the queue's RPS state via RCU, then frees the shared
 * rx-queue array (queue->first) once every sibling queue has released
 * (refcounted via first->count).
 */
697static void rx_queue_release(struct kobject *kobj)
698{
699 struct netdev_rx_queue *queue = to_rx_queue(kobj);
700 struct netdev_rx_queue *first = queue->first;
701
702 if (queue->rps_map)
703 call_rcu(&queue->rps_map->rcu, rps_map_release);
704
705 if (queue->rps_flow_table)
706 call_rcu(&queue->rps_flow_table->rcu,
707 rps_dev_flow_table_release);
708
709 if (atomic_dec_and_test(&first->count))
710 kfree(first);
711}
712
/* kobject type for rx-queue directories: ops, release and default files. */
713static struct kobj_type rx_queue_ktype = {
714 .sysfs_ops = &rx_queue_sysfs_ops,
715 .release = rx_queue_release,
716 .default_attrs = rx_queue_default_attrs,
717};
718
/*
 * Register sysfs kobject "rx-<index>" for one rx queue under the
 * device's "queues" kset. On init/add failure the half-initialized
 * kobject is dropped with kobject_put (which invokes the release hook).
 * Returns 0 on success or the kobject_init_and_add error.
 */
719static int rx_queue_add_kobject(struct net_device *net, int index)
720{
721 struct netdev_rx_queue *queue = net->_rx + index;
722 struct kobject *kobj = &queue->kobj;
723 int error = 0;
724
725 kobj->kset = net->queues_kset;
726 error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
727 "rx-%u", index);
728 if (error) {
729 kobject_put(kobj);
730 return error;
731 }
732
733 kobject_uevent(kobj, KOBJ_ADD);
734
735 return error;
736}
737
/*
 * Create the per-device "queues" kset and one kobject per rx queue.
 * On partial failure, unwinds the queues already registered (but note:
 * the queues_kset itself is left for the caller/unregister path).
 * Returns 0 on success, -ENOMEM or the per-queue registration error.
 */
738static int rx_queue_register_kobjects(struct net_device *net)
739{
740 int i;
741 int error = 0;
742
743 net->queues_kset = kset_create_and_add("queues",
744 NULL, &net->dev.kobj);
745 if (!net->queues_kset)
746 return -ENOMEM;
747 for (i = 0; i < net->num_rx_queues; i++) {
748 error = rx_queue_add_kobject(net, i);
749 if (error)
750 break;
751 }
752
	/* Roll back the kobjects added before the failing index. */
753 if (error)
754 while (--i >= 0)
755 kobject_put(&net->_rx[i].kobj);
756
757 return error;
758}
759
/*
 * Tear down all rx-queue kobjects and the "queues" kset; inverse of
 * rx_queue_register_kobjects, called from netdev_unregister_kobject.
 */
760static void rx_queue_remove_kobjects(struct net_device *net)
761{
762 int i;
763
764 for (i = 0; i < net->num_rx_queues; i++)
765 kobject_put(&net->_rx[i].kobj);
766 kset_unregister(net->queues_kset);
767}
768#endif /* CONFIG_RPS */
470#endif /* CONFIG_SYSFS */ 769#endif /* CONFIG_SYSFS */
471 770
472#ifdef CONFIG_HOTPLUG 771#ifdef CONFIG_HOTPLUG
@@ -530,6 +829,10 @@ void netdev_unregister_kobject(struct net_device * net)
530 if (!net_eq(dev_net(net), &init_net)) 829 if (!net_eq(dev_net(net), &init_net))
531 return; 830 return;
532 831
832#ifdef CONFIG_RPS
833 rx_queue_remove_kobjects(net);
834#endif
835
533 device_del(dev); 836 device_del(dev);
534} 837}
535 838
@@ -538,6 +841,7 @@ int netdev_register_kobject(struct net_device *net)
538{ 841{
539 struct device *dev = &(net->dev); 842 struct device *dev = &(net->dev);
540 const struct attribute_group **groups = net->sysfs_groups; 843 const struct attribute_group **groups = net->sysfs_groups;
844 int error = 0;
541 845
542 dev->class = &net_class; 846 dev->class = &net_class;
543 dev->platform_data = net; 847 dev->platform_data = net;
@@ -564,7 +868,19 @@ int netdev_register_kobject(struct net_device *net)
564 if (!net_eq(dev_net(net), &init_net)) 868 if (!net_eq(dev_net(net), &init_net))
565 return 0; 869 return 0;
566 870
567 return device_add(dev); 871 error = device_add(dev);
872 if (error)
873 return error;
874
875#ifdef CONFIG_RPS
876 error = rx_queue_register_kobjects(net);
877 if (error) {
878 device_del(dev);
879 return error;
880 }
881#endif
882
883 return error;
568} 884}
569 885
570int netdev_class_create_file(struct class_attribute *class_attr) 886int netdev_class_create_file(struct class_attribute *class_attr)