author		Eric Dumazet <eric.dumazet@gmail.com>	2010-10-02 02:11:55 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-05 03:23:44 -0400
commit		24824a09e35402b8d58dcc5be803a5ad3937bdba (patch)
tree		65c5fa4046646623b130702c9abc92c485ec575b /net
parent		0bd9e6a964d86a19f54a9ba31168a37d64e451d1 (diff)
net: dynamic ingress_queue allocation
Since ingress is not used very much, and net_device->ingress_queue is
quite a big object (128 or 256 bytes), allocate it dynamically, only
when needed (tc qdisc add dev eth0 ingress ...).

The dev_ingress_queue(dev) helper must be used only with RTNL held.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
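
[The diffstat below is limited to 'net', so the include/linux/netdevice.h
side of the patch (the RCU-managed pointer and its accessor) is not shown.
A sketch of how the dev_ingress_queue() helper is plausibly defined there,
assuming the rtnl_dereference() accessor:

static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
	/* rtnl_dereference() asserts RTNL is held when RCU lockdep is on,
	 * matching the RTNL-only rule stated in the commit message. */
	return rtnl_dereference(dev->ingress_queue);
}
]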
Diffstat (limited to 'net')
 -rw-r--r--  net/core/dev.c          | 34
 -rw-r--r--  net/sched/sch_api.c     | 42
 -rw-r--r--  net/sched/sch_generic.c | 12
 3 files changed, 62 insertions(+), 26 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a313bab1b754..ce6ad88c980b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2702,11 +2702,10 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  * the ingress scheduler, you just cant add policies on ingress.
  *
  */
-static int ing_filter(struct sk_buff *skb)
+static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 {
 	struct net_device *dev = skb->dev;
 	u32 ttl = G_TC_RTTL(skb->tc_verd);
-	struct netdev_queue *rxq;
 	int result = TC_ACT_OK;
 	struct Qdisc *q;
 
@@ -2720,8 +2719,6 @@ static int ing_filter(struct sk_buff *skb)
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-	rxq = &dev->ingress_queue;
-
 	q = rxq->qdisc;
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
@@ -2737,7 +2734,9 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (skb->dev->ingress_queue.qdisc == &noop_qdisc)
+	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+
+	if (!rxq || rxq->qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2745,7 +2744,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 		*pt_prev = NULL;
 	}
 
-	switch (ing_filter(skb)) {
+	switch (ing_filter(skb, rxq)) {
 	case TC_ACT_SHOT:
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
@@ -4940,7 +4939,6 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 static void netdev_init_queue_locks(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
-	__netdev_init_queue_locks_one(dev, &dev->ingress_queue, NULL);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5452,11 +5450,29 @@ static void netdev_init_one_queue(struct net_device *dev,
 
 static void netdev_init_queues(struct net_device *dev)
 {
-	netdev_init_one_queue(dev, &dev->ingress_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
 	spin_lock_init(&dev->tx_global_lock);
 }
 
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
+{
+	struct netdev_queue *queue = dev_ingress_queue(dev);
+
+#ifdef CONFIG_NET_CLS_ACT
+	if (queue)
+		return queue;
+	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	netdev_init_one_queue(dev, queue, NULL);
+	__netdev_init_queue_locks_one(dev, queue, NULL);
+	queue->qdisc = &noop_qdisc;
+	queue->qdisc_sleeping = &noop_qdisc;
+	rcu_assign_pointer(dev->ingress_queue, queue);
+#endif
+	return queue;
+}
+
 /**
  *	alloc_netdev_mq - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
@@ -5559,6 +5575,8 @@ void free_netdev(struct net_device *dev)
 
 	kfree(dev->_tx);
 
+	kfree(rcu_dereference_raw(dev->ingress_queue));
+
 	/* Flush device addresses */
 	dev_addr_flush(dev);
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b8020784d0e9..b22ca2d1cebc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -240,7 +240,10 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 	if (q)
 		goto out;
 
-	q = qdisc_match_from_root(dev->ingress_queue.qdisc_sleeping, handle);
+	if (dev_ingress_queue(dev))
+		q = qdisc_match_from_root(
+			dev_ingress_queue(dev)->qdisc_sleeping,
+			handle);
 out:
 	return q;
 }
@@ -690,6 +693,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 	    (new && new->flags & TCQ_F_INGRESS)) {
 		num_q = 1;
 		ingress = 1;
+		if (!dev_ingress_queue(dev))
+			return -ENOENT;
 	}
 
 	if (dev->flags & IFF_UP)
@@ -701,7 +706,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 	}
 
 	for (i = 0; i < num_q; i++) {
-		struct netdev_queue *dev_queue = &dev->ingress_queue;
+		struct netdev_queue *dev_queue = dev_ingress_queue(dev);
 
 		if (!ingress)
 			dev_queue = netdev_get_tx_queue(dev, i);
@@ -979,7 +984,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 				return -ENOENT;
 			q = qdisc_leaf(p, clid);
 		} else { /* ingress */
-			q = dev->ingress_queue.qdisc_sleeping;
+			if (dev_ingress_queue(dev))
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
 		}
 	} else {
 		q = dev->qdisc;
@@ -1043,8 +1049,9 @@ replay:
 			if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
 				return -ENOENT;
 			q = qdisc_leaf(p, clid);
-		} else { /*ingress */
-			q = dev->ingress_queue.qdisc_sleeping;
+		} else { /* ingress */
+			if (dev_ingress_queue_create(dev))
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
 		}
 	} else {
 		q = dev->qdisc;
@@ -1123,11 +1130,14 @@ replay:
 create_n_graft:
 	if (!(n->nlmsg_flags&NLM_F_CREATE))
 		return -ENOENT;
-	if (clid == TC_H_INGRESS)
-		q = qdisc_create(dev, &dev->ingress_queue, p,
-				 tcm->tcm_parent, tcm->tcm_parent,
-				 tca, &err);
-	else {
+	if (clid == TC_H_INGRESS) {
+		if (dev_ingress_queue(dev))
+			q = qdisc_create(dev, dev_ingress_queue(dev), p,
+					 tcm->tcm_parent, tcm->tcm_parent,
+					 tca, &err);
+		else
+			err = -ENOENT;
+	} else {
 		struct netdev_queue *dev_queue;
 
 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
@@ -1304,8 +1314,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
 			goto done;
 
-		dev_queue = &dev->ingress_queue;
-		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+		dev_queue = dev_ingress_queue(dev);
+		if (dev_queue &&
+		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
+				       &q_idx, s_q_idx) < 0)
 			goto done;
 
 cont:
@@ -1595,8 +1607,10 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
 		goto done;
 
-	dev_queue = &dev->ingress_queue;
-	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+	dev_queue = dev_ingress_queue(dev);
+	if (dev_queue &&
+	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+				&t, s_t) < 0)
 		goto done;
 
 done:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 545278a1c478..3d57681bdb76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -753,7 +753,8 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
-	transition_one_qdisc(dev, &dev->ingress_queue, NULL);
+	if (dev_ingress_queue(dev))
+		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -812,7 +813,8 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	dev_deactivate_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -838,7 +840,8 @@ void dev_init_scheduler(struct net_device *dev)
 {
 	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -861,7 +864,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 	qdisc_destroy(dev->qdisc);
 	dev->qdisc = &noop_qdisc;
 
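
[Taken together, the patch settles on three distinct accessors for the single
RCU-managed ingress_queue pointer, one per locking context. A minimal
illustrative sketch, not part of the patch, using only names from the hunks
above:

	/* Control path, RTNL held: look up the queue, allocating it on the
	 * first "tc qdisc add dev eth0 ingress" if necessary. */
	struct netdev_queue *q = dev_ingress_queue_create(dev);

	/* Fast path, under rcu_read_lock(), as in handle_ing(): */
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
	if (rxq && rxq->qdisc != &noop_qdisc)
		result = ing_filter(skb, rxq);

	/* Teardown, no concurrent readers left, as in free_netdev(): */
	kfree(rcu_dereference_raw(dev->ingress_queue));
]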