about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-10-02 02:11:55 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-05 03:23:44 -0400
commit24824a09e35402b8d58dcc5be803a5ad3937bdba (patch)
tree65c5fa4046646623b130702c9abc92c485ec575b /net/core
parent0bd9e6a964d86a19f54a9ba31168a37d64e451d1 (diff)
net: dynamic ingress_queue allocation
ingress being not used very much, and net_device->ingress_queue being quite a big object (128 or 256 bytes), use a dynamic allocation if needed (tc qdisc add dev eth0 ingress ...) dev_ingress_queue(dev) helper should be used only with RTNL taken. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c34
1 files changed, 26 insertions, 8 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index a313bab1b75..ce6ad88c980 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2702,11 +2702,10 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  * the ingress scheduler, you just cant add policies on ingress.
  *
  */
 -static int ing_filter(struct sk_buff *skb)
 +static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
  {
  	struct net_device *dev = skb->dev;
  	u32 ttl = G_TC_RTTL(skb->tc_verd);
 -	struct netdev_queue *rxq;
  	int result = TC_ACT_OK;
  	struct Qdisc *q;
 
@@ -2720,8 +2719,6 @@ static int ing_filter(struct sk_buff *skb)
  	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
  	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
 -	rxq = &dev->ingress_queue;
 -
  	q = rxq->qdisc;
  	if (q != &noop_qdisc) {
  		spin_lock(qdisc_lock(q));
@@ -2737,7 +2734,9 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
  					 struct packet_type **pt_prev,
  					 int *ret, struct net_device *orig_dev)
  {
 -	if (skb->dev->ingress_queue.qdisc == &noop_qdisc)
 +	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 +
 +	if (!rxq || rxq->qdisc == &noop_qdisc)
  		goto out;
 
  	if (*pt_prev) {
@@ -2745,7 +2744,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
  		*pt_prev = NULL;
  	}
 
 -	switch (ing_filter(skb)) {
 +	switch (ing_filter(skb, rxq)) {
  	case TC_ACT_SHOT:
  	case TC_ACT_STOLEN:
  		kfree_skb(skb);
@@ -4940,7 +4939,6 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 static void netdev_init_queue_locks(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
-	__netdev_init_queue_locks_one(dev, &dev->ingress_queue, NULL);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5452,11 +5450,29 @@ static void netdev_init_one_queue(struct net_device *dev,
 
 static void netdev_init_queues(struct net_device *dev)
 {
-	netdev_init_one_queue(dev, &dev->ingress_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
 	spin_lock_init(&dev->tx_global_lock);
 }
 
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
+{
+	struct netdev_queue *queue = dev_ingress_queue(dev);
+
+#ifdef CONFIG_NET_CLS_ACT
+	if (queue)
+		return queue;
+	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	netdev_init_one_queue(dev, queue, NULL);
+	__netdev_init_queue_locks_one(dev, queue, NULL);
+	queue->qdisc = &noop_qdisc;
+	queue->qdisc_sleeping = &noop_qdisc;
+	rcu_assign_pointer(dev->ingress_queue, queue);
+#endif
+	return queue;
+}
+
 /**
  * alloc_netdev_mq - allocate network device
  * @sizeof_priv: size of private data to allocate space for
@@ -5559,6 +5575,8 @@ void free_netdev(struct net_device *dev)
 
 	kfree(dev->_tx);
 
+	kfree(rcu_dereference_raw(dev->ingress_queue));
+
 	/* Flush device addresses */
 	dev_addr_flush(dev);
 