author     David S. Miller <davem@davemloft.net>  2008-07-08 20:33:13 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-08 20:33:13 -0400
commit     555353cfa1aee293de445bfa6de43276138ddd82 (patch)
tree       b5daba85806b8e36731c4a474aac97f1a0140a51 /net
parent     dc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84 (diff)
netdev: The ingress_lock member is no longer needed.
Every qdisc is associated with a queue, and in the case of ingress
qdiscs that will now be netdev->rx_queue, so using that queue's lock is
the thing to do.
Signed-off-by: David S. Miller <davem@davemloft.net>
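As a reading aid for the diff below, this is a minimal sketch of the locking pattern the ingress path moves to: the per-device rx_queue's spinlock serializes ingress qdisc enqueue, replacing the removed dev->ingress_lock (variable names taken from the net/core/dev.c hunk).

	/* Sketch: serialize ingress enqueue on the rx_queue lock
	 * instead of the former dev->ingress_lock. */
	struct netdev_queue *rxq = &dev->rx_queue;

	spin_lock(&rxq->lock);
	if ((q = dev->qdisc_ingress) != NULL)
		result = q->enqueue(skb, q);
	spin_unlock(&rxq->lock);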
Diffstat (limited to 'net')
-rw-r--r--   net/core/dev.c           | 12
-rw-r--r--   net/sched/sch_api.c      |  3
-rw-r--r--   net/sched/sch_generic.c  | 10
3 files changed, 13 insertions, 12 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 05011048b86c..2322fb69fd53 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2014,10 +2014,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
  */
 static int ing_filter(struct sk_buff *skb)
 {
-	struct Qdisc *q;
 	struct net_device *dev = skb->dev;
-	int result = TC_ACT_OK;
 	u32 ttl = G_TC_RTTL(skb->tc_verd);
+	struct netdev_queue *rxq;
+	int result = TC_ACT_OK;
+	struct Qdisc *q;
 
 	if (MAX_RED_LOOP < ttl++) {
 		printk(KERN_WARNING
@@ -2029,10 +2030,12 @@ static int ing_filter(struct sk_buff *skb)
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-	spin_lock(&dev->ingress_lock);
+	rxq = &dev->rx_queue;
+
+	spin_lock(&rxq->lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&rxq->lock);
 
 	return result;
 }
@@ -3795,7 +3798,6 @@ int register_netdevice(struct net_device *dev)
 	spin_lock_init(&dev->_xmit_lock);
 	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
 	dev->xmit_lock_owner = -1;
-	spin_lock_init(&dev->ingress_lock);
 
 	dev->iflink = -1;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2a1834f8c7d8..570cef2a9c5f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -601,12 +601,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 
 	sch->parent = parent;
 
+	sch->stats_lock = &dev_queue->lock;
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
-		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev_queue->lock;
 		if (handle == 0) {
 			handle = qdisc_alloc_handle(dev);
 			err = -ENOMEM;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ee8f9f78a095..804d44b00348 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,24 +35,24 @@
  * - enqueue, dequeue are serialized via top level device
  *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ *   spinlock dev->rx_queue.lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 	__acquires(dev->tx_queue.lock)
-	__acquires(dev->ingress_lock)
+	__acquires(dev->rx_queue.lock)
 {
 	spin_lock_bh(&dev->tx_queue.lock);
-	spin_lock(&dev->ingress_lock);
+	spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->ingress_lock)
+	__releases(dev->rx_queue.lock)
 	__releases(dev->tx_queue.lock)
 {
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&dev->rx_queue.lock);
 	spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);