author     David S. Miller <davem@davemloft.net>  2008-07-08 20:33:13 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-08 20:33:13 -0400
commit     555353cfa1aee293de445bfa6de43276138ddd82 (patch)
tree       b5daba85806b8e36731c4a474aac97f1a0140a51
parent     dc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84 (diff)
netdev: The ingress_lock member is no longer needed.
Every qdisc is associated with a queue, and in the case of ingress
qdiscs that will now be netdev->rx_queue, so using that queue's lock
is the thing to do.

Signed-off-by: David S. Miller <davem@davemloft.net>
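In sketch form (condensed from the net/core/dev.c hunk below; the RED
loop check and tc_verd bookkeeping are omitted), the ingress path now
serializes on the receive queue's lock rather than a separate
ingress_lock:

    static int ing_filter(struct sk_buff *skb)
    {
    	struct net_device *dev = skb->dev;
    	struct netdev_queue *rxq = &dev->rx_queue;
    	int result = TC_ACT_OK;
    	struct Qdisc *q;

    	/* The ingress qdisc is now guarded by the rx queue's
    	 * spinlock instead of the removed dev->ingress_lock.
    	 */
    	spin_lock(&rxq->lock);
    	if ((q = dev->qdisc_ingress) != NULL)
    		result = q->enqueue(skb, q);
    	spin_unlock(&rxq->lock);

    	return result;
    }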
-rw-r--r--  drivers/net/ifb.c           8
-rw-r--r--  include/linux/netdevice.h   2
-rw-r--r--  net/core/dev.c             12
-rw-r--r--  net/sched/sch_api.c         3
-rw-r--r--  net/sched/sch_generic.c    10
5 files changed, 17 insertions, 18 deletions
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index bc3de272a829..ccbd6554f6eb 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -229,13 +229,13 @@ module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 /*
- * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
+ * dev_ifb->tx_queue.lock is usually taken after dev->rx_queue.lock,
  * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
+ * ifb doesn't take dev->tx_queue.lock with dev_ifb->rx_queue.lock.
  * But lockdep should know that ifb has different locks from dev.
  */
 static struct lock_class_key ifb_tx_queue_lock_key;
-static struct lock_class_key ifb_ingress_lock_key;
+static struct lock_class_key ifb_rx_queue_lock_key;
 
 
 static int __init ifb_init_one(int index)
@@ -259,7 +259,7 @@ static int __init ifb_init_one(int index)
 		goto err;
 
 	lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
-	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
+	lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);
 
 	return 0;
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e835acacb479..633a44c6fa5e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -632,8 +632,6 @@ struct net_device
 	struct netdev_queue	rx_queue;
 	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
 
-	/* ingress path synchronizer */
-	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
 
 /*
diff --git a/net/core/dev.c b/net/core/dev.c
index 05011048b86c..2322fb69fd53 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2014,10 +2014,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
  */
 static int ing_filter(struct sk_buff *skb)
 {
-	struct Qdisc *q;
 	struct net_device *dev = skb->dev;
-	int result = TC_ACT_OK;
 	u32 ttl = G_TC_RTTL(skb->tc_verd);
+	struct netdev_queue *rxq;
+	int result = TC_ACT_OK;
+	struct Qdisc *q;
 
 	if (MAX_RED_LOOP < ttl++) {
 		printk(KERN_WARNING
@@ -2029,10 +2030,12 @@ static int ing_filter(struct sk_buff *skb)
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-	spin_lock(&dev->ingress_lock);
+	rxq = &dev->rx_queue;
+
+	spin_lock(&rxq->lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&rxq->lock);
 
 	return result;
 }
@@ -3795,7 +3798,6 @@ int register_netdevice(struct net_device *dev)
 	spin_lock_init(&dev->_xmit_lock);
 	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
 	dev->xmit_lock_owner = -1;
-	spin_lock_init(&dev->ingress_lock);
 
 	dev->iflink = -1;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2a1834f8c7d8..570cef2a9c5f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -601,12 +601,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 
 	sch->parent = parent;
 
+	sch->stats_lock = &dev_queue->lock;
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
-		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev_queue->lock;
 		if (handle == 0) {
 			handle = qdisc_alloc_handle(dev);
 			err = -ENOMEM;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ee8f9f78a095..804d44b00348 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,24 +35,24 @@
  * - enqueue, dequeue are serialized via top level device
  *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ *   spinlock dev->rx_queue.lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 	__acquires(dev->tx_queue.lock)
-	__acquires(dev->ingress_lock)
+	__acquires(dev->rx_queue.lock)
 {
 	spin_lock_bh(&dev->tx_queue.lock);
-	spin_lock(&dev->ingress_lock);
+	spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->ingress_lock)
+	__releases(dev->rx_queue.lock)
 	__releases(dev->tx_queue.lock)
 {
-	spin_unlock(&dev->ingress_lock);
+	spin_unlock(&dev->rx_queue.lock);
 	spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);