aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-09 01:49:00 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-09 01:49:00 -0400
commit816f3258e70db38d6d92c8d871377179fd69160f (patch)
tree7ab28132592c82e2ac40317733ea1dd7d6f4e5b5
parentb0e1e6462df3c5944010b3328a546d8fe5d932cd (diff)
netdev: Kill qdisc_ingress, use netdev->rx_queue.qdisc instead.
Now that our qdisc management is bi-directional, per-queue, and fully orthogonal, there is no reason to have a special ingress qdisc pointer in struct net_device.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--net/core/dev.c4
-rw-r--r--net/sched/sch_api.c11
3 files changed, 8 insertions, 10 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index df702a7b3db5..e7c49246fd88 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -634,9 +634,6 @@ struct net_device
634 634
635 struct netdev_queue rx_queue; 635 struct netdev_queue rx_queue;
636 struct netdev_queue tx_queue ____cacheline_aligned_in_smp; 636 struct netdev_queue tx_queue ____cacheline_aligned_in_smp;
637
638 struct Qdisc *qdisc_ingress;
639
640 unsigned long tx_queue_len; /* Max frames per queue allowed */ 637 unsigned long tx_queue_len; /* Max frames per queue allowed */
641 638
642 /* Partially transmitted GSO packet. */ 639 /* Partially transmitted GSO packet. */
diff --git a/net/core/dev.c b/net/core/dev.c
index ce79c28d739d..ab760a954d99 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2033,7 +2033,7 @@ static int ing_filter(struct sk_buff *skb)
2033 rxq = &dev->rx_queue; 2033 rxq = &dev->rx_queue;
2034 2034
2035 spin_lock(&rxq->lock); 2035 spin_lock(&rxq->lock);
2036 if ((q = dev->qdisc_ingress) != NULL) 2036 if ((q = rxq->qdisc) != NULL)
2037 result = q->enqueue(skb, q); 2037 result = q->enqueue(skb, q);
2038 spin_unlock(&rxq->lock); 2038 spin_unlock(&rxq->lock);
2039 2039
@@ -2044,7 +2044,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2044 struct packet_type **pt_prev, 2044 struct packet_type **pt_prev,
2045 int *ret, struct net_device *orig_dev) 2045 int *ret, struct net_device *orig_dev)
2046{ 2046{
2047 if (!skb->dev->qdisc_ingress) 2047 if (!skb->dev->rx_queue.qdisc)
2048 goto out; 2048 goto out;
2049 2049
2050 if (*pt_prev) { 2050 if (*pt_prev) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2313fa7c97be..4003c280b69f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -450,14 +450,15 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
450 450
451 qdisc_lock_tree(dev); 451 qdisc_lock_tree(dev);
452 if (qdisc && qdisc->flags&TCQ_F_INGRESS) { 452 if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
453 oqdisc = dev->qdisc_ingress; 453 dev_queue = &dev->rx_queue;
454 oqdisc = dev_queue->qdisc;
454 /* Prune old scheduler */ 455 /* Prune old scheduler */
455 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { 456 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
456 /* delete */ 457 /* delete */
457 qdisc_reset(oqdisc); 458 qdisc_reset(oqdisc);
458 dev->qdisc_ingress = NULL; 459 dev_queue->qdisc = NULL;
459 } else { /* new */ 460 } else { /* new */
460 dev->qdisc_ingress = qdisc; 461 dev_queue->qdisc = qdisc;
461 } 462 }
462 463
463 } else { 464 } else {
@@ -739,7 +740,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
739 return -ENOENT; 740 return -ENOENT;
740 q = qdisc_leaf(p, clid); 741 q = qdisc_leaf(p, clid);
741 } else { /* ingress */ 742 } else { /* ingress */
742 q = dev->qdisc_ingress; 743 q = dev->rx_queue.qdisc;
743 } 744 }
744 } else { 745 } else {
745 struct netdev_queue *dev_queue = &dev->tx_queue; 746 struct netdev_queue *dev_queue = &dev->tx_queue;
@@ -814,7 +815,7 @@ replay:
814 return -ENOENT; 815 return -ENOENT;
815 q = qdisc_leaf(p, clid); 816 q = qdisc_leaf(p, clid);
816 } else { /*ingress */ 817 } else { /*ingress */
817 q = dev->qdisc_ingress; 818 q = dev->rx_queue.qdisc;
818 } 819 }
819 } else { 820 } else {
820 struct netdev_queue *dev_queue = &dev->tx_queue; 821 struct netdev_queue *dev_queue = &dev->tx_queue;