diff options
| author | David S. Miller <davem@davemloft.net> | 2008-07-09 01:49:00 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-07-09 01:49:00 -0400 |
| commit | 816f3258e70db38d6d92c8d871377179fd69160f (patch) | |
| tree | 7ab28132592c82e2ac40317733ea1dd7d6f4e5b5 /net/sched/sch_api.c | |
| parent | b0e1e6462df3c5944010b3328a546d8fe5d932cd (diff) | |
netdev: Kill qdisc_ingress, use netdev->rx_queue.qdisc instead.
Now that our qdisc management is bi-directional, per-queue, and fully
orthogonal, there is no reason to have a special ingress qdisc pointer
in struct net_device.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_api.c')
| -rw-r--r-- | net/sched/sch_api.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2313fa7c97be..4003c280b69f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -450,14 +450,15 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) | |||
| 450 | 450 | ||
| 451 | qdisc_lock_tree(dev); | 451 | qdisc_lock_tree(dev); |
| 452 | if (qdisc && qdisc->flags&TCQ_F_INGRESS) { | 452 | if (qdisc && qdisc->flags&TCQ_F_INGRESS) { |
| 453 | oqdisc = dev->qdisc_ingress; | 453 | dev_queue = &dev->rx_queue; |
| 454 | oqdisc = dev_queue->qdisc; | ||
| 454 | /* Prune old scheduler */ | 455 | /* Prune old scheduler */ |
| 455 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { | 456 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { |
| 456 | /* delete */ | 457 | /* delete */ |
| 457 | qdisc_reset(oqdisc); | 458 | qdisc_reset(oqdisc); |
| 458 | dev->qdisc_ingress = NULL; | 459 | dev_queue->qdisc = NULL; |
| 459 | } else { /* new */ | 460 | } else { /* new */ |
| 460 | dev->qdisc_ingress = qdisc; | 461 | dev_queue->qdisc = qdisc; |
| 461 | } | 462 | } |
| 462 | 463 | ||
| 463 | } else { | 464 | } else { |
| @@ -739,7 +740,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
| 739 | return -ENOENT; | 740 | return -ENOENT; |
| 740 | q = qdisc_leaf(p, clid); | 741 | q = qdisc_leaf(p, clid); |
| 741 | } else { /* ingress */ | 742 | } else { /* ingress */ |
| 742 | q = dev->qdisc_ingress; | 743 | q = dev->rx_queue.qdisc; |
| 743 | } | 744 | } |
| 744 | } else { | 745 | } else { |
| 745 | struct netdev_queue *dev_queue = &dev->tx_queue; | 746 | struct netdev_queue *dev_queue = &dev->tx_queue; |
| @@ -814,7 +815,7 @@ replay: | |||
| 814 | return -ENOENT; | 815 | return -ENOENT; |
| 815 | q = qdisc_leaf(p, clid); | 816 | q = qdisc_leaf(p, clid); |
| 816 | } else { /*ingress */ | 817 | } else { /*ingress */ |
| 817 | q = dev->qdisc_ingress; | 818 | q = dev->rx_queue.qdisc; |
| 818 | } | 819 | } |
| 819 | } else { | 820 | } else { |
| 820 | struct netdev_queue *dev_queue = &dev->tx_queue; | 821 | struct netdev_queue *dev_queue = &dev->tx_queue; |
