diff options
author | David S. Miller <davem@davemloft.net> | 2008-07-09 02:10:33 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-09 02:10:33 -0400 |
commit | 970565bbad0c7b98db0d14131a69e5a0f4445d49 (patch) | |
tree | 59ac641da5b22bb5ea6a0a333ceaa907f9959d10 /net/sched | |
parent | c2aa288548a29d909ec875e81137fb0dbbb420b7 (diff) |
netdev: Move gso_skb into netdev_queue.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 42 |
1 file changed, 23 insertions, 19 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index dda78ee314ec..8247a406a401 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -63,10 +63,11 @@ static inline int qdisc_qlen(struct Qdisc *q)
63 | } | 63 | } |
64 | 64 | ||
65 | static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev, | 65 | static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev, |
66 | struct netdev_queue *dev_queue, | ||
66 | struct Qdisc *q) | 67 | struct Qdisc *q) |
67 | { | 68 | { |
68 | if (unlikely(skb->next)) | 69 | if (unlikely(skb->next)) |
69 | dev->gso_skb = skb; | 70 | dev_queue->gso_skb = skb; |
70 | else | 71 | else |
71 | q->ops->requeue(skb, q); | 72 | q->ops->requeue(skb, q); |
72 | 73 | ||
@@ -75,12 +76,13 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
75 | } | 76 | } |
76 | 77 | ||
77 | static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev, | 78 | static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev, |
79 | struct netdev_queue *dev_queue, | ||
78 | struct Qdisc *q) | 80 | struct Qdisc *q) |
79 | { | 81 | { |
80 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
81 | 83 | ||
82 | if ((skb = dev->gso_skb)) | 84 | if ((skb = dev_queue->gso_skb)) |
83 | dev->gso_skb = NULL; | 85 | dev_queue->gso_skb = NULL; |
84 | else | 86 | else |
85 | skb = q->dequeue(q); | 87 | skb = q->dequeue(q); |
86 | 88 | ||
@@ -89,6 +91,7 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
89 | 91 | ||
90 | static inline int handle_dev_cpu_collision(struct sk_buff *skb, | 92 | static inline int handle_dev_cpu_collision(struct sk_buff *skb, |
91 | struct net_device *dev, | 93 | struct net_device *dev, |
94 | struct netdev_queue *dev_queue, | ||
92 | struct Qdisc *q) | 95 | struct Qdisc *q) |
93 | { | 96 | { |
94 | int ret; | 97 | int ret; |
@@ -111,7 +114,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
111 | * some time. | 114 | * some time. |
112 | */ | 115 | */ |
113 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 116 | __get_cpu_var(netdev_rx_stat).cpu_collision++; |
114 | ret = dev_requeue_skb(skb, dev, q); | 117 | ret = dev_requeue_skb(skb, dev, dev_queue, q); |
115 | } | 118 | } |
116 | 119 | ||
117 | return ret; | 120 | return ret; |
@@ -144,7 +147,7 @@ static inline int qdisc_restart(struct net_device *dev)
144 | int ret = NETDEV_TX_BUSY; | 147 | int ret = NETDEV_TX_BUSY; |
145 | 148 | ||
146 | /* Dequeue packet */ | 149 | /* Dequeue packet */ |
147 | if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) | 150 | if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL)) |
148 | return 0; | 151 | return 0; |
149 | 152 | ||
150 | 153 | ||
@@ -167,7 +170,7 @@ static inline int qdisc_restart(struct net_device *dev)
167 | 170 | ||
168 | case NETDEV_TX_LOCKED: | 171 | case NETDEV_TX_LOCKED: |
169 | /* Driver try lock failed */ | 172 | /* Driver try lock failed */ |
170 | ret = handle_dev_cpu_collision(skb, dev, q); | 173 | ret = handle_dev_cpu_collision(skb, dev, txq, q); |
171 | break; | 174 | break; |
172 | 175 | ||
173 | default: | 176 | default: |
@@ -176,7 +179,7 @@ static inline int qdisc_restart(struct net_device *dev)
176 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", | 179 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", |
177 | dev->name, ret, q->q.qlen); | 180 | dev->name, ret, q->q.qlen); |
178 | 181 | ||
179 | ret = dev_requeue_skb(skb, dev, q); | 182 | ret = dev_requeue_skb(skb, dev, txq, q); |
180 | break; | 183 | break; |
181 | } | 184 | } |
182 | 185 | ||
@@ -578,31 +581,32 @@ void dev_activate(struct net_device *dev)
578 | spin_unlock_bh(&txq->lock); | 581 | spin_unlock_bh(&txq->lock); |
579 | } | 582 | } |
580 | 583 | ||
581 | static void dev_deactivate_queue(struct net_device *dev, | 584 | static void dev_deactivate_queue(struct netdev_queue *dev_queue, |
582 | struct netdev_queue *dev_queue, | ||
583 | struct Qdisc *qdisc_default) | 585 | struct Qdisc *qdisc_default) |
584 | { | 586 | { |
585 | struct Qdisc *qdisc = dev_queue->qdisc; | 587 | struct Qdisc *qdisc; |
588 | struct sk_buff *skb; | ||
589 | |||
590 | spin_lock_bh(&dev_queue->lock); | ||
586 | 591 | ||
592 | qdisc = dev_queue->qdisc; | ||
587 | if (qdisc) { | 593 | if (qdisc) { |
588 | dev_queue->qdisc = qdisc_default; | 594 | dev_queue->qdisc = qdisc_default; |
589 | qdisc_reset(qdisc); | 595 | qdisc_reset(qdisc); |
590 | } | 596 | } |
597 | skb = dev_queue->gso_skb; | ||
598 | dev_queue->gso_skb = NULL; | ||
599 | |||
600 | spin_unlock_bh(&dev_queue->lock); | ||
601 | |||
602 | kfree_skb(skb); | ||
591 | } | 603 | } |
592 | 604 | ||
593 | void dev_deactivate(struct net_device *dev) | 605 | void dev_deactivate(struct net_device *dev) |
594 | { | 606 | { |
595 | struct sk_buff *skb; | ||
596 | int running; | 607 | int running; |
597 | 608 | ||
598 | spin_lock_bh(&dev->tx_queue.lock); | 609 | dev_deactivate_queue(&dev->tx_queue, &noop_qdisc); |
599 | dev_deactivate_queue(dev, &dev->tx_queue, &noop_qdisc); | ||
600 | |||
601 | skb = dev->gso_skb; | ||
602 | dev->gso_skb = NULL; | ||
603 | spin_unlock_bh(&dev->tx_queue.lock); | ||
604 | |||
605 | kfree_skb(skb); | ||
606 | 610 | ||
607 | dev_watchdog_down(dev); | 611 | dev_watchdog_down(dev); |
608 | 612 | ||