path: root/net/sched
author    Herbert Xu <herbert@gondor.apana.org.au>    2006-06-22 05:57:17 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2006-06-23 05:07:31 -0400
commit    f6a78bfcb141f963187464bac838d46a81c3882a (patch)
tree      fe30917dea1ab4cc046c6f1b8c1875373040c84a /net/sched
parent    7967168cefdbc63bf332d6b1548eca7cd65ebbcc (diff)
[NET]: Add generic segmentation offload
This patch adds the infrastructure for generic segmentation offload. The idea is to tap into the potential savings of TSO without hardware support by postponing the allocation of segmented skb's until just before the entry point into the NIC driver. The same structure can be used to support software IPv6 TSO, as well as UFO and segmentation offload for other relevant protocols, e.g., DCCP.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
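To make the deferral concrete, the following is a small, self-contained user-space C model of the idea, not kernel code: a large payload travels through the queueing layer as one buffer and is only split into MTU-sized segments immediately before the (hypothetical) driver transmit hook. The names struct pkt, gso_segment, and driver_xmit are illustrative stand-ins, not functions from this patch.

/* Toy model of generic segmentation offload: keep one large buffer
 * through the stack and split it into MTU-sized segments only right
 * before handing packets to the "driver".  Illustrative only; the
 * real kernel operates on struct sk_buff chains. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
        struct pkt *next;
        size_t len;
        char data[1500];
};

/* Split a large payload into MTU-sized packets, linked via ->next. */
static struct pkt *gso_segment(const char *payload, size_t len, size_t mtu)
{
        struct pkt *head = NULL, **tail = &head;

        while (len) {
                size_t seg = len < mtu ? len : mtu;
                struct pkt *p = calloc(1, sizeof(*p));

                if (!p)
                        return head;    /* partial chain on failure */
                p->len = seg;
                memcpy(p->data, payload, seg);
                payload += seg;
                len -= seg;
                *tail = p;
                tail = &p->next;
        }
        return head;
}

/* Stand-in for dev->hard_start_xmit(): send one wire-sized packet. */
static void driver_xmit(const struct pkt *p)
{
        printf("xmit %zu bytes\n", p->len);
}

int main(void)
{
        char payload[6000];
        struct pkt *p, *next;

        memset(payload, 'x', sizeof(payload));

        /* Segmentation is deferred until the last possible moment. */
        for (p = gso_segment(payload, sizeof(payload), 1500); p; p = next) {
                next = p->next;
                driver_xmit(p);
                free(p);
        }
        return 0;
}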
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_generic.c  19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7aad0121232c..74d4a1dceeec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -96,8 +96,11 @@ static inline int qdisc_restart(struct net_device *dev)
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if ((skb = q->dequeue(q)) != NULL) {
+	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
 		unsigned nolock = (dev->features & NETIF_F_LLTX);
+
+		dev->gso_skb = NULL;
+
 		/*
 		 * When the driver has LLTX set it does its own locking
 		 * in start_xmit. No need to add additional overhead by
@@ -134,10 +137,8 @@ static inline int qdisc_restart(struct net_device *dev)
 
 			if (!netif_queue_stopped(dev)) {
 				int ret;
-				if (netdev_nit)
-					dev_queue_xmit_nit(skb, dev);
 
-				ret = dev->hard_start_xmit(skb, dev);
+				ret = dev_hard_start_xmit(skb, dev);
 				if (ret == NETDEV_TX_OK) {
 					if (!nolock) {
 						netif_tx_unlock(dev);
@@ -171,7 +172,10 @@ static inline int qdisc_restart(struct net_device *dev)
 	 */
 
 requeue:
-	q->ops->requeue(skb, q);
+	if (skb->next)
+		dev->gso_skb = skb;
+	else
+		q->ops->requeue(skb, q);
 	netif_schedule(dev);
 	return 1;
 }
@@ -593,6 +597,11 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 		yield();
+
+	if (dev->gso_skb) {
+		kfree_skb(dev->gso_skb);
+		dev->gso_skb = NULL;
+	}
 }
 
 void dev_init_scheduler(struct net_device *dev)
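The new dev_hard_start_xmit() called above is introduced elsewhere in this patch (in net/core/dev.c, outside the net/sched-limited diffstat shown here): it segments a GSO skb just before the driver, walks the resulting chain of segments linked via skb->next, and returns a non-OK status if the driver stalls partway through. That is why the requeue path checks skb->next and parks an unfinished chain in dev->gso_skb instead of pushing it back into the qdisc. In the same spirit as the sketch above, here is a self-contained user-space model of that retry logic; dev_gso_skb, driver_xmit, and xmit_chain are illustrative names only, not kernel API.

/* Toy retry logic mirroring the qdisc_restart() change: a partially
 * transmitted chain of segments is parked in a per-device slot and
 * resumed on the next pass, instead of being pushed back into the
 * queueing discipline.  Illustrative only. */
#include <stdio.h>
#include <stddef.h>

struct pkt {
        struct pkt *next;
        size_t len;
};

static struct pkt *dev_gso_skb;         /* models dev->gso_skb      */
static int budget = 2;                  /* fake ring holds 2 packets */

/* Models dev->hard_start_xmit(): fails once the fake ring is full. */
static int driver_xmit(struct pkt *p)
{
        if (budget-- <= 0)
                return -1;              /* "driver busy" analogue   */
        printf("xmit %zu bytes\n", p->len);
        return 0;
}

/* Models one qdisc_restart() pass for a chained GSO packet. */
static void xmit_chain(struct pkt *chain)
{
        struct pkt *p = dev_gso_skb ? dev_gso_skb : chain;

        dev_gso_skb = NULL;
        while (p) {
                if (driver_xmit(p)) {
                        /* A non-empty remainder marks a partial GSO
                         * chain, so park it for the next pass. */
                        dev_gso_skb = p;
                        return;
                }
                p = p->next;
        }
}

int main(void)
{
        struct pkt segs[4] = {
                { &segs[1], 1500 }, { &segs[2], 1500 },
                { &segs[3], 1500 }, { NULL, 1500 },
        };

        xmit_chain(segs);               /* stalls after two segments */
        budget = 4;                     /* "queue woken up" later    */
        xmit_chain(NULL);               /* resumes from dev_gso_skb  */
        return 0;
}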