author		Herbert Xu <herbert@gondor.apana.org.au>	2006-06-22 05:57:17 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-06-23 05:07:31 -0400
commit		f6a78bfcb141f963187464bac838d46a81c3882a
tree		fe30917dea1ab4cc046c6f1b8c1875373040c84a /include/linux/netdevice.h
parent		7967168cefdbc63bf332d6b1548eca7cd65ebbcc
[NET]: Add generic segmentation offload
This patch adds the infrastructure for generic segmentation offload (GSO).
The idea is to tap into the potential savings of TSO without hardware
support by postponing the allocation of segmented skbs until just
before the entry point into the NIC driver.
The same structure can be used to support software IPv6 TSO, as well as
UFO and segmentation offload for other relevant protocols, e.g., DCCP.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
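For orientation, here is a small, self-contained user-space sketch of the
deferral idea described in the commit message: segmentation happens only at
the device transmit entry point, not when the protocol builds the packet.
Everything in it (pkt, fake_dev, xmit_one, dev_xmit, the sizes) is an
invented stand-in for illustration, not the code this patch adds.

/*
 * Illustrative user-space model of deferred (software) segmentation.
 * All names here are invented for this sketch; they are not the
 * kernel's data structures or functions.
 */
#include <stdio.h>
#include <string.h>

struct pkt {				/* stand-in for struct sk_buff */
	size_t len;
	char data[4096];
};

struct fake_dev {			/* stand-in for struct net_device */
	size_t mtu;
	struct pkt *gso_skb;		/* left-over segments; modelled in the
					 * second sketch after the diff */
};

/* stand-in for a driver's hard_start_xmit hook */
static void xmit_one(struct fake_dev *dev, const char *data, size_t len)
{
	printf("driver tx: %zu bytes\n", len);
}

/*
 * Segment just before the driver entry point, mirroring the idea of
 * postponing per-segment work until dev_hard_start_xmit() time.
 */
static void dev_xmit(struct fake_dev *dev, struct pkt *skb)
{
	size_t off = 0;

	if (skb->len <= dev->mtu) {		/* small packet: no GSO work */
		xmit_one(dev, skb->data, skb->len);
		return;
	}
	while (off < skb->len) {		/* software segmentation step */
		size_t seg = skb->len - off;

		if (seg > dev->mtu)
			seg = dev->mtu;
		xmit_one(dev, skb->data + off, seg);
		off += seg;
	}
}

int main(void)
{
	struct fake_dev dev = { .mtu = 1500, .gso_skb = NULL };
	struct pkt big = { .len = 4000 };

	memset(big.data, 'x', big.len);
	dev_xmit(&dev, &big);			/* emits 1500 + 1500 + 1000 */
	return 0;
}

The payoff is the same as in the real patch: the stack handles one large
packet for most of its path and only pays the per-segment cost at the last
moment, or not at all when the NIC can do TSO/UFO itself.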
Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa5671307b90..b4eae18390cc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -405,6 +405,9 @@ struct net_device
 	struct list_head	qdisc_list;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
+	/* Partially transmitted GSO packet. */
+	struct sk_buff		*gso_skb;
+
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
@@ -539,6 +542,7 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb, int sg);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
@@ -689,7 +693,8 @@ extern int		dev_change_name(struct net_device *, char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int		dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev);
 
 extern void		dev_init(void);
 
@@ -963,6 +968,7 @@ extern int		netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int sg);
 #ifdef CONFIG_BUG
 extern void		netdev_rx_csum_fault(struct net_device *dev);
 #else
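A closing note on the new net_device.gso_skb member added in the first hunk:
it exists to hold a partially transmitted GSO packet when the driver cannot
accept every segment in one queue run. The user-space model below shows that
bookkeeping in miniature; struct seg, model_dev, drv_budget, and run_queue
are names made up for this sketch, not kernel interfaces.

/* Model of parking untransmitted segments on the device, in the spirit
 * of net_device.gso_skb.  All names here are invented for illustration. */
#include <stdio.h>

struct seg {				/* one already-segmented packet */
	int id;
	struct seg *next;
};

struct model_dev {
	struct seg *gso_skb;		/* left-over segments from last run */
	int drv_budget;			/* how many the "driver" accepts now */
};

/* Hand segments to the driver; stash the remainder on the device so the
 * next queue run continues where this one stopped. */
static void run_queue(struct model_dev *dev, struct seg *list)
{
	/* Resume with previously parked segments first. */
	if (dev->gso_skb) {
		struct seg *tail = dev->gso_skb;

		while (tail->next)
			tail = tail->next;
		tail->next = list;
		list = dev->gso_skb;
		dev->gso_skb = NULL;
	}
	while (list && dev->drv_budget > 0) {
		printf("tx segment %d\n", list->id);
		list = list->next;
		dev->drv_budget--;
	}
	dev->gso_skb = list;		/* partially transmitted: keep the rest */
}

int main(void)
{
	struct seg s3 = { 3, NULL }, s2 = { 2, &s3 }, s1 = { 1, &s2 };
	struct model_dev dev = { NULL, 2 };

	run_queue(&dev, &s1);		/* sends 1 and 2, parks 3 */
	dev.drv_budget = 2;
	run_queue(&dev, NULL);		/* resumes: sends 3 */
	return 0;
}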