Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  85
1 file changed, 78 insertions(+), 7 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cebe677e153b..85f99f60deea 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -36,6 +36,7 @@
 
 #include <linux/device.h>
 #include <linux/percpu.h>
+#include <linux/dmaengine.h>
 
 struct divert_blk;
 struct vlan_group;
@@ -231,6 +232,7 @@ enum netdev_state_t
 	__LINK_STATE_RX_SCHED,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
+	__LINK_STATE_QDISC_RUNNING,
 };
 
 
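The new __LINK_STATE_QDISC_RUNNING bit lets the stack guarantee that only one CPU runs a device's qdisc at a time, without holding a lock across the whole run. A minimal sketch of the intended test_and_set_bit pattern, assuming the qdisc_run()/__qdisc_run() names from the companion scheduler patch:

	static inline void qdisc_run(struct net_device *dev)
	{
		/* Whoever wins the bit drains the queue; losers rely on
		 * the winner to transmit their packets too. */
		if (!test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			__qdisc_run(dev);
	}
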
@@ -306,9 +308,20 @@ struct net_device
 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
+#define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_UFO		8192	/* Can offload UDP Large Send*/
+
+	/* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_GSO_MASK	0xffff0000
+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+
+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
 	struct net_device	*next_sched;
 
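Moving the per-protocol offload bits into the upper half of dev->features makes them line up, bit for bit, with the SKB_GSO_* values, so capability checks reduce to a shift and a mask (see net_gso_ok() at the end of this diff). Drivers keep advertising NETIF_F_TSO exactly as before; only the bit position changes. A hedged sketch of a driver probe path (the feature mix is illustrative):

	/* NETIF_F_TSO is now SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT, but the
	 * driver-side idiom is unchanged. */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;

	/* The old TSO bit position (2048) is reused as NETIF_F_GSO, the
	 * per-device software-GSO enable. */
	dev->features |= NETIF_F_GSO;
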
@@ -397,6 +410,9 @@ struct net_device
 	struct list_head	qdisc_list;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
+	/* Partially transmitted GSO packet. */
+	struct sk_buff		*gso_skb;
+
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
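The new gso_skb field gives the queueing layer somewhere to park a software-segmented packet whose remaining segments did not fit in the device queue, so the next qdisc run resumes it before dequeueing fresh traffic. Roughly, on the dequeue side (a sketch of the expected pattern, not code from this diff):

	/* Resume a partially transmitted GSO packet before taking
	 * anything new off the qdisc. */
	struct sk_buff *skb = dev->gso_skb;

	if (skb)
		dev->gso_skb = NULL;
	else
		skb = dev->qdisc->dequeue(dev->qdisc);
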
@@ -405,7 +421,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -531,6 +547,8 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
+						int features);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
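The gso_segment hook lets each protocol supply its own software segmenter; skb_gso_segment() (declared further down) looks up the packet_type by skb->protocol and dispatches to it. For IPv4 the registration would look like this (inet_gso_segment is the handler name used by the companion IPv4 patch, assumed here):

	static struct packet_type ip_packet_type = {
		.type		= __constant_htons(ETH_P_IP),
		.func		= ip_rcv,
		/* Split one GSO skb into a list of MTU-sized segments. */
		.gso_segment	= inet_gso_segment,
	};
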
@@ -592,6 +610,9 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+	struct dma_chan		*net_dma;
+#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
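Each CPU's softnet_data can now carry an I/OAT DMA channel (hence the new dmaengine.h include at the top of the file), used to offload receive-side copies when CONFIG_NET_DMA is set. Access would presumably follow the usual per-CPU pattern, something like:

	#ifdef CONFIG_NET_DMA
		/* May be NULL when no DMA engine is bound to this CPU. */
		struct dma_chan *chan = __get_cpu_var(softnet_data).net_dma;
	#endif
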
@@ -678,11 +699,11 @@ extern int dev_change_name(struct net_device *, char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int		dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev);
 
 extern void		dev_init(void);
 
-extern int		netdev_nit;
 extern int		netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
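dev_queue_xmit_nit() becomes private to net/core/dev.c (which also retires the netdev_nit counter), while dev_hard_start_xmit() is exported in its place: it is the helper that hands a packet, or each segment of a GSO packet, to the driver. The expected caller is the qdisc restart path, roughly (a simplified sketch, not code from this diff):

	if (netif_tx_trylock(dev)) {
		int ret = NETDEV_TX_BUSY;

		if (!netif_queue_stopped(dev))
			ret = dev_hard_start_xmit(skb, dev);
		netif_tx_unlock(dev);
		if (ret == NETDEV_TX_OK)
			goto out;	/* packet (or all its segments) sent */
	}
	/* otherwise requeue skb and reschedule the qdisc */
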
@@ -888,11 +909,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	int ok = spin_trylock(&dev->_xmit_lock);	/* non-zero on success */
+	if (likely(ok))
+		dev->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
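These wrappers replace direct use of the transmit lock: the field is renamed to _xmit_lock precisely so that any remaining direct users fail to compile and get converted, and xmit_lock_owner is maintained in one place (note that netif_tx_trylock() records the owner only when spin_trylock() actually took the lock, since spin_trylock() returns non-zero on success). Conversion in drivers is mechanical; for example, a watchdog or reset path changes like this (the driver helper name is illustrative):

	/* before */
	spin_lock_bh(&dev->xmit_lock);
	mydrv_restart_tx(dev);
	spin_unlock_bh(&dev->xmit_lock);

	/* after */
	netif_tx_lock_bh(dev);
	mydrv_restart_tx(dev);
	netif_tx_unlock_bh(dev);
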
@@ -920,6 +973,7 @@ extern int netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
 #ifdef CONFIG_BUG
 extern void		netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -939,6 +993,23 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int net_gso_ok(int features, int gso_type)
+{
+	int feature = gso_type << NETIF_F_GSO_SHIFT;
+	return (features & feature) == feature;
+}
+
+static inline int skb_gso_ok(struct sk_buff *skb, int features)
+{
+	return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
+				    skb_shinfo(skb)->gso_type : 0);
+}
+
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+	return !skb_gso_ok(skb, dev->features);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DEV_H */
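Taken together, the transmit path can now ask whether the device can segment a given packet in hardware and fall back to the protocol's gso_segment hook when it cannot. A condensed sketch of the expected use in the xmit path (error handling trimmed; the surrounding labels are assumed):

	if (netif_needs_gso(dev, skb)) {
		/* Device lacks the NETIF_F_* bit matching this packet's
		 * gso_type: segment in software via skb_gso_segment(). */
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (unlikely(IS_ERR(segs)))
			goto drop;	/* 'drop' label assumed in caller */
		/* hand each segment to the driver in turn */
	}
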