author    Dmitry Torokhov <dtor_core@ameritech.net>  2006-06-26 01:31:38 -0400
committer Dmitry Torokhov <dtor_core@ameritech.net>  2006-06-26 01:31:38 -0400
commit    4854c7b27f0975a2b629f35ea3996d2968eb7c4f (patch)
tree      4102bdb70289764a2058aff0f907b13d7cf0e0d1 /include/linux/netdevice.h
parent    3cbd5b32cb625f5c0f1b1476d154fac873dd49ce (diff)
parent    fcc18e83e1f6fd9fa6b333735bf0fcd530655511 (diff)

Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h | 70
1 file changed, 63 insertions(+), 7 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f4169bbb60eb..bc747e5d7138 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -34,9 +34,9 @@
 #include <asm/cache.h>
 #include <asm/byteorder.h>
 
-#include <linux/config.h>
 #include <linux/device.h>
 #include <linux/percpu.h>
+#include <linux/dmaengine.h>
 
 struct divert_blk;
 struct vlan_group;
@@ -232,6 +232,7 @@ enum netdev_state_t
 	__LINK_STATE_RX_SCHED,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
+	__LINK_STATE_QDISC_RUNNING,
 };
 
 
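The new __LINK_STATE_QDISC_RUNNING bit lets the stack guarantee that only one CPU runs a device's queue discipline at a time. Below is a minimal sketch of how such a bit is consumed, modeled on the qdisc_run() helper of this era in net/pkt_sched.h; treat it as an illustration of the pattern, not the verbatim kernel source.

static inline void qdisc_run(struct net_device *dev)
{
	/* Only one CPU wins the test_and_set; the others back off,
	 * trusting the winner to drain the queue.  __qdisc_run()
	 * clears the bit when it is done.
	 */
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}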
@@ -307,9 +308,16 @@ struct net_device
 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
+#define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_UFO		8192	/* Can offload UDP Large Send */
+
+	/* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
+
+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
 	struct net_device	*next_sched;
 
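With NETIF_F_GSO_SHIFT, the TSO and UFO feature bits are no longer arbitrary constants: they are the SKB_GSO_* values (SKB_GSO_TCPV4 and SKB_GSO_UDPV4 at this point in the tree) shifted into the upper half of dev->features, so an skb's gso_type can be compared against the feature word with a plain mask. A hedged driver-side sketch follows; mydrv_init_features is hypothetical, but the flags are the ones defined above.

static void mydrv_init_features(struct net_device *dev)
{
	/* Scatter/gather and checksum offload are the usual
	 * prerequisites for useful TSO.  NETIF_F_TSO now expands to
	 * SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT rather than a fixed bit.
	 */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
}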
@@ -398,6 +406,9 @@ struct net_device
 	struct list_head	qdisc_list;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
+	/* Partially transmitted GSO packet. */
+	struct sk_buff		*gso_skb;
+
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
@@ -406,7 +417,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -532,6 +543,7 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb, int sg);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
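The gso_segment hook lets each protocol supply its own software segmentation routine, which skb_gso_segment() (declared further down) dispatches on the packet type. A sketch of a hypothetical registration; myproto_gso_segment and myproto_rcv are assumptions for illustration, while the field and its signature come from the struct above.

static int myproto_rcv(struct sk_buff *, struct net_device *,
		       struct packet_type *, struct net_device *);

static struct sk_buff *myproto_gso_segment(struct sk_buff *skb, int sg)
{
	/* Would split skb into MTU-sized segments chained on
	 * skb->next, honouring scatter/gather when sg is set.
	 */
	return ERR_PTR(-EOPNOTSUPP);	/* placeholder body */
}

static struct packet_type myproto_packet_type = {
	.type		= __constant_htons(ETH_P_IP),
	.func		= myproto_rcv,		/* normal receive path */
	.gso_segment	= myproto_gso_segment,
};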
@@ -593,6 +605,9 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+	struct dma_chan		*net_dma;
+#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
@@ -679,7 +694,8 @@ extern int dev_change_name(struct net_device *, char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int		dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev);
 
 extern void		dev_init(void);
 
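dev_queue_xmit_nit() loses its extern declaration here because callers are now expected to go through dev_hard_start_xmit(), which runs the packet taps and the GSO check in one place. A greatly simplified sketch of the flow the new helper centralizes; the real implementation lives in net/core/dev.c and also walks a chain of already-segmented skbs.

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (netdev_nit)
		dev_queue_xmit_nit(skb, dev);	/* taps still see every packet */

	if (netif_needs_gso(dev, skb)) {
		/* segment in software here, then transmit the
		 * resulting skb->next chain piece by piece
		 */
	}

	return dev->hard_start_xmit(skb, dev);
}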
@@ -889,11 +905,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	/* spin_trylock() returns non-zero on success, so record the
+	 * owner only when the lock was actually taken.
+	 */
+	int ok = spin_trylock(&dev->_xmit_lock);
+	if (ok)
+		dev->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
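For drivers the underscore rename is the point: dev->xmit_lock no longer exists under that name, so open-coded locking must move to the wrappers, which also keep xmit_lock_owner current for the recursion check on the transmit path. A hypothetical watchdog handler showing the conversion; mydrv_tx_timeout is illustrative, the wrappers are the ones defined above.

static void mydrv_tx_timeout(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* was: spin_lock_bh(&dev->xmit_lock); */
	/* ... reset the hardware transmit ring ... */
	netif_tx_unlock_bh(dev);	/* was: spin_unlock_bh(&dev->xmit_lock); */
}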
@@ -921,6 +969,7 @@ extern int netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int		skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -940,6 +989,13 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
+	return skb_shinfo(skb)->gso_size &&
+	       (dev->features & feature) != feature;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DEV_H */
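netif_needs_gso() is the consumer of the NETIF_F_GSO_SHIFT trick from earlier in the patch: gso_type shifted left by 16 lands exactly on the feature bits, so a device advertising the matching NETIF_F_* bit receives the oversized skb untouched, while any other device forces software segmentation first. A hedged sketch of the intended pairing with skb_gso_segment(); everything except the two helpers is schematic.

static int xmit_maybe_segment(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs;

		/* Hardware cannot segment this gso_type: split the
		 * packet in software.  The sg argument tells the
		 * protocol whether the device accepts fragmented output.
		 */
		segs = skb_gso_segment(skb, dev->features & NETIF_F_SG);
		if (IS_ERR(segs))
			return PTR_ERR(segs);
		/* transmit segs and the rest of its skb->next chain */
	}
	return dev->hard_start_xmit(skb, dev);
}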