Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 115 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 107 insertions(+), 8 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f4169bbb60e..50a4719512e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -34,9 +34,9 @@
 #include <asm/cache.h>
 #include <asm/byteorder.h>
 
-#include <linux/config.h>
 #include <linux/device.h>
 #include <linux/percpu.h>
+#include <linux/dmaengine.h>
 
 struct divert_blk;
 struct vlan_group;
@@ -232,6 +232,7 @@ enum netdev_state_t
 	__LINK_STATE_RX_SCHED,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
+	__LINK_STATE_QDISC_RUNNING,
 };
 
 
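The new __LINK_STATE_QDISC_RUNNING bit serializes qdisc execution per device: whichever CPU wins a test_and_set_bit() on it gets to run the queue, and everyone else backs off. Roughly how the scheduler core claims it, sketched from the net/sched side (__qdisc_run() being the out-of-line worker there):

	static inline void qdisc_run(struct net_device *dev)
	{
		/* Only one CPU at a time may pump this device's qdisc. */
		if (!netif_queue_stopped(dev) &&
		    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			__qdisc_run(dev);
	}

The runner clears the bit again once the queue is drained or the driver stops it.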
@@ -307,9 +308,23 @@ struct net_device
 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
+#define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_UFO		8192	/* Can offload UDP Large Send */
+
+	/* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_GSO_MASK	0xffff0000
+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+
+	/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
 	struct net_device	*next_sched;
 
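With this layout the upper 16 bits of dev->features mirror the SKB_GSO_* type bits one-for-one, so testing whether a device can hardware-segment a given gso_type is a single shift and mask. A minimal sketch of the correspondence (dev_can_gso_type() is a hypothetical name; the in-tree helper is net_gso_ok(), added at the bottom of this patch):

	static inline int dev_can_gso_type(struct net_device *dev, int gso_type)
	{
		/* SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT == NETIF_F_TSO, and so on. */
		int feature = gso_type << NETIF_F_GSO_SHIFT;

		return (dev->features & feature) == feature;
	}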
@@ -398,6 +413,9 @@ struct net_device
 	struct list_head	qdisc_list;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
+	/* Partially transmitted GSO packet. */
+	struct sk_buff		*gso_skb;
+
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
@@ -406,7 +424,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -532,6 +550,9 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
+						int features);
+	int			(*gso_send_check)(struct sk_buff *skb);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
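A protocol advertises software segmentation support by filling in the two new callbacks before registering its packet_type; skb_gso_segment(), declared further down, dispatches to them. A sketch under assumed names (my_rcv, my_gso_send_check and my_gso_segment stand in for a protocol's real handlers):

	static struct packet_type my_packet_type = {
		.type		= __constant_htons(ETH_P_IP),
		.func		= my_rcv,
		.gso_send_check	= my_gso_send_check,	/* sanity-check headers first */
		.gso_segment	= my_gso_segment,	/* split one large skb into a list */
	};

	static int __init my_proto_init(void)
	{
		dev_add_pack(&my_packet_type);
		return 0;
	}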
@@ -593,6 +614,9 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+	struct dma_chan		*net_dma;
+#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
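With CONFIG_NET_DMA, each CPU's softnet_data carries a DMA-engine channel for receive copy offload. A minimal sketch of looking it up (hypothetical helper; it assumes preemption is already disabled and ignores the channel refcounting a real accessor would need):

	#ifdef CONFIG_NET_DMA
	static inline struct dma_chan *my_net_dma_channel(void)
	{
		/* Per-CPU data: only valid while this CPU can't be preempted. */
		return __get_cpu_var(softnet_data).net_dma;
	}
	#endif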
@@ -679,11 +703,11 @@ extern int dev_change_name(struct net_device *, char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int		dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev);
 
 extern void		dev_init(void);
 
-extern int		netdev_nit;
 extern int		netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
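dev_queue_xmit_nit() goes private to net/core/dev.c, while dev_hard_start_xmit() is exported so the queue-restart path can feed packets (including GSO segment lists) to the driver itself. A sketch of a caller, assuming netif_tx_lock is already held (qdisc_restart() in net/sched is the real user; the requeue logic is omitted):

	/* Caller holds netif_tx_lock(dev). */
	if (!netif_queue_stopped(dev)) {
		if (dev_hard_start_xmit(skb, dev) == NETDEV_TX_OK)
			return 0;
		/* ... otherwise requeue skb and reschedule the qdisc ... */
	}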
@@ -889,11 +913,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	int ok = spin_trylock(&dev->_xmit_lock);
+	if (likely(ok))
+		dev->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
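Drivers that used to take dev->xmit_lock directly are expected to move to these wrappers, which is why the field is renamed to _xmit_lock above: the wrappers also keep xmit_lock_owner in step for the recursion/LLTX checks. A driver-side sketch under assumed names (my_tx_timeout and my_reset_tx_ring are hypothetical):

	static void my_tx_timeout(struct net_device *dev)
	{
		netif_tx_lock(dev);		/* takes _xmit_lock, records owner CPU */
		my_reset_tx_ring(dev);		/* hypothetical hardware recovery */
		netif_tx_unlock(dev);		/* clears the owner before unlocking */

		netif_wake_queue(dev);
	}

netif_tx_disable() above shows the same pattern used by the core.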
@@ -921,6 +977,7 @@ extern int netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -940,6 +997,48 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int net_gso_ok(int features, int gso_type)
+{
+	int feature = gso_type << NETIF_F_GSO_SHIFT;
+	return (features & feature) == feature;
+}
+
+static inline int skb_gso_ok(struct sk_buff *skb, int features)
+{
+	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
+}
+
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+	return skb_is_gso(skb) &&
+	       (!skb_gso_ok(skb, dev->features) ||
+		unlikely(skb->ip_summed != CHECKSUM_HW));
+}
+
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
+ */
+static inline int skb_bond_should_drop(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	struct net_device *master = dev->master;
+
+	if (master &&
+	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+		if (master->priv_flags & IFF_MASTER_ALB) {
+			if (skb->pkt_type != PACKET_BROADCAST &&
+			    skb->pkt_type != PACKET_MULTICAST)
+				return 0;
+		}
+		if (master->priv_flags & IFF_MASTER_8023AD &&
+		    skb->protocol == __constant_htons(ETH_P_SLOW))
+			return 0;
+
+		return 1;
+	}
+	return 0;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DEV_H */
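Taken together, the transmit path can now decide per packet whether to hand a superpacket straight to the hardware or segment it in software first. A condensed sketch of that decision, loosely modelled on what net/core/dev.c does with these helpers (locking and requeueing omitted; out_kfree_skb is a hypothetical error label):

	/* Inside a dev_queue_xmit()-style path: */
	if (netif_needs_gso(dev, skb)) {
		/* The device can't segment this skb itself: build the
		 * segment list in software and transmit those instead. */
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (unlikely(IS_ERR(segs)))
			goto out_kfree_skb;
		/* ... hand each segment to dev_hard_start_xmit() ... */
	}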