Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   130
1 file changed, 72 insertions, 58 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 659366734f3f..2e7783f4a755 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -32,6 +32,7 @@
 #ifdef __KERNEL__
 #include <linux/timer.h>
 #include <linux/delay.h>
+#include <linux/mm.h>
 #include <asm/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -96,7 +97,7 @@ struct wireless_dev;
  * Compute the worst case header length according to the protocols
  * used.
  */
 
 #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
 # if defined(CONFIG_MAC80211_MESH)
 #  define LL_MAX_HEADER 128
@@ -124,7 +125,7 @@ struct wireless_dev;
  * Network device statistics. Akin to the 2.0 ether stats but
  * with byte counters.
  */
 
 struct net_device_stats
 {
 	unsigned long	rx_packets;	/* total packets received	*/
@@ -285,7 +286,7 @@ enum netdev_state_t
 
 /*
  * This structure holds at boot time configured netdevice settings. They
  * are then used in the device probing.
  */
 struct netdev_boot_setup {
 	char name[IFNAMSIZ];
@@ -314,6 +315,9 @@ struct napi_struct {
 	spinlock_t		poll_lock;
 	int			poll_owner;
 #endif
+
+	unsigned int		gro_count;
+
 	struct net_device	*dev;
 	struct list_head	dev_list;
 	struct sk_buff		*gro_list;
@@ -327,6 +331,14 @@ enum
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
+enum {
+	GRO_MERGED,
+	GRO_MERGED_FREE,
+	GRO_HELD,
+	GRO_NORMAL,
+	GRO_DROP,
+};
+
 extern void __napi_schedule(struct napi_struct *n);
 
 static inline int napi_disable_pending(struct napi_struct *n)
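The new GRO_* values are the return codes handed back by the GRO receive path. A minimal sketch of how a caller might dispatch on them follows; it is an illustration loosely modelled on the finish helpers this patch declares further down, not code copied from the patch, and the per-case comments are one reading of the codes.

/* Hypothetical dispatch on the new GRO_* return codes (illustration only). */
static int example_gro_finish(int ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:	/* not aggregated: hand the skb to the stack */
		return netif_receive_skb(skb);

	case GRO_DROP:		/* could not be processed at all */
	case GRO_MERGED_FREE:	/* payload merged elsewhere, this skb is spent */
		kfree_skb(skb);
		break;

	case GRO_HELD:		/* parked on napi->gro_list for later merging */
	case GRO_MERGED:	/* data merged; caller may reuse the skb */
		break;
	}

	return ret == GRO_DROP ? NET_RX_DROP : NET_RX_SUCCESS;
}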
@@ -582,6 +594,14 @@ struct net_device_ops {
 #define HAVE_NETDEV_POLL
 	void			(*ndo_poll_controller)(struct net_device *dev);
 #endif
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
+						      u16 xid,
+						      struct scatterlist *sgl,
+						      unsigned int sgc);
+	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
+						      u16 xid);
+#endif
 };
 
 /*
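A rough sketch of how a driver supporting FCoE DDP offload might wire up the two new hooks. Everything prefixed mydrv_ is invented for illustration, and the callback bodies are placeholders; only the signatures come from the ops added above.

/* Hypothetical driver glue for the new FCoE DDP hooks (names invented). */
static int mydrv_fcoe_ddp_setup(struct net_device *dev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc)
{
	/* program the NIC to place data for exchange 'xid' via 'sgl' */
	return 0;
}

static int mydrv_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
	/* tear down the DDP context for exchange 'xid' */
	return 0;
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_open		= mydrv_open,		/* invented */
	.ndo_stop		= mydrv_stop,		/* invented */
	.ndo_start_xmit		= mydrv_xmit,		/* invented */
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	.ndo_fcoe_ddp_setup	= mydrv_fcoe_ddp_setup,
	.ndo_fcoe_ddp_done	= mydrv_fcoe_ddp_done,
#endif
};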
@@ -650,14 +670,17 @@ struct net_device
 #define NETIF_F_GRO		16384	/* Generic receive offload */
 #define NETIF_F_LRO		32768	/* large receive offload */
 
+#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
+
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
-#define NETIF_F_GSO_MASK	0xffff0000
+#define NETIF_F_GSO_MASK	0x00ff0000
 #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 #define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
 #define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
 #define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
 /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
@@ -740,7 +763,7 @@ struct net_device
 	void			*dsa_ptr;	/* dsa specific data */
 #endif
 	void			*atalk_ptr;	/* AppleTalk link	*/
 	void			*ip_ptr;	/* IPv4 specific data	*/
 	void			*dn_ptr;	/* DECnet specific data */
 	void			*ip6_ptr;	/* IPv6 specific data */
 	void			*ec_ptr;	/* Econet specific data */
@@ -753,7 +776,7 @@ struct net_device
 	 */
 	unsigned long		last_rx;	/* Time of last Rx	*/
 	/* Interface address info used in eth_type_trans() */
 	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
@@ -840,6 +863,11 @@ struct net_device
 	struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
 
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	/* max exchange id for FCoE LRO by ddp */
+	unsigned int		fcoe_ddp_xid;
+#endif
+
 #ifdef CONFIG_COMPAT_NET_DEV_OPS
 	struct {
 		int			(*init)(struct net_device *dev);
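Taken together with the NETIF_F_FCOE_CRC and NETIF_F_FSO bits added earlier, a driver would typically advertise the offload and publish its exchange-id limit at probe time. A hedged sketch; the function name and the numeric limit are invented:

/* Hypothetical probe-time setup for the FCoE additions (values invented). */
static void mydrv_init_fcoe(struct net_device *netdev)
{
	netdev->features |= NETIF_F_FCOE_CRC | NETIF_F_FSO;

	/* highest FC exchange id this port can DDP (hardware dependent) */
	netdev->fcoe_ddp_xid = 511;
}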
@@ -984,6 +1012,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 void netif_napi_del(struct napi_struct *napi);
 
 struct napi_gro_cb {
+	/* This indicates where we are processing relative to skb->data. */
+	int data_offset;
+
 	/* This is non-zero if the packet may be of the same flow. */
 	int same_flow;
 
@@ -1088,6 +1119,36 @@ extern int		dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int netpoll_trap(void);
 #endif
+extern void *skb_gro_header(struct sk_buff *skb, unsigned int hlen);
+extern int skb_gro_receive(struct sk_buff **head,
+			   struct sk_buff *skb);
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+	return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+	return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+	NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+}
+
+static inline void *skb_gro_mac_header(struct sk_buff *skb)
+{
+	return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) :
+	       page_address(skb_shinfo(skb)->frags[0].page) +
+	       skb_shinfo(skb)->frags[0].page_offset;
+}
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				  unsigned short type,
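These helpers keep a per-packet cursor (data_offset) instead of moving skb->data, so headers of packets that end up unmerged never need to be copied back. A sketch of how a protocol's gro_receive callback might use them; "myproto" and its header are invented, error handling is abbreviated, and it assumes skb_gro_header() returns a pointer to hlen bytes at the current cursor or NULL when they are not available:

/* Hypothetical protocol gro_receive callback using the new cursor helpers. */
struct myprotohdr {
	__be16	id;
	__be16	len;
};

static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct myprotohdr *hdr;

	/* header bytes at the current cursor; NULL means we cannot see them */
	hdr = skb_gro_header(skb, sizeof(*hdr));
	if (unlikely(!hdr)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* give up on aggregating */
		return NULL;
	}

	/* ... flow matching against the skbs held on *head would go here ... */

	/* advance the cursor past our header; skb->data is left untouched */
	skb_gro_pull(skb, sizeof(*hdr));

	/* a real protocol would now hand off to the next layer's gro_receive */
	return NULL;
}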
@@ -1376,12 +1437,15 @@ extern int		netif_receive_skb(struct sk_buff *skb);
 extern void		napi_gro_flush(struct napi_struct *napi);
 extern int		dev_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
+extern int		napi_skb_finish(int ret, struct sk_buff *skb);
 extern int		napi_gro_receive(struct napi_struct *napi,
 					 struct sk_buff *skb);
 extern void		napi_reuse_skb(struct napi_struct *napi,
 				       struct sk_buff *skb);
 extern struct sk_buff *	napi_fraginfo_skb(struct napi_struct *napi,
 					  struct napi_gro_fraginfo *info);
+extern int		napi_frags_finish(struct napi_struct *napi,
+					  struct sk_buff *skb, int ret);
 extern int		napi_gro_frags(struct napi_struct *napi,
 				       struct napi_gro_fraginfo *info);
 extern void		netif_nit_deliver(struct sk_buff *skb);
@@ -1575,56 +1639,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-/* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct napi_struct *napi)
-{
-	return napi_schedule_prep(napi);
-}
-
-/* Add interface to tail of rx poll list. This assumes that _prep has
- * already been called and returned 1.
- */
-static inline void __netif_rx_schedule(struct napi_struct *napi)
-{
-	__napi_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by irq handler. */
-
-static inline void netif_rx_schedule(struct napi_struct *napi)
-{
-	if (netif_rx_schedule_prep(napi))
-		__netif_rx_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
-static inline int netif_rx_reschedule(struct napi_struct *napi)
-{
-	if (napi_schedule_prep(napi)) {
-		__netif_rx_schedule(napi);
-		return 1;
-	}
-	return 0;
-}
-
-/* same as netif_rx_complete, except that local_irq_save(flags)
- * has already been issued
- */
-static inline void __netif_rx_complete(struct napi_struct *napi)
-{
-	__napi_complete(napi);
-}
-
-/* Remove interface from poll list: it must be in the poll list
- * on current cpu. This primitive is called by dev->poll(), when
- * it completes the work. The device cannot be out of poll list at this
- * moment, it is BUG().
- */
-static inline void netif_rx_complete(struct napi_struct *napi)
-{
-	napi_complete(napi);
-}
-
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
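The removed netif_rx_* inlines were thin wrappers; after this change drivers call the napi_* primitives they forwarded to. A sketch of the usual irq/poll pairing under the new naming; everything prefixed mydev_ is invented for illustration:

/* Hypothetical driver code after the removal of the netif_rx_* wrappers. */
struct mydev_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct mydev_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {	/* was netif_rx_schedule_prep() */
		mydev_mask_rx_irq(priv);	/* invented helper */
		__napi_schedule(&priv->napi);	/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
	int work_done = mydev_clean_rx(priv, budget);	/* invented helper */

	if (work_done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		mydev_unmask_rx_irq(priv);	/* invented helper */
	}
	return work_done;
}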
@@ -1875,7 +1889,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
 
 		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
 			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-			    skb->protocol == __constant_htons(ETH_P_ARP))
+			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
 				return 0;
 
 			if (master->priv_flags & IFF_MASTER_ALB) {
@@ -1884,7 +1898,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
 					return 0;
 			}
 			if (master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __constant_htons(ETH_P_SLOW))
+			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
 				return 0;
 
 			return 1;