Diffstat (limited to 'include/linux/netdevice.h')
 -rw-r--r--   include/linux/netdevice.h   112
 1 files changed, 55 insertions, 57 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f9..be3ebd7e8ce5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -96,7 +96,7 @@ struct wireless_dev;
  * Compute the worst case header length according to the protocols
  * used.
  */
 
 #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
 # if defined(CONFIG_MAC80211_MESH)
 #  define LL_MAX_HEADER 128
@@ -124,7 +124,7 @@ struct wireless_dev;
  * Network device statistics. Akin to the 2.0 ether stats but
  * with byte counters.
  */
 
 struct net_device_stats
 {
 	unsigned long	rx_packets;	/* total packets received	*/
@@ -285,7 +285,7 @@ enum netdev_state_t
 
 /*
  * This structure holds at boot time configured netdevice settings. They
  * are then used in the device probing.
  */
 struct netdev_boot_setup {
 	char name[IFNAMSIZ];
@@ -314,6 +314,9 @@ struct napi_struct {
 	spinlock_t		poll_lock;
 	int			poll_owner;
 #endif
+
+	unsigned int		gro_count;
+
 	struct net_device	*dev;
 	struct list_head	dev_list;
 	struct sk_buff		*gro_list;
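The new gro_count field gives each NAPI context an O(1) count of the packets currently parked on gro_list. Below is a minimal sketch of the bookkeeping this enables; the cap value and every ex_* name are assumptions for illustration, not part of this header.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define EX_MAX_GRO_SKBS 8		/* assumed cap on held flows */

/* Park an skb on the per-NAPI GRO list and keep the counter in sync so
 * the cap check does not have to walk the list. */
static void ex_gro_hold(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	napi->gro_count++;
}

static bool ex_gro_list_full(const struct napi_struct *napi)
{
	return napi->gro_count >= EX_MAX_GRO_SKBS;
}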
@@ -327,6 +330,14 @@ enum
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
+enum {
+	GRO_MERGED,
+	GRO_MERGED_FREE,
+	GRO_HELD,
+	GRO_NORMAL,
+	GRO_DROP,
+};
+
 extern void __napi_schedule(struct napi_struct *n);
 
 static inline int napi_disable_pending(struct napi_struct *n)
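The GRO_* values are the return codes of the GRO receive path (dev_gro_receive() and the napi_gro_receive()/napi_gro_frags() wrappers). A hedged sketch of a driver RX routine reading them; the ex_* names and the priv layout are hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct ex_priv {			/* hypothetical driver private data */
	struct napi_struct napi;
	unsigned long rx_packets;
	unsigned long rx_dropped;
};

static void ex_rx_one(struct ex_priv *priv, struct sk_buff *skb)
{
	switch (napi_gro_receive(&priv->napi, skb)) {
	case GRO_MERGED:	/* data merged into a held packet */
	case GRO_MERGED_FREE:	/* merged and the skb itself was freed */
	case GRO_HELD:		/* parked on napi->gro_list awaiting more segments */
	case GRO_NORMAL:	/* passed up the stack via netif_receive_skb() */
		priv->rx_packets++;
		break;
	case GRO_DROP:		/* GRO could not take it; the skb was dropped */
		priv->rx_dropped++;
		break;
	}
}

In every case the skb belongs to the stack after the call; the driver only updates its counters.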
@@ -740,7 +751,7 @@ struct net_device
 	void			*dsa_ptr;	/* dsa specific data */
 #endif
 	void			*atalk_ptr;	/* AppleTalk link	*/
 	void			*ip_ptr;	/* IPv4 specific data	*/
 	void			*dn_ptr;	/* DECnet specific data */
 	void			*ip6_ptr;	/* IPv6 specific data */
 	void			*ec_ptr;	/* Econet specific data */
@@ -753,7 +764,7 @@ struct net_device
  */
 	unsigned long		last_rx;	/* Time of last Rx	*/
 	/* Interface address info used in eth_type_trans() */
 	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
 							   because most packets are unicast) */
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
@@ -984,6 +995,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 void netif_napi_del(struct napi_struct *napi);
 
 struct napi_gro_cb {
+	/* This indicates where we are processing relative to skb->data. */
+	int data_offset;
+
 	/* This is non-zero if the packet may be of the same flow. */
 	int same_flow;
 
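data_offset records how far GRO parsing has advanced relative to skb->data; the control block lives in skb->cb and is reached through the NAPI_GRO_CB() macro defined next to this struct. A small sketch of the intent (ex_advance and ex_hdrlen are invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Each protocol layer inspects its header at the current GRO offset and
 * then steps past it, leaving skb->data itself untouched. */
static void ex_advance(struct sk_buff *skb, unsigned int ex_hdrlen)
{
	unsigned int off = NAPI_GRO_CB(skb)->data_offset;

	/* ... look at the header that starts "off" bytes into the packet ... */

	NAPI_GRO_CB(skb)->data_offset = off + ex_hdrlen;
}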
@@ -1079,6 +1093,7 @@ extern void synchronize_net(void);
 extern int		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
 extern int		init_dummy_netdev(struct net_device *dev);
+extern void		netdev_resync_ops(struct net_device *dev);
 
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
@@ -1087,6 +1102,36 @@ extern int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int	netpoll_trap(void);
 #endif
+extern void *skb_gro_header(struct sk_buff *skb, unsigned int hlen);
+extern int skb_gro_receive(struct sk_buff **head,
+			   struct sk_buff *skb);
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+	return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+	return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+	NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+}
+
+static inline void *skb_gro_mac_header(struct sk_buff *skb)
+{
+	return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) :
+	       page_address(skb_shinfo(skb)->frags[0].page) +
+	       skb_shinfo(skb)->frags[0].page_offset;
+}
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				  unsigned short type,
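These helpers are what a protocol's ->gro_receive handler is expected to use: peek at the next header with skb_gro_header(), compare flows against the packets already held on the list, and advance with skb_gro_pull(). A sketch under assumptions: the ex_hdr layout, the flow key, and the handler name are invented; same_flow and flush are existing napi_gro_cb fields used the way the in-tree handlers use them.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct ex_hdr {				/* hypothetical protocol header */
	__be16 id;
	__be16 len;
};

static struct sk_buff **ex_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	struct ex_hdr *h;
	struct sk_buff *p;

	h = skb_gro_header(skb, sizeof(*h));
	if (unlikely(!h)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* header not available: don't hold */
		return NULL;
	}

	for (p = *head; p; p = p->next) {
		const struct ex_hdr *h2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* Held packets were parsed from the same offset, so the
		 * candidate header sits at the same place in p. */
		h2 = (const struct ex_hdr *)(p->data + off);
		if (h2->id != h->id)
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*h));	/* next layer starts after our header */

	return NULL;			/* NULL: let the core continue merging */
}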
@@ -1375,12 +1420,15 @@ extern int netif_receive_skb(struct sk_buff *skb);
 extern void		napi_gro_flush(struct napi_struct *napi);
 extern int		dev_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
+extern int		napi_skb_finish(int ret, struct sk_buff *skb);
 extern int		napi_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
 extern void		napi_reuse_skb(struct napi_struct *napi,
 				       struct sk_buff *skb);
 extern struct sk_buff *	napi_fraginfo_skb(struct napi_struct *napi,
 					  struct napi_gro_fraginfo *info);
+extern int		napi_frags_finish(struct napi_struct *napi,
+					  struct sk_buff *skb, int ret);
 extern int		napi_gro_frags(struct napi_struct *napi,
 				       struct napi_gro_fraginfo *info);
 extern void		netif_nit_deliver(struct sk_buff *skb);
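napi_skb_finish() and napi_frags_finish() split "classify the packet" (dev_gro_receive()) from "act on the verdict", so the two GRO entry points can share the first step. A sketch of how the wrappers plausibly compose them in net/core/dev.c, which is not shown by this header diff:

int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);		/* start parsing at skb->data */

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}

int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
{
	struct sk_buff *skb = napi_fraginfo_skb(napi, info);

	if (!skb)
		return NET_RX_DROP;

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}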
@@ -1574,56 +1622,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-/* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct napi_struct *napi)
-{
-	return napi_schedule_prep(napi);
-}
-
-/* Add interface to tail of rx poll list. This assumes that _prep has
- * already been called and returned 1.
- */
-static inline void __netif_rx_schedule(struct napi_struct *napi)
-{
-	__napi_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by irq handler. */
-
-static inline void netif_rx_schedule(struct napi_struct *napi)
-{
-	if (netif_rx_schedule_prep(napi))
-		__netif_rx_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
-static inline int netif_rx_reschedule(struct napi_struct *napi)
-{
-	if (napi_schedule_prep(napi)) {
-		__netif_rx_schedule(napi);
-		return 1;
-	}
-	return 0;
-}
-
-/* same as netif_rx_complete, except that local_irq_save(flags)
- * has already been issued
- */
-static inline void __netif_rx_complete(struct napi_struct *napi)
-{
-	__napi_complete(napi);
-}
-
-/* Remove interface from poll list: it must be in the poll list
- * on current cpu. This primitive is called by dev->poll(), when
- * it completes the work. The device cannot be out of poll list at this
- * moment, it is BUG().
- */
-static inline void netif_rx_complete(struct napi_struct *napi)
-{
-	napi_complete(napi);
-}
-
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
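The removed netif_rx_schedule*()/netif_rx_complete() wrappers were one-line aliases for the napi_* primitives, so drivers now call the napi_* functions directly. A sketch of the resulting driver pattern; every ex_* name is hypothetical.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct ex_priv {
	struct napi_struct napi;
	/* ... rings, registers ... */
};

static void ex_disable_rx_irq(struct ex_priv *priv) { /* hypothetical */ }
static void ex_enable_rx_irq(struct ex_priv *priv) { /* hypothetical */ }
static int ex_clean_rx(struct ex_priv *priv, int budget) { return 0; /* hypothetical */ }

static irqreturn_t ex_interrupt(int irq, void *dev_id)
{
	struct ex_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {	/* was netif_rx_schedule_prep() */
		ex_disable_rx_irq(priv);
		__napi_schedule(&priv->napi);	/* was __netif_rx_schedule() */
	}

	return IRQ_HANDLED;
}

static int ex_poll(struct napi_struct *napi, int budget)
{
	struct ex_priv *priv = container_of(napi, struct ex_priv, napi);
	int done = ex_clean_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		ex_enable_rx_irq(priv);
	}

	return done;
}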
@@ -1874,7 +1872,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
 
 	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
 		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-		    skb->protocol == __constant_htons(ETH_P_ARP))
+		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
 			return 0;
 
 		if (master->priv_flags & IFF_MASTER_ALB) {
@@ -1883,7 +1881,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
 			return 0;
 		}
 		if (master->priv_flags & IFF_MASTER_8023AD &&
-		    skb->protocol == __constant_htons(ETH_P_SLOW))
+		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
 			return 0;
 
 		return 1;
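skb->protocol is a __be16, so the constants it is compared against are written as compile-time big-endian values; __cpu_to_be16() performs the same constant conversion as __constant_htons() while staying on the generic byteorder helpers. A small illustration (ex_is_arp is invented):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static inline bool ex_is_arp(const struct sk_buff *skb)
{
	/* Both sides are __be16: the constant is byte-swapped at compile
	 * time on little-endian hosts, so the comparison costs nothing. */
	return skb->protocol == __cpu_to_be16(ETH_P_ARP);
}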