Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 100 +++++++++++--------------
 1 file changed, 44 insertions(+), 56 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9e19477991a..ddee79bb8f1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -34,8 +34,7 @@
 #include <linux/pm_qos_params.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
-#include <linux/mm.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
 
@@ -61,11 +60,6 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
 	( (netdev)->ethtool_ops = (ops) )
 
-#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
-					   functions are available. */
-#define HAVE_FREE_NETDEV		/* free_netdev() */
-#define HAVE_NETDEV_PRIV		/* netdev_priv() */
-
 /* hardware address assignment types */
 #define NET_ADDR_PERM		0	/* address is permanent (default) */
 #define NET_ADDR_RANDOM		1	/* address is generated randomly */
@@ -258,21 +252,8 @@ struct netdev_hw_addr_list {
 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
-	struct hh_cache *hh_next;	/* Next entry */
-	atomic_t	hh_refcnt;	/* number of users */
-/*
- * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
- * cache line on SMP.
- * They are mostly read, but hh_refcnt may be changed quite frequently,
- * incurring cache line ping pongs.
- */
-	__be16		hh_type ____cacheline_aligned_in_smp;
-					/* protocol identifier, f.e ETH_P_IP
-					 * NOTE: For VLANs, this will be the
-					 * encapuslated type. --BLG
-					 */
-	u16		hh_len;		/* length of header */
-	int		(*hh_output)(struct sk_buff *skb);
+	u16		hh_len;
+	u16		__pad;
 	seqlock_t	hh_lock;
 
 	/* cached hardware header; allow for machine alignment needs. */
@@ -284,12 +265,6 @@ struct hh_cache {
 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
 };
 
-static inline void hh_cache_put(struct hh_cache *hh)
-{
-	if (atomic_dec_and_test(&hh->hh_refcnt))
-		kfree(hh);
-}
-
 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  * Alternative is:
  *   dev->hard_header_len ? (dev->hard_header_len +
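
With hh_refcnt, hh_next and hh_output gone, hh_cache entries are no longer
refcounted or dispatched through a function pointer; consumers simply copy the
cached header under the hh_lock seqlock. A minimal sketch of the reader side
(close to what net/core does, simplified here for illustration; assumes the
skb has the usual LL_RESERVED_SPACE headroom):

	static inline int sketch_hh_output(struct hh_cache *hh, struct sk_buff *skb)
	{
		unsigned int seq;
		int hh_len;

		/* seqlock read loop: retry if a writer updated the cached header */
		do {
			seq = read_seqbegin(&hh->hh_lock);
			hh_len = hh->hh_len;
			memcpy(skb->data - HH_DATA_ALIGN(hh_len), hh->hh_data,
			       HH_DATA_ALIGN(hh_len));
		} while (read_seqretry(&hh->hh_lock, seq));

		skb_push(skb, hh_len);
		return dev_queue_xmit(skb);
	}
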
@@ -314,8 +289,7 @@ struct header_ops {
 			   const void *saddr, unsigned len);
 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
 	int	(*rebuild)(struct sk_buff *skb);
-#define HAVE_HEADER_CACHE
-	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
 	void	(*cache_update)(struct hh_cache *hh,
 				const struct net_device *dev,
 				const unsigned char *haddr);
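
With hh_type removed from struct hh_cache, the protocol type is now handed to
the .cache() callback directly. A sketch of how an Ethernet-style
implementation picks it up (modelled on eth_header_cache; simplified, details
may differ):

	static int sketch_header_cache(const struct neighbour *neigh,
				       struct hh_cache *hh, __be16 type)
	{
		struct ethhdr *eth;
		const struct net_device *dev = neigh->dev;

		eth = (struct ethhdr *)
			(((u8 *)hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));

		if (type == htons(ETH_P_802_3))
			return -1;	/* raw 802.3 frames carry no type field */

		eth->h_proto = type;			/* was hh->hh_type */
		memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
		memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
		hh->hh_len = ETH_HLEN;
		return 0;
	}
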
@@ -556,7 +530,7 @@ struct netdev_queue {
 	struct Qdisc	*qdisc;
 	unsigned long	state;
 	struct Qdisc	*qdisc_sleeping;
-#ifdef CONFIG_RPS
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	struct kobject	kobj;
 #endif
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -794,12 +768,6 @@ struct netdev_tc_txq {
  * 3. Update dev->stats asynchronously and atomically, and define
  *    neither operation.
  *
- * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
- *	If device support VLAN receive acceleration
- *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
- *	when vlan groups for the device changes.  Note: grp is NULL
- *	if no vlan's groups are being used.
- *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
@@ -888,7 +856,6 @@ struct netdev_tc_txq {
  *	Must return >0 or -errno if it changed dev->features itself.
  *
  */
-#define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
 	void			(*ndo_uninit)(struct net_device *dev);
@@ -919,8 +886,6 @@ struct net_device_ops {
 						     struct rtnl_link_stats64 *storage);
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-	void			(*ndo_vlan_rx_register)(struct net_device *dev,
-							struct vlan_group *grp);
 	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 						       unsigned short vid);
 	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
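
With ndo_vlan_rx_register gone, a driver with hardware VLAN support no longer
sees the vlan_group at all; it manages its filter strictly per-VID. A
hypothetical driver (all foo_* names are placeholders, not a real API) would
now look roughly like:

	static void foo_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* hypothetical per-driver bookkeeping + hardware filter update */
		set_bit(vid, priv->active_vlans);
		foo_hw_set_vlan_filter(priv, vid, true);
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,
		.ndo_stop		= foo_stop,
		.ndo_start_xmit		= foo_start_xmit,
		.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
	};
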
@@ -1167,7 +1132,7 @@ struct net_device {
 	spinlock_t		addr_list_lock;
 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
-	int			uc_promisc;
+	bool			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -1214,7 +1179,7 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
-#ifdef CONFIG_RPS
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	struct kset		*queues_kset;
 
 	struct netdev_rx_queue	*_rx;
@@ -1342,9 +1307,6 @@ struct net_device {
 	/* max exchange id for FCoE LRO by ddp */
 	unsigned int		fcoe_ddp_xid;
 #endif
-	/* n-tuple filter list attached to this device */
-	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
-
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device	*phydev;
 
@@ -1557,9 +1519,41 @@ struct packet_type {
 	struct list_head	list;
 };
 
-#include <linux/interrupt.h>
 #include <linux/notifier.h>
 
+/* netdevice notifier chain. Please remember to update the rtnetlink
+ * notification exclusion list in rtnetlink_event() when adding new
+ * types.
+ */
+#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
+#define NETDEV_DOWN	0x0002
+#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
+				   detected a hardware crash and restarted
+				   - we can use this eg to kick tcp sessions
+				   once done */
+#define NETDEV_CHANGE	0x0004	/* Notify device state change */
+#define NETDEV_REGISTER	0x0005
+#define NETDEV_UNREGISTER	0x0006
+#define NETDEV_CHANGEMTU	0x0007
+#define NETDEV_CHANGEADDR	0x0008
+#define NETDEV_GOING_DOWN	0x0009
+#define NETDEV_CHANGENAME	0x000A
+#define NETDEV_FEAT_CHANGE	0x000B
+#define NETDEV_BONDING_FAILOVER	0x000C
+#define NETDEV_PRE_UP		0x000D
+#define NETDEV_PRE_TYPE_CHANGE	0x000E
+#define NETDEV_POST_TYPE_CHANGE	0x000F
+#define NETDEV_POST_INIT	0x0010
+#define NETDEV_UNREGISTER_BATCH	0x0011
+#define NETDEV_RELEASE		0x0012
+#define NETDEV_NOTIFY_PEERS	0x0013
+#define NETDEV_JOIN		0x0014
+
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+
+
 extern rwlock_t				dev_base_lock;		/* Device list lock */
 
 
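
As a usage sketch, a module can watch these events by hanging a callback off
the chain; at this point in the tree the notifier's data pointer is the
struct net_device itself (foo_* names are hypothetical):

	static int foo_netdev_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		switch (event) {
		case NETDEV_UP:
			pr_info("%s is up\n", dev->name);
			break;
		case NETDEV_GOING_DOWN:
			/* last chance to quiesce users of dev before it goes down */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block foo_netdev_nb = {
		.notifier_call = foo_netdev_event,
	};

	/* module init:  register_netdevice_notifier(&foo_netdev_nb);   */
	/* module exit:  unregister_netdevice_notifier(&foo_netdev_nb); */
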
@@ -1642,12 +1636,9 @@ static inline void unregister_netdevice(struct net_device *dev)
 extern int		netdev_refcnt_read(const struct net_device *dev);
 extern void		free_netdev(struct net_device *dev);
 extern void		synchronize_net(void);
-extern int		register_netdevice_notifier(struct notifier_block *nb);
-extern int		unregister_netdevice_notifier(struct notifier_block *nb);
 extern int		init_dummy_netdev(struct net_device *dev);
 extern void		netdev_resync_ops(struct net_device *dev);
 
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -1688,9 +1679,12 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
+	if (!pskb_may_pull(skb, hlen))
+		return NULL;
+
 	NAPI_GRO_CB(skb)->frag0 = NULL;
 	NAPI_GRO_CB(skb)->frag0_len = 0;
-	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+	return skb->data + offset;
 }
 
 static inline void *skb_gro_mac_header(struct sk_buff *skb)
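
The reorder means a failed pull now returns early and leaves frag0/frag0_len
intact instead of clearing them first. The caller pattern is unchanged; a
sketch of the usual fast-then-slow sequence as used by GRO receive handlers
(sketch_gro_header is illustrative, not a real helper):

	static void *sketch_gro_header(struct sk_buff *skb, unsigned int hlen)
	{
		unsigned int off = skb_gro_offset(skb);
		void *hdr = skb_gro_header_fast(skb, off);

		/* fall back to the linear area only when frag0 can't cover hlen */
		if (skb_gro_header_hard(skb, hlen))
			hdr = skb_gro_header_slow(skb, hlen, off);
		return hdr;	/* NULL if the packet is too short */
	}
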
@@ -1780,8 +1774,6 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-#define HAVE_NETIF_QUEUE
-
 extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
@@ -2057,10 +2049,8 @@ extern void dev_kfree_skb_irq(struct sk_buff *skb);
  */
 extern void dev_kfree_skb_any(struct sk_buff *skb);
 
-#define HAVE_NETIF_RX 1
 extern int		netif_rx(struct sk_buff *skb);
 extern int		netif_rx_ni(struct sk_buff *skb);
-#define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
 extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
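
The HAVE_NETIF_RX/HAVE_NETIF_RECEIVE_SKB guards are gone; drivers can assume
these entry points exist. For reference, the classic delivery path from a
(hypothetical) driver's receive handler looks like:

	static void foo_rx_frame(struct net_device *dev, const void *buf, int len)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
		memcpy(skb_put(skb, len), buf, len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);	/* hard-IRQ safe; use netif_rx_ni() from process context */
	}
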
@@ -2240,7 +2230,6 @@ extern void netif_device_attach(struct net_device *dev);
 /*
  * Network interface message level settings
  */
-#define HAVE_NETIF_MSG 1
 
 enum {
 	NETIF_MSG_DRV		= 0x0001,
@@ -2558,7 +2547,6 @@ static inline u32 netdev_get_wanted_features(struct net_device *dev)
 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 }
 u32 netdev_increment_features(u32 all, u32 one, u32 mask);
-u32 netdev_fix_features(struct net_device *dev, u32 features);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
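
With netdev_fix_features() out of the public API, a driver that changes a
constraint asks the core to recompute the feature set instead. A minimal
sketch (foo_change_mtu is a hypothetical ndo_change_mtu implementation; rtnl
is already held when it is called):

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		dev->mtu = new_mtu;
		/* re-run ndo_fix_features/ndo_set_features against the new MTU */
		netdev_update_features(dev);
		return 0;
	}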