Diffstat (limited to 'include/linux/netdevice.h'):
 include/linux/netdevice.h | 108
 1 file changed, 45 insertions(+), 63 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca333e79e10f..ddee79bb8f15 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -34,8 +34,7 @@
 #include <linux/pm_qos_params.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
-#include <linux/mm.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
 
@@ -61,11 +60,6 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
 	( (netdev)->ethtool_ops = (ops) )
 
-#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
-					   functions are available. */
-#define HAVE_FREE_NETDEV		/* free_netdev() */
-#define HAVE_NETDEV_PRIV		/* netdev_priv() */
-
 /* hardware address assignment types */
 #define NET_ADDR_PERM		0	/* address is permanent (default) */
 #define NET_ADDR_RANDOM		1	/* address is generated randomly */
@@ -258,21 +252,8 @@ struct netdev_hw_addr_list {
 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
-	struct hh_cache *hh_next;	/* Next entry */
-	atomic_t	hh_refcnt;	/* number of users */
-/*
- * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
- * cache line on SMP.
- * They are mostly read, but hh_refcnt may be changed quite frequently,
- * incurring cache line ping pongs.
- */
-	__be16		hh_type ____cacheline_aligned_in_smp;
-					/* protocol identifier, f.e ETH_P_IP
-					 *  NOTE:  For VLANs, this will be the
-					 *  encapuslated type. --BLG
-					 */
-	u16		hh_len;		/* length of header */
-	int		(*hh_output)(struct sk_buff *skb);
+	u16		hh_len;
+	u16		__pad;
 	seqlock_t	hh_lock;
 
 	/* cached hardware header; allow for machine alignment needs. */
@@ -284,12 +265,6 @@ struct hh_cache {
 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
 };
 
-static inline void hh_cache_put(struct hh_cache *hh)
-{
-	if (atomic_dec_and_test(&hh->hh_refcnt))
-		kfree(hh);
-}
-
 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  * Alternative is:
  *   dev->hard_header_len ? (dev->hard_header_len +
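With hh_next, hh_refcnt, hh_type and hh_output gone, struct hh_cache is reduced to hh_len, hh_data and the hh_lock seqlock: readers copy the cached header under the seqlock instead of holding a reference. A minimal reader-side sketch, assuming a consumer along the lines of neigh_hh_output() (the in-tree code handles HH_DATA_MOD alignment more carefully than this simplified copy):

/* Reader-side sketch only: how a cached L2 header is consumed once the
 * refcount and chain are gone. Simplified from neigh_hh_output() in
 * include/net/neighbour.h; alignment of the copy is glossed over here.
 */
static inline int hh_output_sketch(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq;
	u16 hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);   /* consistent hh_len/hh_data */
		hh_len = hh->hh_len;
		memcpy(skb->data - hh_len, hh->hh_data, hh_len);
	} while (read_seqretry(&hh->hh_lock, seq));

	skb_push(skb, hh_len);                       /* prepend the cached header */
	return dev_queue_xmit(skb);
}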
@@ -314,8 +289,7 @@ struct header_ops {
 			   const void *saddr, unsigned len);
 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
 	int	(*rebuild)(struct sk_buff *skb);
-#define HAVE_HEADER_CACHE
-	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
 	void	(*cache_update)(struct hh_cache *hh,
 				const struct net_device *dev,
 				const unsigned char *haddr);
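The cache() hook now receives the protocol type explicitly instead of reading it back from the removed hh->hh_type field. A hedged sketch of an Ethernet-style implementation under the new signature, patterned after eth_header_cache() (error handling abbreviated):

/* Sketch: populate the cached hardware header once so later transmits can
 * just memcpy() it. Patterned after eth_header_cache(); details trimmed.
 */
static int example_header_cache(const struct neighbour *neigh,
				struct hh_cache *hh, __be16 type)
{
	const struct net_device *dev = neigh->dev;
	struct ethhdr *eth;

	if (type == htons(ETH_P_802_3))
		return -1;                       /* no cacheable header */

	eth = (struct ethhdr *)(((u8 *)hh->hh_data) +
				HH_DATA_OFF(sizeof(*eth)));
	eth->h_proto = type;                     /* type now comes from the caller */
	memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
	hh->hh_len = ETH_HLEN;
	return 0;
}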
@@ -556,7 +530,7 @@ struct netdev_queue {
 	struct Qdisc	*qdisc;
 	unsigned long	state;
 	struct Qdisc	*qdisc_sleeping;
-#ifdef CONFIG_RPS
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	struct kobject	kobj;
 #endif
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -794,12 +768,6 @@ struct netdev_tc_txq {
  * 3. Update dev->stats asynchronously and atomically, and define
  *    neither operation.
  *
- * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
- *	If device support VLAN receive acceleration
- *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
- *	when vlan groups for the device changes.  Note: grp is NULL
- *	if no vlan's groups are being used.
- *
  * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *	this function is called when a VLAN id is registered.
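With ndo_vlan_rx_register removed, a driver with NETIF_F_HW_VLAN_RX no longer maintains a vlan_group; it simply records the tag in the skb and hands the packet up, while the per-VID callbacks documented above remain for hardware filtering. An illustrative driver RX fragment, assuming hypothetical descriptor fields and a hypothetical private struct (needs <linux/if_vlan.h> and <linux/etherdevice.h>):

/* Illustrative only: example_priv and the vlan_tagged/vlan_tci inputs are
 * hypothetical driver state, not part of this header.
 */
static void example_rx_one(struct example_priv *priv, struct sk_buff *skb,
			   u16 vlan_tci, bool vlan_tagged)
{
	if (vlan_tagged)
		__vlan_hwaccel_put_tag(skb, vlan_tci);  /* tag lives in the skb */

	skb->protocol = eth_type_trans(skb, priv->netdev);
	napi_gro_receive(&priv->napi, skb);             /* stack demuxes the VLAN */
}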
@@ -888,7 +856,6 @@ struct netdev_tc_txq {
  *	Must return >0 or -errno if it changed dev->features itself.
  *
  */
-#define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
 	void			(*ndo_uninit)(struct net_device *dev);
@@ -919,8 +886,6 @@ struct net_device_ops {
 						     struct rtnl_link_stats64 *storage);
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-	void			(*ndo_vlan_rx_register)(struct net_device *dev,
-							struct vlan_group *grp);
 	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 						       unsigned short vid);
 	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
@@ -1097,12 +1062,6 @@ struct net_device {
 #define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
 				 NETIF_F_FSO)
 
-#define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
-				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
-				 NETIF_F_HIGHDMA | \
-				 NETIF_F_SCTP_CSUM | \
-				 NETIF_F_ALL_FCOE)
-
 	/*
 	 * If one device supports one of these features, then enable them
 	 * for all in netdev_increment_features.
@@ -1173,7 +1132,7 @@ struct net_device {
 	spinlock_t		addr_list_lock;
 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
-	int			uc_promisc;
+	bool			uc_promisc;
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
 
@@ -1220,7 +1179,7 @@ struct net_device {
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
-#ifdef CONFIG_RPS
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	struct kset		*queues_kset;
 
 	struct netdev_rx_queue	*_rx;
@@ -1348,9 +1307,6 @@ struct net_device {
 	/* max exchange id for FCoE LRO by ddp */
 	unsigned int		fcoe_ddp_xid;
 #endif
-	/* n-tuple filter list attached to this device */
-	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
-
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device *phydev;
 
@@ -1563,9 +1519,41 @@ struct packet_type {
 	struct list_head	list;
 };
 
-#include <linux/interrupt.h>
 #include <linux/notifier.h>
 
+/* netdevice notifier chain. Please remember to update the rtnetlink
+ * notification exclusion list in rtnetlink_event() when adding new
+ * types.
+ */
+#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
+#define NETDEV_DOWN	0x0002
+#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
+				   detected a hardware crash and restarted
+				   - we can use this eg to kick tcp sessions
+				   once done */
+#define NETDEV_CHANGE	0x0004	/* Notify device state change */
+#define NETDEV_REGISTER 0x0005
+#define NETDEV_UNREGISTER	0x0006
+#define NETDEV_CHANGEMTU	0x0007
+#define NETDEV_CHANGEADDR	0x0008
+#define NETDEV_GOING_DOWN	0x0009
+#define NETDEV_CHANGENAME	0x000A
+#define NETDEV_FEAT_CHANGE	0x000B
+#define NETDEV_BONDING_FAILOVER 0x000C
+#define NETDEV_PRE_UP		0x000D
+#define NETDEV_PRE_TYPE_CHANGE	0x000E
+#define NETDEV_POST_TYPE_CHANGE	0x000F
+#define NETDEV_POST_INIT	0x0010
+#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_RELEASE		0x0012
+#define NETDEV_NOTIFY_PEERS	0x0013
+#define NETDEV_JOIN		0x0014
+
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+
+
 extern rwlock_t				dev_base_lock;	/* Device list lock */
 
 
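The notifier event codes and the register/unregister/call prototypes are now grouped directly under the <linux/notifier.h> include (the stray declarations removed further down in this patch). Usage is unchanged; a minimal subscriber sketch, assuming a kernel of this vintage where the notifier payload pointer is the struct net_device itself:

/* Sketch of a netdevice notifier user; the netdev_notifier_info wrapper did
 * not exist yet, so ptr is the net_device itself in this era.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		/* quiesce anything still using dev before it goes down */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) at module init,
 * unregister_netdevice_notifier(&example_netdev_nb) at module exit.
 */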
@@ -1648,12 +1636,9 @@ static inline void unregister_netdevice(struct net_device *dev)
 extern int		netdev_refcnt_read(const struct net_device *dev);
 extern void		free_netdev(struct net_device *dev);
 extern void		synchronize_net(void);
-extern int		register_netdevice_notifier(struct notifier_block *nb);
-extern int		unregister_netdevice_notifier(struct notifier_block *nb);
 extern int		init_dummy_netdev(struct net_device *dev);
 extern void		netdev_resync_ops(struct net_device *dev);
 
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -1694,9 +1679,12 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
+	if (!pskb_may_pull(skb, hlen))
+		return NULL;
+
 	NAPI_GRO_CB(skb)->frag0 = NULL;
 	NAPI_GRO_CB(skb)->frag0_len = 0;
-	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+	return skb->data + offset;
 }
 
 static inline void *skb_gro_mac_header(struct sk_buff *skb)
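skb_gro_header_slow() now bails out before touching frag0 when the pull fails; callers keep the usual fast/slow pattern of trying the frag0 shortcut first and falling back to the slow helper (which may pull and relocate the header) only when needed. A simplified caller sketch, modeled on protocol gro_receive handlers such as inet_gro_receive() (struct iphdr is just an example; needs <linux/ip.h>):

/* Caller-side sketch of the GRO header helpers; simplified. */
static int example_gro_peek(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct iphdr);
	const struct iphdr *iph;

	iph = skb_gro_header_fast(skb, off);               /* frag0 shortcut, no pull */
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off); /* may pull/copy */
		if (!iph)
			return -EINVAL;                    /* header unavailable */
	}
	return iph->protocol;
}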
@@ -1786,8 +1774,6 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-#define HAVE_NETIF_QUEUE
-
 extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
@@ -2063,10 +2049,8 @@ extern void dev_kfree_skb_irq(struct sk_buff *skb);
  */
 extern void dev_kfree_skb_any(struct sk_buff *skb);
 
-#define HAVE_NETIF_RX 1
 extern int		netif_rx(struct sk_buff *skb);
 extern int		netif_rx_ni(struct sk_buff *skb);
-#define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
 extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
@@ -2246,7 +2230,6 @@ extern void netif_device_attach(struct net_device *dev);
 /*
  * Network interface message level settings
  */
-#define HAVE_NETIF_MSG 1
 
 enum {
 	NETIF_MSG_DRV		= 0x0001,
@@ -2555,7 +2538,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
+extern const char *netdev_drivername(const struct net_device *dev);
 
 extern void linkwatch_run_queue(void);
 
@@ -2564,7 +2547,6 @@ static inline u32 netdev_get_wanted_features(struct net_device *dev)
 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 }
 u32 netdev_increment_features(u32 all, u32 one, u32 mask);
-u32 netdev_fix_features(struct net_device *dev, u32 features);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
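netdev_fix_features() is no longer declared here; reconciliation of feature flags happens inside __netdev_update_features(), and drivers express their constraints through the ndo_fix_features/ndo_set_features hooks instead. A hedged driver-side sketch (the TSO-requires-SG rule and the RX-checksum reprogramming below are illustrative, not mandated by this header):

/* Sketch: constrain and apply offload features via net_device_ops rather
 * than by calling netdev_fix_features() directly.
 */
static u32 example_fix_features(struct net_device *dev, u32 features)
{
	if (!(features & NETIF_F_SG))           /* example rule: TSO needs SG */
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int example_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram hardware RX checksum offload here */
	}
	return 0;
}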