Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	| 149
1 file changed, 106 insertions, 43 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 812a5f3c2abe..97873e31661c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -63,30 +63,69 @@ struct wireless_dev;
 #define HAVE_FREE_NETDEV /* free_netdev() */
 #define HAVE_NETDEV_PRIV /* netdev_priv() */
 
-#define NET_XMIT_SUCCESS 0
-#define NET_XMIT_DROP 1 /* skb dropped */
-#define NET_XMIT_CN 2 /* congestion notification */
-#define NET_XMIT_POLICED 3 /* skb is shot by police */
-#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */
-
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
 #define NET_RX_DROP 1 /* packet dropped */
+
+/*
+ * Transmit return codes: transmit return codes originate from three different
+ * namespaces:
+ *
+ * - qdisc return codes
+ * - driver transmit return codes
+ * - errno values
+ *
+ * Drivers are allowed to return any one of those in their hard_start_xmit()
+ * function. Real network devices commonly used with qdiscs should only return
+ * the driver transmit return codes though - when qdiscs are used, the actual
+ * transmission happens asynchronously, so the value is not propagated to
+ * higher layers. Virtual network devices transmit synchronously, in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * others are propagated to higher layers.
+ */
+
+/* qdisc ->enqueue() return codes. */
+#define NET_XMIT_SUCCESS 0x00
+#define NET_XMIT_DROP 0x01 /* skb dropped */
+#define NET_XMIT_CN 0x02 /* congestion notification */
+#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
+#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
 
 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  * indicates that the device will soon be dropping packets, or already drops
  * some packets of the same priority; prompting us to send less aggressively. */
-#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
+#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
 
 /* Driver transmit return codes */
+#define NETDEV_TX_MASK 0xf0
+
 enum netdev_tx {
-	NETDEV_TX_OK = 0,	/* driver took care of packet */
-	NETDEV_TX_BUSY,		/* driver tx path was busy*/
-	NETDEV_TX_LOCKED = -1,	/* driver tx lock was already taken */
+	__NETDEV_TX_MIN = INT_MIN,	/* make sure enum is signed */
+	NETDEV_TX_OK = 0x00,		/* driver took care of packet */
+	NETDEV_TX_BUSY = 0x10,		/* driver tx path was busy*/
+	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
 
+/*
+ * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
+ * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
+ */
+static inline bool dev_xmit_complete(int rc)
+{
+	/*
+	 * Positive cases with an skb consumed by a driver:
+	 * - successful transmission (rc == NETDEV_TX_OK)
+	 * - error while transmitting (rc < 0)
+	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
+	 */
+	if (likely(rc < NET_XMIT_MASK))
+		return true;
+
+	return false;
+}
+
 #endif
 
 #define MAX_ADDR_LEN 32 /* Largest hardware address length */
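
The comment block above fixes the layout of the three return-code namespaces: qdisc codes occupy the low nibble (NET_XMIT_MASK == 0x0f), driver codes the next nibble (NETDEV_TX_MASK == 0xf0), and errno values stay negative, so dev_xmit_complete() can classify any hard_start_xmit() return with a single compare. A minimal sketch of how a driver and a synchronous caller could use this split; foo_priv, foo_tx_ring_full() and foo_hw_queue_skb() are hypothetical and not part of this patch:

	/* Hypothetical driver tx routine: either consume the skb or report BUSY. */
	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (foo_tx_ring_full(priv)) {
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;	/* skb not consumed, caller keeps ownership */
		}

		foo_hw_queue_skb(priv, skb);	/* hand the skb to hardware */
		return NETDEV_TX_OK;		/* skb consumed */
	}

	/* Caller-side sketch: NETDEV_TX_OK, negative errnos and qdisc codes all
	 * fall below NET_XMIT_MASK and mean the skb is gone; only the
	 * NETDEV_TX_BUSY / NETDEV_TX_LOCKED nibble leaves it with the caller. */
	static bool foo_try_xmit(struct net_device *dev, struct sk_buff *skb)
	{
		int rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

		return dev_xmit_complete(rc);	/* false: skb still ours, requeue/retry */
	}
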
@@ -125,8 +164,7 @@ typedef enum netdev_tx netdev_tx_t;
  * with byte counters.
  */
 
-struct net_device_stats
-{
+struct net_device_stats {
 	unsigned long rx_packets; /* total packets received */
 	unsigned long tx_packets; /* total packets transmitted */
 	unsigned long rx_bytes; /* total bytes received */
@@ -179,8 +217,7 @@ struct neighbour;
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats
-{
+struct netif_rx_stats {
 	unsigned total;
 	unsigned dropped;
 	unsigned time_squeeze;
@@ -189,8 +226,7 @@ struct netif_rx_stats
 
 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 
-struct dev_addr_list
-{
+struct dev_addr_list {
 	struct dev_addr_list *next;
 	u8 da_addr[MAX_ADDR_LEN];
 	u8 da_addrlen;
@@ -227,8 +263,7 @@ struct netdev_hw_addr_list {
 	int count;
 };
 
-struct hh_cache
-{
+struct hh_cache {
 	struct hh_cache *hh_next; /* Next entry */
 	atomic_t hh_refcnt; /* number of users */
 	/*
@@ -291,8 +326,7 @@ struct header_ops {
  * code.
  */
 
-enum netdev_state_t
-{
+enum netdev_state_t {
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_NOCARRIER,
@@ -341,20 +375,20 @@ struct napi_struct {
 	struct sk_buff *skb;
 };
 
-enum
-{
+enum {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
 	NAPI_STATE_DISABLE,	/* Disable pending */
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
-enum {
+enum gro_result {
 	GRO_MERGED,
 	GRO_MERGED_FREE,
 	GRO_HELD,
 	GRO_NORMAL,
 	GRO_DROP,
 };
+typedef enum gro_result gro_result_t;
 
 extern void __napi_schedule(struct napi_struct *n);
 
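
With the enum now named, the GRO entry points changed later in this patch (dev_gro_receive(), napi_gro_receive(), napi_gro_frags(), ...) return gro_result_t instead of a bare int, so callers can act on the outcome by name. A sketch of a NAPI poll handler using the typed result; foo_priv, its napi/netdev members and foo_rx_next_skb() are invented for illustration:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		struct sk_buff *skb;
		int work = 0;

		while (work < budget && (skb = foo_rx_next_skb(priv)) != NULL) {
			/* GRO merged, held or delivered the skb ... */
			if (napi_gro_receive(napi, skb) == GRO_DROP)
				priv->netdev->stats.rx_dropped++;	/* ... or dropped it */
			work++;
		}

		if (work < budget)
			napi_complete(napi);
		return work;
	}
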
@@ -457,8 +491,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n) barrier()
 #endif
 
-enum netdev_queue_state_t
-{
+enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
 };
@@ -635,6 +668,10 @@ struct net_device_ops {
 						unsigned int sgc);
 	int (*ndo_fcoe_ddp_done)(struct net_device *dev,
 						u16 xid);
+#define NETDEV_FCOE_WWNN 0
+#define NETDEV_FCOE_WWPN 1
+	int (*ndo_fcoe_get_wwn)(struct net_device *dev,
+						u64 *wwn, int type);
 #endif
 };
 
@@ -648,8 +685,7 @@ struct net_device_ops {
  * moves out.
  */
 
-struct net_device
-{
+struct net_device {
 
 	/*
 	 * This is the first field of the "visible" part of this structure
@@ -683,6 +719,7 @@ struct net_device
 
 	struct list_head dev_list;
 	struct list_head napi_list;
+	struct list_head unreg_list;
 
 	/* Net device features */
 	unsigned long features;
@@ -859,7 +896,7 @@ struct net_device
 	/* device index hash chain */
 	struct hlist_node index_hlist;
 
-	struct net_device *link_watch_next;
+	struct list_head link_watch_list;
 
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
@@ -894,8 +931,8 @@ struct net_device
 
 	/* class/net/name entry */
 	struct device dev;
-	/* space for optional statistics and wireless sysfs groups */
-	const struct attribute_group *sysfs_groups[3];
+	/* space for optional device, statistics, and wireless sysfs groups */
+	const struct attribute_group *sysfs_groups[4];
 
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
@@ -909,7 +946,7 @@ struct net_device
 
 #ifdef CONFIG_DCB
 	/* Data Center Bridging netlink ops */
-	struct dcbnl_rtnl_ops *dcbnl_ops;
+	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
 
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
@@ -1075,10 +1112,14 @@ extern rwlock_t dev_base_lock; /* Device list lock */
 
 #define for_each_netdev(net, d)		\
 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_rcu(net, d)		\
+		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_safe(net, d, n)	\
 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_continue(net, d)	\
 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_rcu(net, d)	\
+		list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
@@ -1091,6 +1132,16 @@ static inline struct net_device *next_net_device(struct net_device *dev)
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
+static inline struct net_device *next_net_device_rcu(struct net_device *dev)
+{
+	struct list_head *lh;
+	struct net *net;
+
+	net = dev_net(dev);
+	lh = rcu_dereference(dev->dev_list.next);
+	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
 static inline struct net_device *first_net_device(struct net *net)
 {
 	return list_empty(&net->dev_base_head) ? NULL :
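
for_each_netdev_rcu(), for_each_netdev_continue_rcu() and next_net_device_rcu() let the per-namespace device list be walked under rcu_read_lock() instead of dev_base_lock or the RTNL. A small sketch of the intended usage pattern; the counting itself is only illustrative:

	static int count_running_netdevs(struct net *net)
	{
		struct net_device *dev;
		int count = 0;

		rcu_read_lock();
		for_each_netdev_rcu(net, dev)
			if (netif_running(dev))
				count++;
		rcu_read_unlock();

		return count;
	}
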
@@ -1109,6 +1160,7 @@ extern void __dev_remove_pack(struct packet_type *pt);
 extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
 						unsigned short mask);
 extern struct net_device *dev_get_by_name(struct net *net, const char *name);
+extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
 extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
 extern int dev_alloc_name(struct net_device *dev, const char *name);
 extern int dev_open(struct net_device *dev);
@@ -1116,7 +1168,14 @@ extern int dev_close(struct net_device *dev);
 extern void dev_disable_lro(struct net_device *dev);
 extern int dev_queue_xmit(struct sk_buff *skb);
 extern int register_netdevice(struct net_device *dev);
-extern void unregister_netdevice(struct net_device *dev);
+extern void unregister_netdevice_queue(struct net_device *dev,
+				       struct list_head *head);
+extern void unregister_netdevice_many(struct list_head *head);
+static inline void unregister_netdevice(struct net_device *dev)
+{
+	unregister_netdevice_queue(dev, NULL);
+}
+
 extern void free_netdev(struct net_device *dev);
 extern void synchronize_net(void);
 extern int register_netdevice_notifier(struct notifier_block *nb);
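
unregister_netdevice_queue() only chains the device onto a caller-supplied list (via the new unreg_list member added earlier in this patch); unregister_netdevice_many() then tears the whole batch down in one pass, so the expensive synchronization points are paid once per batch rather than once per device, and plain unregister_netdevice() becomes the single-device special case. A sketch of how a driver removing many devices might use the pair; foo_priv and its list/netdev members are hypothetical:

	static void foo_destroy_all(struct list_head *foo_devices)
	{
		struct foo_priv *p;
		LIST_HEAD(unreg);

		rtnl_lock();
		list_for_each_entry(p, foo_devices, list)
			unregister_netdevice_queue(p->netdev, &unreg);
		unregister_netdevice_many(&unreg);
		rtnl_unlock();
	}
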
@@ -1127,6 +1186,7 @@ extern void netdev_resync_ops(struct net_device *dev);
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 extern int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int netpoll_trap(void);
@@ -1212,8 +1272,7 @@ static inline int unregister_gifconf(unsigned int family)
  * Incoming packets are placed on per-cpu queues so that
  * no locking is needed.
  */
-struct softnet_data
-{
+struct softnet_data {
 	struct Qdisc *output_queue;
 	struct sk_buff_head input_pkt_queue;
 	struct list_head poll_list;
@@ -1467,18 +1526,19 @@ extern int netif_rx_ni(struct sk_buff *skb);
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int netif_receive_skb(struct sk_buff *skb);
 extern void napi_gro_flush(struct napi_struct *napi);
-extern int dev_gro_receive(struct napi_struct *napi,
+extern gro_result_t dev_gro_receive(struct napi_struct *napi,
 			   struct sk_buff *skb);
-extern int napi_skb_finish(int ret, struct sk_buff *skb);
-extern int napi_gro_receive(struct napi_struct *napi,
+extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
+extern gro_result_t napi_gro_receive(struct napi_struct *napi,
 			   struct sk_buff *skb);
 extern void napi_reuse_skb(struct napi_struct *napi,
 			   struct sk_buff *skb);
 extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
-extern int napi_frags_finish(struct napi_struct *napi,
-			     struct sk_buff *skb, int ret);
+extern gro_result_t napi_frags_finish(struct napi_struct *napi,
+			     struct sk_buff *skb,
+			     gro_result_t ret);
 extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
-extern int napi_gro_frags(struct napi_struct *napi);
+extern gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -1540,6 +1600,7 @@ static inline void dev_hold(struct net_device *dev)
  */
 
 extern void linkwatch_fire_event(struct net_device *dev);
+extern void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  * netif_carrier_ok - test if carrier present
@@ -1609,7 +1670,8 @@ static inline int netif_dormant(const struct net_device *dev)
  *
  * Check if carrier is operational
  */
-static inline int netif_oper_up(const struct net_device *dev) {
+static inline int netif_oper_up(const struct net_device *dev)
+{
 	return (dev->operstate == IF_OPER_UP ||
 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 }
@@ -1880,6 +1942,7 @@ extern void netdev_features_change(struct net_device *dev);
 extern void dev_load(struct net *net, const char *name);
 extern void dev_mcast_init(void);
 extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
+extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
 
 extern int netdev_max_backlog;
 extern int weight_p;
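
dev_txq_stats_fold() adds the per-tx-queue counters kept in each netdev_queue into a net_device_stats aggregate, which is mainly useful to multiqueue drivers that do not maintain device-global tx counters themselves. A sketch of an ndo_get_stats implementation built on it, assuming the driver already updates the per-queue counters on its transmit path:

	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		struct net_device_stats *stats = &dev->stats;

		/* fold per-tx-queue packet/byte/drop counters into the aggregate */
		dev_txq_stats_fold(dev, stats);
		return stats;
	}
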