Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   51
1 file changed, 32 insertions, 19 deletions

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d4a4d9867794..65ee1929b2b1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -72,10 +72,6 @@ struct wireless_dev;
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
 #define NET_RX_DROP 1 /* packet dropped */
-#define NET_RX_CN_LOW 2 /* storm alert, just in case */
-#define NET_RX_CN_MOD 3 /* Storm on its way! */
-#define NET_RX_CN_HIGH 4 /* The storm is here */
-#define NET_RX_BAD 5 /* packet dropped due to kernel error */
 
 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  * indicates that the device will soon be dropping packets, or already drops
@@ -83,17 +79,19 @@ struct wireless_dev;
 #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
 
+/* Driver transmit return codes */
+enum netdev_tx {
+        NETDEV_TX_OK = 0,       /* driver took care of packet */
+        NETDEV_TX_BUSY,         /* driver tx path was busy*/
+        NETDEV_TX_LOCKED = -1,  /* driver tx lock was already taken */
+};
+typedef enum netdev_tx netdev_tx_t;
+
 #endif
 
 #define MAX_ADDR_LEN 32 /* Largest hardware address length */
 
-/* Driver transmit return codes */
-#define NETDEV_TX_OK 0 /* driver took care of packet */
-#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
-#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
-
 #ifdef __KERNEL__
-
 /*
  * Compute the worst case header length according to the protocols
  * used.
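The hunk above moves the driver transmit return codes out of the old NETDEV_TX_* macro block and into an enum with a netdev_tx_t typedef, so ndo_start_xmit implementations carry a proper return type. As a rough, hypothetical sketch of a driver written against the new type (the foo_* names and the trivial ring accounting are illustrative, not part of this patch):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical driver state -- not part of this patch. */
    struct foo_priv {
            unsigned int tx_free;           /* free TX descriptors */
    };

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (!priv->tx_free) {
                    /* Ring full: stop the queue and let the stack retry later. */
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;
            }

            priv->tx_free--;                /* stand-in for queueing the skb to hardware */
            dev_kfree_skb(skb);             /* a real driver would DMA-map and transmit it */
            return NETDEV_TX_OK;            /* skb has been consumed by the driver */
    }

Returning the enum costs nothing at runtime; the typedef mainly documents the contract and helps static checking catch drivers that return stray error codes.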
@@ -511,9 +509,11 @@ struct netdev_queue {
  * This function is called when network device transistions to the down
  * state.
  *
- * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
+ *                               struct net_device *dev);
  *	Called when a packet needs to be transmitted.
- *	Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED,
+ *	Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
@@ -584,7 +584,7 @@ struct net_device_ops {
 	void (*ndo_uninit)(struct net_device *dev);
 	int (*ndo_open)(struct net_device *dev);
 	int (*ndo_stop)(struct net_device *dev);
-	int (*ndo_start_xmit) (struct sk_buff *skb,
+	netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
 					       struct net_device *dev);
 	u16 (*ndo_select_queue)(struct net_device *dev,
 					       struct sk_buff *skb);
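With the prototype change above, a driver only has to adjust the return type of its transmit handler; wiring it into struct net_device_ops is unchanged. A minimal, illustrative hook-up using the foo_start_xmit sketch from earlier:

    static const struct net_device_ops foo_netdev_ops = {
            /* other callbacks elided -- only the xmit prototype changes */
            .ndo_start_xmit = foo_start_xmit,       /* now returns netdev_tx_t */
    };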
@@ -627,6 +627,8 @@ struct net_device_ops {
 	void (*ndo_poll_controller)(struct net_device *dev);
 #endif
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	int (*ndo_fcoe_enable)(struct net_device *dev);
+	int (*ndo_fcoe_disable)(struct net_device *dev);
 	int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
 				  u16 xid,
 				  struct scatterlist *sgl,
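The two new FCoE callbacks sit next to the existing DDP hooks and give the FCoE stack explicit enable/disable entry points into the driver. A hedged sketch of how a driver might fill them in (the foo_* names are hypothetical, and the 0/-errno return convention is assumed from the surrounding ops, not stated in this hunk):

    #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
    static int foo_fcoe_enable(struct net_device *dev)
    {
            /* e.g. reserve FCoE queues and switch on DDP in hardware */
            return 0;
    }

    static int foo_fcoe_disable(struct net_device *dev)
    {
            /* undo whatever foo_fcoe_enable() set up */
            return 0;
    }
    #endif

These would then be plugged into the ops table as .ndo_fcoe_enable / .ndo_fcoe_disable alongside the existing .ndo_fcoe_ddp_setup.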
@@ -705,6 +707,7 @@ struct net_device
 /* the GSO_MASK reserves bits 16 through 23 */
 #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
 #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
+#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 
 /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT 16
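NETIF_F_FCOE_MTU is a capability bit like its neighbours: a driver whose hardware can carry full-sized 2158-byte FCoE frames advertises it in dev->features, typically at probe time. Illustrative only:

    static void foo_set_fcoe_features(struct net_device *dev)
    {
            dev->features |= NETIF_F_FCOE_CRC;      /* FCoE CRC32 offload */
            dev->features |= NETIF_F_FCOE_MTU;      /* full 2158-byte FCoE MTU supported */
    }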
@@ -829,6 +832,9 @@ struct net_device
 	/* Number of TX queues currently active in device */
 	unsigned int real_num_tx_queues;
 
+	/* root qdisc from userspace point of view */
+	struct Qdisc *qdisc;
+
 	unsigned long tx_queue_len; /* Max frames per queue allowed */
 	spinlock_t tx_global_lock;
 	/*
@@ -992,6 +998,12 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
 
+/* Set the sysfs device type for the network logical device to allow
+ * fin grained indentification of different network device types. For
+ * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
+ */
+#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+
 /**
  *	netif_napi_add - initialize a napi context
  *	@dev:  network device
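SET_NETDEV_DEVTYPE mirrors SET_NETDEV_DEV just above it, but fills in dev.type rather than dev.parent, so sysfs and uevents can report what kind of logical interface this is. A short usage sketch (the "wlan" type and foo_register() are examples, not part of the patch):

    static struct device_type wlan_type = {
            .name = "wlan",
    };

    static void foo_register(struct net_device *dev, struct device *parent)
    {
            SET_NETDEV_DEV(dev, parent);            /* parent physical device */
            SET_NETDEV_DEVTYPE(dev, &wlan_type);    /* reported as DEVTYPE=wlan */
    }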
@@ -1260,7 +1272,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+		netif_tx_start_queue(dev_queue);
 		return;
 	}
 #endif
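Here the open-coded clear_bit() is replaced by the netif_tx_start_queue() helper. Judging from the line it replaces, the helper is presumably just a thin wrapper around the same bit operation, along the lines of:

    static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
    {
            clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
    }

Routing all queue state changes through one helper keeps the __QUEUE_STATE_XOFF detail in a single place.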
@@ -1366,7 +1378,8 @@ static inline int netif_running(const struct net_device *dev)
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+	netif_tx_start_queue(txq);
 }
 
 /**
@@ -1383,7 +1396,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__QUEUE_STATE_XOFF, &txq->state);
+	netif_tx_stop_queue(txq);
 }
 
 /**
@@ -1397,7 +1410,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					 u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
+
+	return netif_tx_queue_stopped(txq);
 }
 
 static inline int netif_subqueue_stopped(const struct net_device *dev,
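The subqueue helpers above now defer to the per-queue netif_tx_* primitives instead of touching txq->state directly. From a driver's point of view nothing changes; a multiqueue TX-completion path would still look roughly like this (foo_tx_complete and its queue bookkeeping are illustrative):

    static void foo_tx_complete(struct net_device *dev, u16 queue)
    {
            /* Descriptors were just freed on this ring; resume the
             * subqueue if it had been flow-controlled earlier.
             */
            if (__netif_subqueue_stopped(dev, queue))
                    netif_wake_subqueue(dev, queue);
    }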
@@ -1749,8 +1763,7 @@ static inline void netif_tx_unlock(struct net_device *dev)
 		 * force a schedule.
 		 */
 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
-		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
-			__netif_schedule(txq->qdisc);
+		netif_schedule_queue(txq);
 	}
 	spin_unlock(&dev->tx_global_lock);
 }
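As in the earlier hunks, an open-coded test-and-schedule pair becomes a single helper call. Going by the two lines it replaces, netif_schedule_queue() presumably bundles the XOFF check with __netif_schedule(), roughly:

    static inline void netif_schedule_queue(struct netdev_queue *txq)
    {
            if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
                    __netif_schedule(txq->qdisc);
    }

which keeps netif_tx_unlock() from reaching into the queue state bits directly.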