-rw-r--r--	include/linux/netdevice.h	58
-rw-r--r--	net/core/dev.c	51
2 files changed, 54 insertions(+), 55 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f5ea445f89f0..b4d056ceab96 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,17 +996,17 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  *	netif_start_queue - allow transmit
  *	@dev: network device
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
-{
-	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_start_queue(struct net_device *dev)
 {
 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
@@ -1022,13 +1022,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 	}
 }
 
-/**
- *	netif_wake_queue - restart transmit
- *	@dev: network device
- *
- *	Allow upper layers to call the device hard_start_xmit routine.
- *	Used for flow control when transmit resources are available.
- */
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
@@ -1041,6 +1034,13 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 		__netif_schedule(dev_queue->qdisc);
 }
 
+/**
+ *	netif_wake_queue - restart transmit
+ *	@dev: network device
+ *
+ *	Allow upper layers to call the device hard_start_xmit routine.
+ *	Used for flow control when transmit resources are available.
+ */
 static inline void netif_wake_queue(struct net_device *dev)
 {
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
@@ -1056,6 +1056,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 	}
 }
 
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  *	netif_stop_queue - stop transmitted packets
  *	@dev: network device
@@ -1063,11 +1068,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
-{
-	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_stop_queue(struct net_device *dev)
 {
 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
@@ -1083,17 +1083,17 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 	}
 }
 
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  *	netif_queue_stopped - test if transmit queue is flowblocked
  *	@dev: network device
  *
  *	Test if transmit queue on device is currently unable to send.
  */
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
-{
-	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
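
For context, the queue helpers reordered above are the building blocks of the usual driver flow-control pattern: netif_stop_queue() when the TX ring fills in hard_start_xmit, netif_wake_queue() from the completion path once descriptors free up. A minimal sketch follows; struct foo_priv, its tx_free accounting, and the foo_* functions are hypothetical, and only the netif_* calls come from this header:

struct foo_priv {
	struct net_device *dev;
	unsigned int tx_free;			/* free TX ring descriptors (hypothetical) */
};

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* ... post skb to the hardware TX ring here ... */

	if (--priv->tx_free == 0)
		netif_stop_queue(dev);	/* sets __QUEUE_STATE_XOFF on queue 0 */

	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct foo_priv *priv)
{
	/* ... reclaim finished descriptors, incrementing priv->tx_free ... */

	if (netif_queue_stopped(priv->dev) && priv->tx_free > 0)
		netif_wake_queue(priv->dev);	/* clears XOFF and reschedules the qdisc */
}

The netif_queue_stopped() test in the wake path is only an optimization: netif_tx_wake_queue() itself uses test_and_clear_bit(), so a spurious wake is harmless.
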
@@ -1463,13 +1463,6 @@ static inline void netif_rx_complete(struct net_device *dev,
 	local_irq_restore(flags);
 }
 
-/**
- *	netif_tx_lock - grab network device transmit lock
- *	@dev: network device
- *	@cpu: cpu number of lock owner
- *
- *	Get network device transmit lock
- */
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
@@ -1482,6 +1475,13 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
+/**
+ *	netif_tx_lock - grab network device transmit lock
+ *	@dev: network device
+ *	@cpu: cpu number of lock owner
+ *
+ *	Get network device transmit lock
+ */
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	int cpu = smp_processor_id();
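
On this multiqueue tree, netif_tx_lock() takes the per-queue _xmit_lock on every TX queue, so it excludes all transmit paths at once, which is exactly what a watchdog reset wants. A hedged sketch of such a caller; foo_reset_rings() is hypothetical, while the netif_tx_* calls are from this header:

static void foo_tx_timeout(struct net_device *dev)
{
	netif_tx_lock(dev);		/* no hard_start_xmit can run concurrently */
	foo_reset_rings(dev);		/* hypothetical hardware reinitialization */
	netif_tx_unlock(dev);

	netif_tx_wake_all_queues(dev);	/* resume transmission on every queue */
}
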
diff --git a/net/core/dev.c b/net/core/dev.c
index ad5598d2bb37..65eea83613ef 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1645,32 +1645,6 @@ out_kfree_skb:
 	return 0;
 }
 
-/**
- *	dev_queue_xmit - transmit a buffer
- *	@skb: buffer to transmit
- *
- *	Queue a buffer for transmission to a network device. The caller must
- *	have set the device and priority and built the buffer before calling
- *	this function. The function can be called from an interrupt.
- *
- *	A negative errno code is returned on a failure. A success does not
- *	guarantee the frame will be transmitted as it may be dropped due
- *	to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *      I notice this method can also return errors from the queue disciplines,
- *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
- *      be positive.
- *
- *      Regardless of the return value, the skb is consumed, so it is currently
- *      difficult to retry a send to this method.  (You can bump the ref count
- *      before sending to hold a reference for retry if you are careful.)
- *
- *      When calling this method, interrupts MUST be enabled.  This is because
- *      the BH enable code must have IRQs enabled so that it will not deadlock.
- *          --BLG
- */
-
 static u32 simple_tx_hashrnd;
 static int simple_tx_hashrnd_initialized = 0;
@@ -1738,6 +1712,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	return netdev_get_tx_queue(dev, queue_index);
 }
 
+/**
+ *	dev_queue_xmit - transmit a buffer
+ *	@skb: buffer to transmit
+ *
+ *	Queue a buffer for transmission to a network device. The caller must
+ *	have set the device and priority and built the buffer before calling
+ *	this function. The function can be called from an interrupt.
+ *
+ *	A negative errno code is returned on a failure. A success does not
+ *	guarantee the frame will be transmitted as it may be dropped due
+ *	to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ *      I notice this method can also return errors from the queue disciplines,
+ *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
+ *      be positive.
+ *
+ *      Regardless of the return value, the skb is consumed, so it is currently
+ *      difficult to retry a send to this method.  (You can bump the ref count
+ *      before sending to hold a reference for retry if you are careful.)
+ *
+ *      When calling this method, interrupts MUST be enabled.  This is because
+ *      the BH enable code must have IRQs enabled so that it will not deadlock.
+ *          --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
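
With the comment now adjacent to dev_queue_xmit(), the contract it spells out is easy to check against callers: failures may be negative errnos or positive NET_XMIT_* codes from the qdisc, and the skb is consumed either way, so naive retry is unsafe. A minimal caller sketch; xmit_one() and its -ENOBUFS mapping are invented for illustration:

static int xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	int rc;

	skb->dev = dev;			/* caller must set device (and priority) */
	rc = dev_queue_xmit(skb);	/* consumes skb; IRQs must be enabled */

	if (rc < 0)			/* negative errno: hard failure */
		return rc;
	if (rc != NET_XMIT_SUCCESS)	/* positive code, e.g. NET_XMIT_DROP */
		return -ENOBUFS;	/* arbitrary mapping chosen for this sketch */

	return 0;
}

A caller that really needs to retry can take an extra reference with skb_get() before the call, as the comment suggests.
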