author		David S. Miller <davem@davemloft.net>	2008-07-09 02:14:46 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-09 02:14:46 -0400
commit		79d16385c7f287a33ea771c4dbe60ae43f791b49
tree		858bfe84e52d88356d5d0b49efc5148a0870ccf9 /include/linux/netdevice.h
parent		b19fa1fa91845234961c64dbd564671aa7c0fd27

netdev: Move atomic queue state bits into netdev_queue.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 55 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 39 insertions(+), 16 deletions(-)
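
The change is transparent to drivers: netif_start_queue(), netif_stop_queue() and netif_wake_queue() keep their signatures and now delegate to new per-queue helpers that operate on dev->tx_queue.state instead of dev->state. As a minimal sketch of an unchanged call site (not from this patch; struct example_priv and the example_* helpers are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical driver transmit routine. The call is unchanged by this
	 * patch, but netif_stop_queue() now sets __QUEUE_STATE_XOFF in
	 * dev->tx_queue.state rather than __LINK_STATE_XOFF in dev->state. */
	static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		example_post_to_ring(priv, skb);	/* hypothetical: queue skb to HW */
		if (example_tx_ring_free(priv) < MAX_SKB_FRAGS + 1)
			netif_stop_queue(dev);		/* stop before the ring overflows */
		return NETDEV_TX_OK;
	}
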
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e2d931f9b700..203c5504fe43 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -281,14 +281,12 @@ struct header_ops {
 
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
 
 
@@ -448,10 +446,17 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_QDISC_RUNNING,
+};
+
 struct netdev_queue {
 	spinlock_t		lock;
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
+	unsigned long		state;
 	struct sk_buff		*gso_skb;
 	spinlock_t		_xmit_lock;
 	int			xmit_lock_owner;
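
The new state word is manipulated only with atomic bitops, so stopping and waking a queue can race safely between the transmit path and a TX-completion interrupt. For illustration (not from the patch), the primitives that the helpers further down wrap:

	#include <linux/bitops.h>
	#include <linux/netdevice.h>

	static void queue_state_demo(struct netdev_queue *txq)
	{
		set_bit(__QUEUE_STATE_XOFF, &txq->state);	/* stop */
		if (test_bit(__QUEUE_STATE_XOFF, &txq->state))	/* stopped? */
			clear_bit(__QUEUE_STATE_XOFF, &txq->state);	/* start */

		/* Wake: reschedule only if the bit was actually set, so two
		 * concurrent wakers cannot schedule the queue twice. */
		if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
			__netif_schedule(txq);
	}
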
@@ -952,9 +957,7 @@ extern void __netif_schedule(struct netdev_queue *txq);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
 		__netif_schedule(txq);
 }
 
@@ -969,9 +972,14 @@ static inline void netif_schedule(struct net_device *dev)
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(&dev->tx_queue);
 }
 
 /**
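
A typical caller of the wrapper is a driver's open routine, which is likewise unchanged; example_hw_start() is hypothetical:

	static int example_open(struct net_device *dev)
	{
		example_hw_start(netdev_priv(dev));	/* hypothetical: start hardware */
		netif_start_queue(dev);	/* clears __QUEUE_STATE_XOFF on dev->tx_queue */
		return 0;
	}
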
@@ -981,16 +989,21 @@ static inline void netif_start_queue(struct net_device *dev)
  *	Allow upper layers to call the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(&dev->tx_queue);
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+	netif_tx_wake_queue(&dev->tx_queue);
 }
 
 /**
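
The usual consumer of the wake path is a TX-completion handler that restarts the queue once descriptors have been reclaimed. A sketch, with example_tx_clean(), example_tx_ring_free() and EXAMPLE_WAKE_THRESH assumed:

	static void example_tx_complete(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		example_tx_clean(priv);		/* hypothetical: reclaim TX descriptors */
		if (netif_queue_stopped(dev) &&
		    example_tx_ring_free(priv) >= EXAMPLE_WAKE_THRESH)
			netif_wake_queue(dev);	/* ends up in netif_tx_wake_queue() */
	}
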
@@ -1000,9 +1013,14 @@ static inline void netif_wake_queue(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(&dev->tx_queue);
 }
 
 /**
@@ -1011,9 +1029,14 @@ static inline void netif_stop_queue(struct net_device *dev)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(&dev->tx_queue);
 }
 
 /**
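
With netif_tx_{start,stop,wake}_queue() and netif_tx_queue_stopped() in place, code that already holds a struct netdev_queue * can express flow control against that specific queue rather than the whole device. A sketch under that assumption:

	#include <linux/types.h>
	#include <linux/netdevice.h>

	static void example_txq_flow_control(struct netdev_queue *txq, bool ring_full)
	{
		if (ring_full)
			netif_tx_stop_queue(txq);	/* set __QUEUE_STATE_XOFF */
		else if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);	/* clear the bit and reschedule */
	}
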
@@ -1043,7 +1066,7 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1059,7 +1082,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1072,7 +1095,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__LINK_STATE_XOFF,
+	return test_bit(__QUEUE_STATE_XOFF,
 			&dev->egress_subqueue[queue_index].state);
 }
 
@@ -1095,7 +1118,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
 		__netif_schedule(&dev->tx_queue);
 }
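
The egress_subqueue helpers receive the same one-for-one substitution, so multiqueue drivers need no changes. A sketch of a per-ring completion path; example_clean_ring() is hypothetical:

	static void example_clean_subqueue(struct net_device *dev, u16 ring)
	{
		example_clean_ring(netdev_priv(dev), ring);	/* hypothetical */
		if (__netif_subqueue_stopped(dev, ring))
			netif_wake_subqueue(dev, ring);	/* test_and_clear + reschedule */
	}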