-rw-r--r--	include/linux/netdevice.h | 55
-rw-r--r--	include/net/pkt_sched.h   |  2
-rw-r--r--	net/sched/sch_generic.c   | 20
3 files changed, 51 insertions(+), 26 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e2d931f9b700..203c5504fe43 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -281,14 +281,12 @@ struct header_ops {
 
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
 
 
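With __LINK_STATE_XOFF=0 removed, __LINK_STATE_START implicitly becomes 0 and every remaining member shifts down by one bit position, which is harmless as long as callers refer to the bits through the enum names rather than numeric values. A trivial standalone check of the renumbering (this program is illustrative, not part of the patch):

#include <stdio.h>

/* The enum as it reads after this hunk: members renumber from 0. */
enum netdev_state_t {
	__LINK_STATE_START,		/* previously 1, now 0 */
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

int main(void)
{
	/* Prints "START=0 DORMANT=5". */
	printf("START=%d DORMANT=%d\n",
	       __LINK_STATE_START, __LINK_STATE_DORMANT);
	return 0;
}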
@@ -448,10 +446,17 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_QDISC_RUNNING,
+};
+
 struct netdev_queue {
 	spinlock_t		lock;
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
+	unsigned long		state;
 	struct sk_buff		*gso_skb;
 	spinlock_t		_xmit_lock;
 	int			xmit_lock_owner;
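The new per-queue state word is driven entirely by the kernel's atomic bitops (set_bit, clear_bit, test_bit, test_and_set_bit, test_and_clear_bit), so a flag can be flipped from any context without holding queue->lock. A minimal userspace sketch of that idiom, with GCC atomic builtins standing in for the kernel bitops (all names below are illustrative, not kernel API):

#include <stdio.h>

/* Mirrors of the new per-queue bits; values illustrative. */
enum { QUEUE_XOFF = 0, QUEUE_QDISC_RUNNING = 1 };

struct queue { unsigned long state; };

/* Atomically set a bit, like the kernel's set_bit(nr, addr). */
static void q_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
}

/* Atomically clear a bit, like clear_bit(nr, addr). */
static void q_clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

/* Plain read, like test_bit(nr, addr). */
static int q_test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

int main(void)
{
	struct queue q = { .state = 0 };

	q_set_bit(QUEUE_XOFF, &q.state);	/* netif_tx_stop_queue() */
	printf("stopped=%d\n", q_test_bit(QUEUE_XOFF, &q.state));
	q_clear_bit(QUEUE_XOFF, &q.state);	/* netif_tx_start_queue() */
	printf("stopped=%d\n", q_test_bit(QUEUE_XOFF, &q.state));
	return 0;
}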
@@ -952,9 +957,7 @@ extern void __netif_schedule(struct netdev_queue *txq);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
 		__netif_schedule(txq);
 }
 
@@ -969,9 +972,14 @@ static inline void netif_schedule(struct net_device *dev)
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(&dev->tx_queue);
 }
 
 /**
@@ -981,16 +989,21 @@ static inline void netif_start_queue(struct net_device *dev)
  *	Allow upper layers to call the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(&dev->tx_queue);
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+	netif_tx_wake_queue(&dev->tx_queue);
 }
 
 /**
@@ -1000,9 +1013,14 @@ static inline void netif_wake_queue(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(&dev->tx_queue);
 }
 
 /**
@@ -1011,9 +1029,14 @@ static inline void netif_stop_queue(struct net_device *dev)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(&dev->tx_queue);
 }
 
 /**
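Together, the stop/wake/stopped trio carries the usual driver flow-control contract: the xmit path stops the queue when its TX ring fills, the completion path wakes it when descriptors are reclaimed, and the core checks the stopped bit before handing over more packets. A single-threaded sketch of that contract against the new per-queue helpers (ring size, field names, and printouts are invented for illustration; the real helpers are the atomic ones above):

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 4			/* illustrative TX ring depth */

struct txq {
	unsigned long state;		/* bit 0 plays __QUEUE_STATE_XOFF */
	int inflight;			/* descriptors currently in flight */
};

static bool queue_stopped(struct txq *q) { return q->state & 1; }
static void stop_queue(struct txq *q)    { q->state |= 1; }
static void wake_queue(struct txq *q)    { q->state &= ~1UL; }

/* xmit path: accept a packet, stop the queue when the ring is full. */
static int xmit(struct txq *q, int pkt)
{
	if (queue_stopped(q))
		return -1;		/* the core would not have called us */
	q->inflight++;
	printf("xmit pkt %d (inflight=%d)\n", pkt, q->inflight);
	if (q->inflight == RING_SIZE)
		stop_queue(q);		/* cf. netif_tx_stop_queue() */
	return 0;
}

/* completion path: reclaim a descriptor, wake the queue if stopped. */
static void tx_complete(struct txq *q)
{
	q->inflight--;
	if (queue_stopped(q) && q->inflight < RING_SIZE)
		wake_queue(q);		/* cf. netif_tx_wake_queue() */
}

int main(void)
{
	struct txq q = { 0, 0 };

	for (int i = 0; i < 6; i++)
		if (xmit(&q, i) < 0)
			printf("pkt %d dropped: queue stopped\n", i);
	tx_complete(&q);
	printf("after one completion, stopped=%d\n", queue_stopped(&q));
	return 0;
}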
@@ -1043,7 +1066,7 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1059,7 +1082,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1072,7 +1095,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__LINK_STATE_XOFF,
+	return test_bit(__QUEUE_STATE_XOFF,
 			&dev->egress_subqueue[queue_index].state);
 }
 
@@ -1095,7 +1118,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
 		__netif_schedule(&dev->tx_queue);
 }
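Note that the wake paths use test_and_clear_bit() rather than clear_bit() followed by an unconditional schedule: if several CPUs race to wake the same queue, only the one that actually flips XOFF from set to clear calls __netif_schedule(), so the queue is scheduled exactly once. A runnable userspace sketch of that winner-takes-all idiom (thread count and names invented; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static unsigned long state = 1;		/* bit 0 set: queue stopped */
static int schedules;			/* wakers that got to schedule */

/* Like test_and_clear_bit(0, &state): returns the bit's old value. */
static int test_and_clear(unsigned long *addr)
{
	return __atomic_fetch_and(addr, ~1UL, __ATOMIC_SEQ_CST) & 1;
}

static void *waker(void *arg)
{
	(void)arg;
	if (test_and_clear(&state))	/* only one thread sees 1 here */
		__atomic_fetch_add(&schedules, 1, __ATOMIC_SEQ_CST);
	return NULL;
}

int main(void)
{
	pthread_t t[8];

	for (int i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, waker, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(t[i], NULL);
	printf("schedules=%d\n", schedules);	/* always 1 */
	return 0;
}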
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2311d242bb35..d58c1a5eb845 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -91,7 +91,7 @@ static inline void qdisc_run(struct netdev_queue *txq)
 	struct net_device *dev = txq->dev;
 
 	if (!netif_queue_stopped(dev) &&
-	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+	    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 		__qdisc_run(txq);
 }
 
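qdisc_run() uses the same bit-test trick as a mutual-exclusion gate: test_and_set_bit() on __QUEUE_STATE_QDISC_RUNNING admits exactly one CPU into __qdisc_run() per queue, any other CPU finds the bit already set and simply returns, and the runner clears the bit once the queue is drained. A runnable userspace sketch of that gate (thread count, iteration count, and counters invented; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static unsigned long qstate;		/* bit 0 plays QDISC_RUNNING */
static int inside, max_inside;		/* occupancy of the "run" section */

static void *cpu(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* test_and_set_bit(): nonzero means someone already runs */
		if (__atomic_fetch_or(&qstate, 1UL, __ATOMIC_SEQ_CST) & 1)
			continue;
		int n = __atomic_add_fetch(&inside, 1, __ATOMIC_SEQ_CST);
		if (n > max_inside)	/* stays 1 if exclusion holds */
			max_inside = n;
		__atomic_sub_fetch(&inside, 1, __ATOMIC_SEQ_CST);
		/* clear_bit(): drop QDISC_RUNNING when the queue drains */
		__atomic_fetch_and(&qstate, ~1UL, __ATOMIC_SEQ_CST);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, cpu, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("max concurrent runners: %d\n", max_inside);	/* expect 1 */
	return 0;
}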
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b6a36d394663..243de935b182 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -121,9 +121,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 /*
  * NOTE: Called under queue->lock with locally disabled BH.
  *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. queue->lock serializes queue accesses for
- * this device AND txq->qdisc pointer itself.
+ * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
+ * this queue at a time. queue->lock serializes queue accesses for
+ * this queue AND txq->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
@@ -206,7 +206,7 @@ void __qdisc_run(struct netdev_queue *txq)
 		}
 	}
 
-	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+	clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
 }
 
 static void dev_watchdog(unsigned long arg)
@@ -605,9 +605,10 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 
 void dev_deactivate(struct net_device *dev)
 {
+	struct netdev_queue *dev_queue = &dev->tx_queue;
 	int running;
 
-	dev_deactivate_queue(&dev->tx_queue, &noop_qdisc);
+	dev_deactivate_queue(dev_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -616,16 +617,17 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev->tx_queue.lock);
-		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-		spin_unlock_bh(&dev->tx_queue.lock);
+		spin_lock_bh(&dev_queue->lock);
+		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
+				   &dev_queue->state);
+		spin_unlock_bh(&dev_queue->lock);
 
 		/*
 		 * The running flag should never be set at this point because
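dev_deactivate() quiesces a queue by polling the same per-queue bit: it yields while some CPU is still inside __qdisc_run(), then re-reads the bit under queue->lock so that everything the final run did is visible before returning. A compressed userspace sketch of that wait-then-double-check shape (the mutex and the sleep stand in for queue->lock and a softirq still running the queue; names invented):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

struct queue {
	pthread_mutex_t lock;		/* plays queue->lock */
	unsigned long state;		/* bit 0 plays QDISC_RUNNING */
};

/* Wait until no runner is inside the queue, as dev_deactivate() does. */
static void wait_for_runner(struct queue *q)
{
	int running;

	do {
		/* Cheap unlocked poll first, like the yield() loop. */
		while (__atomic_load_n(&q->state, __ATOMIC_SEQ_CST) & 1)
			sched_yield();

		/* Double-check under the lock so the run's effects are
		 * visible when we return. */
		pthread_mutex_lock(&q->lock);
		running = q->state & 1;
		pthread_mutex_unlock(&q->lock);
	} while (running);
}

static void *runner(void *arg)
{
	struct queue *q = arg;

	usleep(1000);					/* "transmitting" */
	__atomic_fetch_and(&q->state, ~1UL, __ATOMIC_SEQ_CST);
	return NULL;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1 };	/* RUNNING set */
	pthread_t t;

	pthread_create(&t, NULL, runner, &q);
	wait_for_runner(&q);
	printf("queue quiesced\n");
	pthread_join(t, NULL);
	return 0;
}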