 drivers/net/iseries_veth.c | 108 +++++++++++++++++++++++++++--------------
 1 file changed, 64 insertions(+), 44 deletions(-)
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 122d60db4ff7..eaff17cc9fb8 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -158,10 +158,11 @@ struct veth_port {
 	u64 mac_addr;
 	HvLpIndexMap lpar_map;
 
-	spinlock_t pending_gate;
-	struct sk_buff *pending_skb;
-	HvLpIndexMap pending_lpmask;
+	/* queue_lock protects the stopped_map and dev's queue. */
+	spinlock_t queue_lock;
+	HvLpIndexMap stopped_map;
 
+	/* mcast_gate protects promiscuous, num_mcast & mcast_addr. */
 	rwlock_t mcast_gate;
 	int promiscuous;
 	int num_mcast;
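The hunk above swaps the old single deferred skb (pending_skb/pending_lpmask) for stopped_map: one bit per remote LPAR connection, set while that connection has no transmit buffers free. The port's queue may only run while the map is zero. A standalone model of that invariant (illustrative C, not kernel code; the names below are the model's own):

#include <stdio.h>

typedef unsigned int HvLpIndexMap;	/* stands in for the HV type */

struct port_model {
	HvLpIndexMap stopped_map;	/* bit n set => connection n out of buffers */
};

static int queue_runnable(const struct port_model *p)
{
	return p->stopped_map == 0;
}

int main(void)
{
	struct port_model p = { 0 };

	p.stopped_map |= 1u << 2;	/* connection 2 exhausts its msg stack */
	printf("runnable: %d\n", queue_runnable(&p));	/* 0: queue stays stopped */

	p.stopped_map &= ~(1u << 2);	/* connection 2's frames are acked */
	printf("runnable: %d\n", queue_runnable(&p));	/* 1: queue may wake */
	return 0;
}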
@@ -174,7 +175,8 @@ static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
 
 static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
-static void veth_flush_pending(struct veth_lpar_connection *cnx);
+static void veth_wake_queues(struct veth_lpar_connection *cnx);
+static void veth_stop_queues(struct veth_lpar_connection *cnx);
 static void veth_receive(struct veth_lpar_connection *, struct VethLpEvent *);
 static void veth_release_connection(struct kobject *kobject);
 static void veth_timed_ack(unsigned long ptr);
@@ -221,6 +223,12 @@ static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
 	return msg;
 }
 
+/* You must hold the connection's lock when you call this function. */
+static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx)
+{
+	return cnx->msg_stack_head == NULL;
+}
+
 static inline HvLpEvent_Rc
 veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
 		 HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
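veth_stack_is_empty() is a NULL test on the connection's free-message stack, so "stack empty" means exactly "no transmit buffers left"; per the comment, the answer is only stable while cnx->lock is held. A minimal standalone model of that stack (locking elided; illustrative names, not kernel code):

#include <stddef.h>
#include <stdio.h>

struct msg {
	struct msg *next;	/* intrusive singly linked free list */
	int id;
};

struct conn {
	struct msg *msg_stack_head;
};

static void stack_push(struct conn *c, struct msg *m)
{
	m->next = c->msg_stack_head;
	c->msg_stack_head = m;
}

static struct msg *stack_pop(struct conn *c)
{
	struct msg *m = c->msg_stack_head;

	if (m)
		c->msg_stack_head = m->next;
	return m;
}

static int stack_is_empty(const struct conn *c)
{
	return c->msg_stack_head == NULL;	/* same test as the patch */
}

int main(void)
{
	struct msg msgs[2] = { { NULL, 0 }, { NULL, 1 } };
	struct conn c = { NULL };

	stack_push(&c, &msgs[0]);
	stack_push(&c, &msgs[1]);
	while (!stack_is_empty(&c))
		printf("popped msg %d\n", stack_pop(&c)->id);
	return 0;
}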
@@ -391,12 +399,12 @@ static void veth_handle_int(struct VethLpEvent *event)
 			}
 		}
 
-		if (acked > 0)
+		if (acked > 0) {
 			cnx->last_contact = jiffies;
+			veth_wake_queues(cnx);
+		}
 
 		spin_unlock_irqrestore(&cnx->lock, flags);
-
-		veth_flush_pending(cnx);
 		break;
 	case VethEventTypeFrames:
 		veth_receive(cnx, event);
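Where the old code dropped cnx->lock and only then called veth_flush_pending(), the ack path now wakes the queues while still holding cnx->lock, so port->queue_lock nests inside cnx->lock. A standalone sketch of that ordering, with pthread mutexes standing in for the spinlocks (an assumed model, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cnx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void wake_queues(void)
{
	pthread_mutex_lock(&queue_lock);	/* inner lock, per-port */
	printf("queue woken\n");
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	pthread_mutex_lock(&cnx_lock);		/* outer lock, as in the ack path */
	/* ... recycle acked msgs, bump last_contact ... */
	wake_queues();
	pthread_mutex_unlock(&cnx_lock);
	return 0;
}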
@@ -492,7 +500,9 @@ static void veth_statemachine(void *p)
 		for (i = 0; i < VETH_NUMBUFFERS; ++i)
 			veth_recycle_msg(cnx, cnx->msgs + i);
 	}
+
 	cnx->outstanding_tx = 0;
+	veth_wake_queues(cnx);
 
 	/* Drop the lock so we can do stuff that might sleep or
 	 * take other locks. */
@@ -501,8 +511,6 @@
 	del_timer_sync(&cnx->ack_timer);
 	del_timer_sync(&cnx->reset_timer);
 
-	veth_flush_pending(cnx);
-
 	spin_lock_irq(&cnx->lock);
 
 	if (cnx->state & VETH_STATE_RESET)
@@ -869,8 +877,9 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
 
 	port = (struct veth_port *) dev->priv;
 
-	spin_lock_init(&port->pending_gate);
+	spin_lock_init(&port->queue_lock);
 	rwlock_init(&port->mcast_gate);
+	port->stopped_map = 0;
 
 	for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
 		HvLpVirtualLanIndexMap map;
@@ -980,6 +989,9 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	cnx->last_contact = jiffies;
 	cnx->outstanding_tx++;
 
+	if (veth_stack_is_empty(cnx))
+		veth_stop_queues(cnx);
+
 	spin_unlock_irqrestore(&cnx->lock, flags);
 	return 0;
 
@@ -1023,7 +1035,6 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned char *frame = skb->data;
 	struct veth_port *port = (struct veth_port *) dev->priv;
-	unsigned long flags;
 	HvLpIndexMap lpmask;
 
 	if (! (frame[0] & 0x01)) {
@@ -1040,27 +1051,9 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lpmask = port->lpar_map;
 	}
 
-	spin_lock_irqsave(&port->pending_gate, flags);
-
-	lpmask = veth_transmit_to_many(skb, lpmask, dev);
+	veth_transmit_to_many(skb, lpmask, dev);
 
-	if (! lpmask) {
-		dev_kfree_skb(skb);
-	} else {
-		if (port->pending_skb) {
-			veth_error("%s: TX while skb was pending!\n",
-				   dev->name);
-			dev_kfree_skb(skb);
-			spin_unlock_irqrestore(&port->pending_gate, flags);
-			return 1;
-		}
-
-		port->pending_skb = skb;
-		port->pending_lpmask = lpmask;
-		netif_stop_queue(dev);
-	}
-
-	spin_unlock_irqrestore(&port->pending_gate, flags);
+	dev_kfree_skb(skb);
 
 	return 0;
 }
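With the pending-skb machinery gone, veth_start_xmit() is fire and forget: the return value of veth_transmit_to_many() is ignored, the skb is freed unconditionally, and the function always returns 0 to say it consumed the packet; backpressure now comes from the stopped queues instead of a held skb. A toy sketch of that ownership contract (standalone, hypothetical names):

#include <stdlib.h>

struct skb { char *data; };

/* Returning 0 tells the caller the packet was consumed, so the
 * driver frees it itself; there is no "pending" skb to requeue. */
static int xmit(struct skb *skb)
{
	/* best-effort copy into per-connection buffers would happen here */
	free(skb->data);
	free(skb);
	return 0;	/* always consumed; flow control is via stopped queues */
}

int main(void)
{
	struct skb *s = malloc(sizeof(*s));

	s->data = malloc(64);
	return xmit(s);
}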
@@ -1093,9 +1086,10 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
 	}
 }
 
-static void veth_flush_pending(struct veth_lpar_connection *cnx)
+static void veth_wake_queues(struct veth_lpar_connection *cnx)
 {
 	int i;
+
 	for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
 		struct net_device *dev = veth_dev[i];
 		struct veth_port *port;
@@ -1109,19 +1103,45 @@
 		if (! (port->lpar_map & (1<<cnx->remote_lp)))
 			continue;
 
-		spin_lock_irqsave(&port->pending_gate, flags);
-		if (port->pending_skb) {
-			port->pending_lpmask =
-				veth_transmit_to_many(port->pending_skb,
-						      port->pending_lpmask,
-						      dev);
-			if (! port->pending_lpmask) {
-				dev_kfree_skb_any(port->pending_skb);
-				port->pending_skb = NULL;
-				netif_wake_queue(dev);
-			}
-		}
-		spin_unlock_irqrestore(&port->pending_gate, flags);
+		spin_lock_irqsave(&port->queue_lock, flags);
+
+		port->stopped_map &= ~(1 << cnx->remote_lp);
+
+		if (0 == port->stopped_map && netif_queue_stopped(dev)) {
+			veth_debug("cnx %d: woke queue for %s.\n",
+				   cnx->remote_lp, dev->name);
+			netif_wake_queue(dev);
+		}
+		spin_unlock_irqrestore(&port->queue_lock, flags);
+	}
+}
+
+static void veth_stop_queues(struct veth_lpar_connection *cnx)
+{
+	int i;
+
+	for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
+		struct net_device *dev = veth_dev[i];
+		struct veth_port *port;
+
+		if (! dev)
+			continue;
+
+		port = (struct veth_port *)dev->priv;
+
+		/* If this cnx is not on the vlan for this port, continue */
+		if (! (port->lpar_map & (1 << cnx->remote_lp)))
+			continue;
+
+		spin_lock(&port->queue_lock);
+
+		netif_stop_queue(dev);
+		port->stopped_map |= (1 << cnx->remote_lp);
+
+		veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n",
+			   cnx->remote_lp, dev->name, port->stopped_map);
+
+		spin_unlock(&port->queue_lock);
 	}
 }
 
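Taken together, the two new functions close the flow-control loop: veth_transmit_to_one() stops the queues when it hands out a connection's last buffer, and the ack path wakes them once every connection on the port has buffers again. A standalone simulation of one full cycle (a model under assumed names, not kernel code):

#include <stdio.h>

#define NUMBUFFERS 2

struct conn {
	int free_bufs;		/* stands in for the msg free stack */
	int remote_lp;
};

struct port {
	unsigned int stopped_map;
	int queue_stopped;
};

static void stop_queue(struct port *p, struct conn *c)
{
	p->queue_stopped = 1;
	p->stopped_map |= 1u << c->remote_lp;
}

static void wake_queue(struct port *p, struct conn *c)
{
	p->stopped_map &= ~(1u << c->remote_lp);
	if (p->stopped_map == 0 && p->queue_stopped) {
		p->queue_stopped = 0;
		printf("queue woken\n");
	}
}

static void transmit(struct port *p, struct conn *c)
{
	c->free_bufs--;
	if (c->free_bufs == 0)	/* last buffer gone: stop before overrun */
		stop_queue(p, c);
}

static void ack(struct port *p, struct conn *c)
{
	c->free_bufs++;		/* buffer recycled by the ack */
	wake_queue(p, c);
}

int main(void)
{
	struct conn c = { NUMBUFFERS, 3 };
	struct port p = { 0, 0 };

	transmit(&p, &c);
	transmit(&p, &c);	/* consumes the last buffer: queue stops */
	printf("stopped=%d map=0x%x\n", p.queue_stopped, p.stopped_map);
	ack(&p, &c);		/* ack returns a buffer: queue wakes */
	return 0;
}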