 drivers/net/rionet.c | 20 ++++++++++++++++----
 1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 91d25888a1b9..1470d3e86e3c 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -79,6 +79,7 @@ static int rionet_capable = 1;
  * on system trade-offs.
  */
 static struct rio_dev **rionet_active;
+static int nact;	/* total number of active rionet peers */
 
 #define is_rionet_capable(src_ops, dst_ops) \
 			((src_ops & RIO_SRC_OPS_DATA_MSG) && \
@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
 	u16 destid;
 	unsigned long flags;
+	int add_num = 1;
 
 	local_irq_save(flags);
 	if (!spin_trylock(&rnet->tx_lock)) {
@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_LOCKED;
 	}
 
-	if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+	if (is_multicast_ether_addr(eth->h_dest))
+		add_num = nact;
+
+	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
 		netif_stop_queue(ndev);
 		spin_unlock_irqrestore(&rnet->tx_lock, flags);
 		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	if (is_multicast_ether_addr(eth->h_dest)) {
+		int count = 0;
 		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
 				i++)
-			if (rionet_active[i])
+			if (rionet_active[i]) {
 				rionet_queue_tx_msg(skb, ndev,
 						    rionet_active[i]);
+				if (count)
+					atomic_inc(&skb->users);
+				count++;
+			}
 	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
 		destid = RIONET_GET_DESTID(eth->h_dest);
 		if (rionet_active[destid])
@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
 	if (info == RIONET_DOORBELL_JOIN) {
 		if (!rionet_active[sid]) {
 			list_for_each_entry(peer, &rionet_peers, node) {
-				if (peer->rdev->destid == sid)
+				if (peer->rdev->destid == sid) {
 					rionet_active[sid] = peer->rdev;
+					nact++;
+				}
 			}
 			rio_mport_send_doorbell(mport, sid,
 						RIONET_DOORBELL_JOIN);
 		}
 	} else if (info == RIONET_DOORBELL_LEAVE) {
 		rionet_active[sid] = NULL;
+		nact--;
 	} else {
 		if (netif_msg_intr(rnet))
 			printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
 
 		rc = rionet_setup_netdev(rdev->net->hport, ndev);
 		rionet_check = 1;
+		nact = 0;
 	}
 
 	/*
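
The multicast hunks above queue the same skb once per active peer and take an extra reference on the buffer for every copy after the first, so that each transmit completion can drop one reference without freeing data still in flight; nact keeps the ring-space check honest for that fan-out. The fragment below is a minimal sketch of that pattern, not part of the patch: fanout_to_peers(), queue_tx_to_peer(), peer_table[] and MAX_PEERS are hypothetical stand-ins for the driver's rionet_queue_tx_msg(), rionet_active[] and route-table size, and it takes the reference with skb_get() before queueing rather than with atomic_inc(&skb->users) afterwards as the patch does.

/*
 * Sketch only: send one skb to every active peer.  Each copy after the
 * first takes an extra reference so the skb survives until the last
 * transmit completion releases it.  All names here are illustrative,
 * not the driver's own symbols.
 */
static int fanout_to_peers(struct sk_buff *skb, struct net_device *ndev)
{
	int i, count = 0;

	for (i = 0; i < MAX_PEERS; i++) {
		if (!peer_table[i])
			continue;
		if (count)
			skb_get(skb);	/* extra reference for this copy */
		queue_tx_to_peer(skb, ndev, peer_table[i]);
		count++;
	}

	return count;	/* number of peers the skb was queued to */
}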