author    Jeff Garzik <jgarzik@pobox.com>  2006-02-02 01:12:54 -0500
committer Jeff Garzik <jgarzik@pobox.com>  2006-02-02 01:12:54 -0500
commit    18ee3610040a4c008ce08a40a5dd025241cc7e97 (patch)
tree      32a996a5123726b63c31a1522f230933fb967a32 /drivers/infiniband/ulp
parent    e4e7b89280d1d666e2c09e5ad36cf071796c4c7e (diff)
parent    b4103333d7904310d34de18d85e51e3d74f00a3b (diff)
Merge branch 'master'
Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/ipoib/ipoib_main.c      |  4
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 25
 drivers/infiniband/ulp/srp/ib_srp.c            | 14
 drivers/infiniband/ulp/srp/ib_srp.h            |  5
 4 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fd3f5c862a5d..c3b5f79d1168 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -505,7 +505,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 
 	list_add_tail(&neigh->list, &path->neigh_list);
 
-	if (path->pathrec.dlid) {
+	if (path->ah) {
 		kref_get(&path->ah->ref);
 		neigh->ah = path->ah;
 
@@ -591,7 +591,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		return;
 	}
 
-	if (path->pathrec.dlid) {
+	if (path->ah) {
 		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
 			  be16_to_cpu(path->pathrec.dlid));
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 98039da0caf0..ccaa0c387076 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -97,6 +97,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
 	unsigned long flags;
+	int tx_dropped = 0;
 
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
@@ -123,8 +124,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
 
-	while (!skb_queue_empty(&mcast->pkt_queue))
+	while (!skb_queue_empty(&mcast->pkt_queue)) {
+		++tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+	}
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	priv->stats.tx_dropped += tx_dropped;
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	kfree(mcast);
 }
@@ -276,8 +283,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 	}
 
 	/* actually send any queued packets */
+	spin_lock_irq(&priv->tx_lock);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+		spin_unlock_irq(&priv->tx_lock);
 
 		skb->dev = dev;
 
@@ -288,7 +297,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
 		if (dev_queue_xmit(skb))
 			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+		spin_lock_irq(&priv->tx_lock);
 	}
+	spin_unlock_irq(&priv->tx_lock);
 
 	return 0;
 }
@@ -300,6 +311,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 {
 	struct ipoib_mcast *mcast = mcast_ptr;
 	struct net_device *dev = mcast->dev;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	if (!status)
 		ipoib_mcast_join_finish(mcast, mcmember);
@@ -310,8 +322,12 @@ ipoib_mcast_sendonly_join_complete(int status,
 			IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	/* Flush out any queued packets */
-	while (!skb_queue_empty(&mcast->pkt_queue))
+	spin_lock_irq(&priv->tx_lock);
+	while (!skb_queue_empty(&mcast->pkt_queue)) {
+		++priv->stats.tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+	}
+	spin_unlock_irq(&priv->tx_lock);
 
 	/* Clear the busy flag so we try again */
 	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
@@ -687,6 +703,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 	if (!mcast) {
 		ipoib_warn(priv, "unable to allocate memory for "
 			   "multicast structure\n");
+		++priv->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		goto out;
 	}
@@ -700,8 +717,10 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 	if (!mcast->ah) {
 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
 			skb_queue_tail(&mcast->pkt_queue, skb);
-		else
+		else {
+			++priv->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
+		}
 
 		if (mcast->query)
 			ipoib_dbg_mcast(priv, "no address vector, "
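
The ipoib_multicast.c hunks all follow one idiom: when a queued multicast skb has to be thrown away, count it, and fold the count into priv->stats.tx_dropped only while holding priv->tx_lock, the lock that otherwise serializes updates to those stats from the TX path. The following is a minimal, self-contained sketch of that idiom with hypothetical stand-in names (demo_priv, demo_flush_queue); it is illustrative only and not the driver code itself:

/*
 * Illustrative sketch of the drop-accounting pattern used above.
 * The structure and function names here are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t		tx_lock;	/* protects stats updates */
	struct net_device_stats	stats;
	struct sk_buff_head	pkt_queue;	/* packets awaiting an AH */
};

static void demo_flush_queue(struct demo_priv *priv)
{
	unsigned long flags;
	int tx_dropped = 0;

	/* Drain everything still queued, counting each dropped skb. */
	while (!skb_queue_empty(&priv->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&priv->pkt_queue));
	}

	/* Account for the drops under the TX lock, as the patch does. */
	spin_lock_irqsave(&priv->tx_lock, flags);
	priv->stats.tx_dropped += tx_dropped;
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

Counting into a local variable first keeps the spinlock hold time short: the lock is taken once per flush rather than once per packet.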
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31207e664148..2d2d4ac3525a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -357,9 +357,9 @@ static void srp_remove_work(void *target_ptr)
 	target->state = SRP_TARGET_REMOVED;
 	spin_unlock_irq(target->scsi_host->host_lock);
 
-	down(&target->srp_host->target_mutex);
+	mutex_lock(&target->srp_host->target_mutex);
 	list_del(&target->list);
-	up(&target->srp_host->target_mutex);
+	mutex_unlock(&target->srp_host->target_mutex);
 
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
@@ -1254,9 +1254,9 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
 		return -ENODEV;
 
-	down(&host->target_mutex);
+	mutex_lock(&host->target_mutex);
 	list_add_tail(&target->list, &host->target_list);
-	up(&host->target_mutex);
+	mutex_unlock(&host->target_mutex);
 
 	target->state = SRP_TARGET_LIVE;
 
@@ -1525,7 +1525,7 @@ static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 		return NULL;
 
 	INIT_LIST_HEAD(&host->target_list);
-	init_MUTEX(&host->target_mutex);
+	mutex_init(&host->target_mutex);
 	init_completion(&host->released);
 	host->dev = device;
 	host->port = port;
@@ -1626,7 +1626,7 @@ static void srp_remove_one(struct ib_device *device)
 	 * Mark all target ports as removed, so we stop queueing
 	 * commands and don't try to reconnect.
 	 */
-	down(&host->target_mutex);
+	mutex_lock(&host->target_mutex);
 	list_for_each_entry_safe(target, tmp_target,
 				 &host->target_list, list) {
 		spin_lock_irqsave(target->scsi_host->host_lock, flags);
@@ -1634,7 +1634,7 @@ static void srp_remove_one(struct ib_device *device)
 		target->state = SRP_TARGET_REMOVED;
 		spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	}
-	up(&host->target_mutex);
+	mutex_unlock(&host->target_mutex);
 
 	/*
 	 * Wait for any reconnection tasks that may have
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b564f18caf78..4e7727df32f1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -37,8 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -85,7 +84,7 @@ struct srp_host {
 	struct ib_mr *mr;
 	struct class_device class_dev;
 	struct list_head target_list;
-	struct semaphore target_mutex;
+	struct mutex target_mutex;
 	struct completion released;
 	struct list_head list;
 };
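
The ib_srp changes are a mechanical conversion of a semaphore used as a mutex to the struct mutex API: init_MUTEX() becomes mutex_init(), and the down()/up() pairs protecting target_list become mutex_lock()/mutex_unlock(). A minimal sketch of the resulting pattern, using hypothetical demo_* names rather than the actual SRP structures:

/*
 * Illustrative sketch of the mutex-protected target list after the
 * semaphore-to-mutex conversion.  Names here are hypothetical.
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_host {
	struct list_head	target_list;
	struct mutex		target_mutex;	/* was struct semaphore */
};

struct demo_target {
	struct list_head	list;
};

static void demo_host_init(struct demo_host *host)
{
	INIT_LIST_HEAD(&host->target_list);
	mutex_init(&host->target_mutex);	/* was init_MUTEX() */
}

static void demo_add_target(struct demo_host *host, struct demo_target *target)
{
	mutex_lock(&host->target_mutex);	/* was down() */
	list_add_tail(&target->list, &host->target_list);
	mutex_unlock(&host->target_mutex);	/* was up() */
}

The behavior is unchanged; using struct mutex simply makes the sleeping-lock usage explicit and enables the kernel's mutex debugging checks.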