Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h		 34
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c		 27
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	 28
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c	 17
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_verbs.c	  3
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c		482
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h		 33
7 files changed, 414 insertions, 210 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 12a1e0572ef2..491d2afaf5b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -272,8 +272,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
 void ipoib_mcast_join_task(void *dev_ptr);
-void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
-		      struct sk_buff *skb);
+void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(void *dev_ptr);
 int ipoib_mcast_start_thread(struct net_device *dev);
@@ -369,15 +368,26 @@ extern int ipoib_debug_level;
 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
 
 
-#define IPOIB_GID_FMT		"%x:%x:%x:%x:%x:%x:%x:%x"
+#define IPOIB_GID_FMT		"%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:" \
+				"%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x"
 
-#define IPOIB_GID_ARG(gid)	be16_to_cpup((__be16 *) ((gid).raw +  0)), \
-				be16_to_cpup((__be16 *) ((gid).raw +  2)), \
-				be16_to_cpup((__be16 *) ((gid).raw +  4)), \
-				be16_to_cpup((__be16 *) ((gid).raw +  6)), \
-				be16_to_cpup((__be16 *) ((gid).raw +  8)), \
-				be16_to_cpup((__be16 *) ((gid).raw + 10)), \
-				be16_to_cpup((__be16 *) ((gid).raw + 12)), \
-				be16_to_cpup((__be16 *) ((gid).raw + 14))
+#define IPOIB_GID_RAW_ARG(gid)	((u8 *)(gid))[0], \
+				((u8 *)(gid))[1], \
+				((u8 *)(gid))[2], \
+				((u8 *)(gid))[3], \
+				((u8 *)(gid))[4], \
+				((u8 *)(gid))[5], \
+				((u8 *)(gid))[6], \
+				((u8 *)(gid))[7], \
+				((u8 *)(gid))[8], \
+				((u8 *)(gid))[9], \
+				((u8 *)(gid))[10],\
+				((u8 *)(gid))[11],\
+				((u8 *)(gid))[12],\
+				((u8 *)(gid))[13],\
+				((u8 *)(gid))[14],\
+				((u8 *)(gid))[15]
+
+#define IPOIB_GID_ARG(gid)	IPOIB_GID_RAW_ARG((gid).raw)
 
 #endif /* _IPOIB_H */
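Note: the reworked macros print a GID byte by byte, so callers can pass either a union ib_gid (via IPOIB_GID_ARG) or a raw u8 pointer into an unaligned hardware address (via IPOIB_GID_RAW_ARG) without casting. A minimal usage sketch, assuming only the macros above (priv and n are hypothetical names, not from this patch):

	struct ipoib_dev_priv *priv = netdev_priv(dev);	/* hypothetical */
	struct neighbour *n = skb->dst->neighbour;	/* hypothetical */

	/* whole union ib_gid: IPOIB_GID_ARG() forwards to the raw form */
	ipoib_dbg(priv, "local GID " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(priv->local_gid));

	/* raw bytes, e.g. the GID at offset 4 of the hardware address */
	ipoib_dbg(priv, "dest GID " IPOIB_GID_FMT "\n",
		  IPOIB_GID_RAW_ARG(n->ha + 4));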
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8406839b91cf..5033666b1481 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -84,15 +84,9 @@ void ipoib_free_ah(struct kref *kref)
 
 	unsigned long flags;
 
-	if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
-		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
-		ib_destroy_ah(ah->ah);
-		kfree(ah);
-	} else {
-		spin_lock_irqsave(&priv->lock, flags);
-		list_add_tail(&ah->list, &priv->dead_ahs);
-		spin_unlock_irqrestore(&priv->lock, flags);
-	}
+	spin_lock_irqsave(&priv->lock, flags);
+	list_add_tail(&ah->list, &priv->dead_ahs);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
@@ -377,19 +371,16 @@ static void __ipoib_reap_ah(struct net_device *dev)
 	struct ipoib_ah *ah, *tah;
 	LIST_HEAD(remove_list);
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irq(&priv->tx_lock);
+	spin_lock(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
-			list_add_tail(&ah->list, &remove_list);
+			ib_destroy_ah(ah->ah);
+			kfree(ah);
 		}
-	spin_unlock_irq(&priv->lock);
-
-	list_for_each_entry_safe(ah, tah, &remove_list, list) {
-		ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
-		ib_destroy_ah(ah->ah);
-		kfree(ah);
-	}
+	spin_unlock(&priv->lock);
+	spin_unlock_irq(&priv->tx_lock);
 }
 
 void ipoib_reap_ah(void *dev_ptr)
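Note: both ipoib_free_ah() and __ipoib_reap_ah() key off the test (int) priv->tx_tail - (int) ah->last_send >= 0, which asks whether the send queue has advanced past the last work request that used a given address handle. The signed subtraction keeps the comparison correct even when the free-running counters wrap, as long as they stay within INT_MAX of each other. A standalone sketch of the idiom (names are illustrative, not from the driver):

	/* Nonzero once tx_tail has caught up with last_send. */
	static inline int send_completed(unsigned int tx_tail, unsigned int last_send)
	{
		/* e.g. tx_tail = 2 just after wrapping, last_send = 0xfffffffe:
		 * 2 - (-2) = 4 >= 0, so the send has completed. */
		return (int) tx_tail - (int) last_send >= 0;
	}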
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cb078a7d0bf5..1c6ea1c682a5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -185,8 +185,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-static struct ipoib_path *__path_find(struct net_device *dev,
-				      union ib_gid *gid)
+static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->path_tree.rb_node;
@@ -196,7 +195,7 @@ static struct ipoib_path *__path_find(struct net_device *dev,
 	while (n) {
 		path = rb_entry(n, struct ipoib_path, rb_node);
 
-		ret = memcmp(gid->raw, path->pathrec.dgid.raw,
+		ret = memcmp(gid, path->pathrec.dgid.raw,
 			     sizeof (union ib_gid));
 
 		if (ret < 0)
@@ -424,8 +423,7 @@ static void path_rec_completion(int status,
 	}
 }
 
-static struct ipoib_path *path_rec_create(struct net_device *dev,
-					  union ib_gid *gid)
+static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
@@ -440,7 +438,7 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
 
 	INIT_LIST_HEAD(&path->neigh_list);
 
-	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
+	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
 	path->pathrec.sgid      = priv->local_gid;
 	path->pathrec.pkey      = cpu_to_be16(priv->pkey);
 	path->pathrec.numb_path = 1;
@@ -498,10 +496,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	 */
 	spin_lock(&priv->lock);
 
-	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
+	path = __path_find(dev, skb->dst->neighbour->ha + 4);
 	if (!path) {
-		path = path_rec_create(dev,
-				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
+		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
 		if (!path)
 			goto err_path;
 
@@ -551,7 +548,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 	/* Add in the P_Key for multicasts */
 	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
 	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
-	ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
+	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -566,10 +563,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 	 */
 	spin_lock(&priv->lock);
 
-	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
+	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path) {
-		path = path_rec_create(dev,
-				       (union ib_gid *) (phdr->hwaddr + 4));
+		path = path_rec_create(dev, phdr->hwaddr + 4);
 		if (path) {
 			/* put pseudoheader back on for next time */
 			skb_push(skb, sizeof *phdr);
@@ -660,7 +656,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
 			phdr->hwaddr[9] = priv->pkey & 0xff;
 
-			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
+			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
 		} else {
 			/* unicast GID -- should be ARP or RARP reply */
 
@@ -671,7 +667,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					   skb->dst ? "neigh" : "dst",
 					   be16_to_cpup((__be16 *) skb->data),
 					   be32_to_cpup((__be32 *) phdr->hwaddr),
-					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
+					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 			dev_kfree_skb_any(skb);
 			++priv->stats.tx_dropped;
 			goto out;
@@ -754,7 +750,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
 		ipoib_dbg(priv,
 			  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
 			  be32_to_cpup((__be32 *) n->ha),
-			  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));
+			  IPOIB_GID_RAW_ARG(n->ha + 4));
 
 	spin_lock_irqsave(&priv->lock, flags);
 
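Note: the void * conversions above lean on the fixed layout of the 20-byte IPoIB hardware address: the first four bytes carry flags plus the remote queue pair number and the remaining sixteen are the destination port GID, so ha + 4 is already a raw GID. A sketch of that layout (hypothetical struct; the driver itself just indexes raw offsets, e.g. ha[8] and ha[9] for the P_Key it patches into multicast addresses):

	struct ipoib_hw_addr_sketch {		/* hypothetical name */
		u8	flags_qpn[4];		/* ha[0..3]: flags + remote QPN */
		u8	gid[16];		/* ha[4..19]: raw port GID */
	};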
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1dae4b238252..216471fa01cc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -154,7 +154,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 	return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid)
+static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->multicast_tree.rb_node;
@@ -165,7 +165,7 @@ static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_g
 
 		mcast = rb_entry(n, struct ipoib_mcast, rb_node);
 
-		ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
+		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
 			     sizeof (union ib_gid));
 		if (ret < 0)
 			n = n->rb_left;
@@ -694,8 +694,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	return 0;
 }
 
-void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
-		      struct sk_buff *skb)
+void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
@@ -718,7 +717,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 	if (!mcast) {
 		/* Let's create a new send only group now */
 		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
-				IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));
+				IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));
 
 		mcast = ipoib_mcast_alloc(dev, 0);
 		if (!mcast) {
@@ -730,7 +729,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 		}
 
 		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
-		mcast->mcmember.mgid = *mgid;
+		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
 		__ipoib_mcast_add(dev, mcast);
 		list_add_tail(&mcast->list, &priv->multicast_list);
 	}
@@ -821,7 +820,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_mcast_stop_thread(dev, 0);
 
-	spin_lock_irqsave(&dev->xmit_lock, flags);
+	local_irq_save(flags);
+	netif_tx_lock(dev);
 	spin_lock(&priv->lock);
 
 	/*
@@ -896,7 +896,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 	}
 
 	spin_unlock(&priv->lock);
-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
+	netif_tx_unlock(dev);
+	local_irq_restore(flags);
 
 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
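Note: netif_tx_lock() replaces direct use of dev->xmit_lock here, but unlike the old spin_lock_irqsave() it does not disable interrupts, which is why the driver brackets it with an explicit local_irq_save()/local_irq_restore() pair. The resulting lock nesting, as a sketch of what ipoib_mcast_restart_task() now does:

	unsigned long flags;

	local_irq_save(flags);		/* was: spin_lock_irqsave(&dev->xmit_lock, flags) */
	netif_tx_lock(dev);		/* serializes against the transmit path */
	spin_lock(&priv->lock);

	/* ... rebuild the multicast list under both locks ... */

	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);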
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 1d49d1643c59..7b717c648f72 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -255,7 +255,8 @@ void ipoib_event(struct ib_event_handler *handler,
 	    record->event == IB_EVENT_PKEY_CHANGE ||
 	    record->event == IB_EVENT_PORT_ACTIVE ||
 	    record->event == IB_EVENT_LID_CHANGE  ||
-	    record->event == IB_EVENT_SM_CHANGE) {
+	    record->event == IB_EVENT_SM_CHANGE   ||
+	    record->event == IB_EVENT_CLIENT_REREGISTER) {
 		ipoib_dbg(priv, "Port state change event\n");
 		queue_work(ipoib_workqueue, &priv->flush_task);
 	}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9cbdffa08dc2..4e22afef7206 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -62,6 +62,13 @@ MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 "v" DRV_VERSION " (" DRV_RELDATE ")"); 62 "v" DRV_VERSION " (" DRV_RELDATE ")");
63MODULE_LICENSE("Dual BSD/GPL"); 63MODULE_LICENSE("Dual BSD/GPL");
64 64
65static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
66static int srp_max_iu_len;
67
68module_param(srp_sg_tablesize, int, 0444);
69MODULE_PARM_DESC(srp_sg_tablesize,
70 "Max number of gather/scatter entries per I/O (default is 12)");
71
65static int topspin_workarounds = 1; 72static int topspin_workarounds = 1;
66 73
67module_param(topspin_workarounds, int, 0444); 74module_param(topspin_workarounds, int, 0444);
@@ -105,7 +112,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 	if (!iu->buf)
 		goto out_free_iu;
 
-	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
+	iu->dma = dma_map_single(host->dev->dev->dma_device,
+				 iu->buf, size, direction);
 	if (dma_mapping_error(iu->dma))
 		goto out_free_buf;
 
@@ -127,7 +135,8 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 	if (!iu)
 		return;
 
-	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
+	dma_unmap_single(host->dev->dev->dma_device,
+			 iu->dma, iu->size, iu->direction);
 	kfree(iu->buf);
 	kfree(iu);
 }
@@ -147,7 +156,7 @@ static int srp_init_qp(struct srp_target_port *target,
 	if (!attr)
 		return -ENOMEM;
 
-	ret = ib_find_cached_pkey(target->srp_host->dev,
+	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
 				  target->srp_host->port,
 				  be16_to_cpu(target->path.pkey),
 				  &attr->pkey_index);
@@ -179,7 +188,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	if (!init_attr)
 		return -ENOMEM;
 
-	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
+	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
 				  NULL, target, SRP_CQ_SIZE);
 	if (IS_ERR(target->cq)) {
 		ret = PTR_ERR(target->cq);
@@ -198,7 +207,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	init_attr->send_cq = target->cq;
 	init_attr->recv_cq = target->cq;
 
-	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
+	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
 	if (IS_ERR(target->qp)) {
 		ret = PTR_ERR(target->qp);
 		ib_destroy_cq(target->cq);
@@ -250,7 +259,7 @@ static int srp_lookup_path(struct srp_target_port *target)
 
 	init_completion(&target->done);
 
-	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
+	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
 						   target->srp_host->port,
 						   &target->path,
 						   IB_SA_PATH_REC_DGID |
@@ -309,10 +318,32 @@ static int srp_send_req(struct srp_target_port *target)
 
 	req->priv.opcode     	= SRP_LOGIN_REQ;
 	req->priv.tag        	= 0;
-	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
-	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
+	/*
+	 * In the published SRP specification (draft rev. 16a), the
+	 * port identifier format is 8 bytes of ID extension followed
+	 * by 8 bytes of GUID.  Older drafts put the two halves in the
+	 * opposite order, so that the GUID comes first.
+	 *
+	 * Targets conforming to these obsolete drafts can be
+	 * recognized by the I/O Class they report.
+	 */
+	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
+		memcpy(req->priv.initiator_port_id,
+		       target->srp_host->initiator_port_id + 8, 8);
+		memcpy(req->priv.initiator_port_id + 8,
+		       target->srp_host->initiator_port_id, 8);
+		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
+		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
+	} else {
+		memcpy(req->priv.initiator_port_id,
+		       target->srp_host->initiator_port_id, 16);
+		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
+		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+	}
+
 	/*
 	 * Topspin/Cisco SRP targets will reject our login unless we
 	 * zero out the first 8 bytes of our initiator port ID.  The
@@ -325,8 +356,6 @@ static int srp_send_req(struct srp_target_port *target)
 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
 		memset(req->priv.initiator_port_id, 0, 8);
 	}
-	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
-	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
 
 	status = ib_send_cm_req(target->cm_id, &req->param);
 
@@ -359,9 +388,9 @@ static void srp_remove_work(void *target_ptr)
 	target->state = SRP_TARGET_REMOVED;
 	spin_unlock_irq(target->scsi_host->host_lock);
 
-	mutex_lock(&target->srp_host->target_mutex);
+	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
-	mutex_unlock(&target->srp_host->target_mutex);
+	spin_unlock(&target->srp_host->target_lock);
 
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
@@ -421,6 +450,11 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
 
+	if (req->fmr) {
+		ib_fmr_pool_unmap(req->fmr);
+		req->fmr = NULL;
+	}
+
 	/*
 	 * This handling of non-SG commands can be killed when the
 	 * SCSI midlayer no longer generates non-SG commands.
@@ -433,18 +467,30 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		scat = &req->fake_sg;
 	}
 
-	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
 		     scmnd->sc_data_direction);
 }
 
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+{
+	srp_unmap_data(req->scmnd, target, req);
+	list_move_tail(&req->list, &target->free_reqs);
+}
+
+static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+{
+	req->scmnd->result = DID_RESET << 16;
+	req->scmnd->scsi_done(req->scmnd);
+	srp_remove_req(target, req);
+}
+
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_cm_id *new_cm_id;
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req;
+	struct srp_request *req, *tmp;
 	struct ib_wc wc;
 	int ret;
-	int i;
 
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state != SRP_TARGET_LIVE) {
@@ -459,7 +505,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	 * Now get a new local CM ID so that we avoid confusing the
 	 * target in case things are really fouled up.
 	 */
-	new_cm_id = ib_create_cm_id(target->srp_host->dev,
+	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
 				    srp_cm_handler, target);
 	if (IS_ERR(new_cm_id)) {
 		ret = PTR_ERR(new_cm_id);
@@ -480,19 +526,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	while (ib_poll_cq(target->cq, 1, &wc) > 0)
 		; /* nothing */
 
-	list_for_each_entry(req, &target->req_queue, list) {
-		req->scmnd->result = DID_RESET << 16;
-		req->scmnd->scsi_done(req->scmnd);
-		srp_unmap_data(req->scmnd, target, req);
-	}
+	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+		srp_reset_req(target, req);
 
 	target->rx_head = 0;
 	target->tx_head = 0;
 	target->tx_tail = 0;
-	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
-	for (i = 0; i < SRP_SQ_SIZE; ++i)
-		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
 
 	ret = srp_connect_target(target);
 	if (ret)
@@ -528,14 +567,79 @@ err:
 	return ret;
 }
 
+static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
+		       int sg_cnt, struct srp_request *req,
+		       struct srp_direct_buf *buf)
+{
+	u64 io_addr = 0;
+	u64 *dma_pages;
+	u32 len;
+	int page_cnt;
+	int i, j;
+	int ret;
+
+	if (!dev->fmr_pool)
+		return -ENODEV;
+
+	len = page_cnt = 0;
+	for (i = 0; i < sg_cnt; ++i) {
+		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+			if (i > 0)
+				return -EINVAL;
+			else
+				++page_cnt;
+		}
+		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+		    ~dev->fmr_page_mask) {
+			if (i < sg_cnt - 1)
+				return -EINVAL;
+			else
+				++page_cnt;
+		}
+
+		len += sg_dma_len(&scat[i]);
+	}
+
+	page_cnt += len >> dev->fmr_page_shift;
+	if (page_cnt > SRP_FMR_SIZE)
+		return -ENOMEM;
+
+	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
+	if (!dma_pages)
+		return -ENOMEM;
+
+	page_cnt = 0;
+	for (i = 0; i < sg_cnt; ++i)
+		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+			dma_pages[page_cnt++] =
+				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+
+	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
+					dma_pages, page_cnt, &io_addr);
+	if (IS_ERR(req->fmr)) {
+		ret = PTR_ERR(req->fmr);
+		goto out;
+	}
+
+	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
+	buf->len = cpu_to_be32(len);
+
+	ret = 0;
+
+out:
+	kfree(dma_pages);
+
+	return ret;
+}
+
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			struct srp_request *req)
 {
 	struct scatterlist *scat;
 	struct srp_cmd *cmd = req->cmd->buf;
 	int len, nents, count;
-	int i;
-	u8 fmt;
+	u8 fmt = SRP_DATA_DESC_DIRECT;
 
 	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
@@ -560,53 +664,63 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
 	}
 
-	count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
-			   scmnd->sc_data_direction);
+	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
+			   scat, nents, scmnd->sc_data_direction);
+
+	fmt = SRP_DATA_DESC_DIRECT;
+	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
 
 	if (count == 1) {
+		/*
+		 * The midlayer only generated a single gather/scatter
+		 * entry, or DMA mapping coalesced everything to a
+		 * single entry.  So a direct descriptor along with
+		 * the DMA MR suffices.
+		 */
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
-		fmt = SRP_DATA_DESC_DIRECT;
-
 		buf->va  = cpu_to_be64(sg_dma_address(scat));
-		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
 		buf->len = cpu_to_be32(sg_dma_len(scat));
-
-		len = sizeof (struct srp_cmd) +
-			sizeof (struct srp_direct_buf);
-	} else {
+	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
+			       (void *) cmd->add_data)) {
+		/*
+		 * FMR mapping failed, and the scatterlist has more
+		 * than one entry.  Generate an indirect memory
+		 * descriptor.
+		 */
 		struct srp_indirect_buf *buf = (void *) cmd->add_data;
 		u32 datalen = 0;
+		int i;
 
 		fmt = SRP_DATA_DESC_INDIRECT;
+		len = sizeof (struct srp_cmd) +
+			sizeof (struct srp_indirect_buf) +
+			count * sizeof (struct srp_direct_buf);
+
+		for (i = 0; i < count; ++i) {
+			buf->desc_list[i].va  =
+				cpu_to_be64(sg_dma_address(&scat[i]));
+			buf->desc_list[i].key =
+				cpu_to_be32(target->srp_host->dev->mr->rkey);
+			buf->desc_list[i].len =
+				cpu_to_be32(sg_dma_len(&scat[i]));
+			datalen += sg_dma_len(&scat[i]);
+		}
 
 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 			cmd->data_out_desc_cnt = count;
 		else
 			cmd->data_in_desc_cnt = count;
 
-		buf->table_desc.va = cpu_to_be64(req->cmd->dma +
-						 sizeof *cmd +
-						 sizeof *buf);
+		buf->table_desc.va  =
+			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->mr->rkey);
+			cpu_to_be32(target->srp_host->dev->mr->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
-		for (i = 0; i < count; ++i) {
-			buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
-			buf->desc_list[i].key =
-				cpu_to_be32(target->srp_host->mr->rkey);
-			buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
-
-			datalen += sg_dma_len(&scat[i]);
-		}
-
 		buf->len = cpu_to_be32(datalen);
-
-		len = sizeof (struct srp_cmd) +
-			sizeof (struct srp_indirect_buf) +
-			count * sizeof (struct srp_direct_buf);
 	}
 
 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -617,12 +731,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
-{
-	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
-}
-
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 {
 	struct srp_request *req;
@@ -689,7 +797,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 
 	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
+	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
 				target->max_ti_iu_len, DMA_FROM_DEVICE);
 
 	opcode = *(u8 *) iu->buf;
@@ -726,7 +834,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 		break;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
+	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
 				   target->max_ti_iu_len, DMA_FROM_DEVICE);
 }
 
@@ -770,7 +878,7 @@ static int __srp_post_recv(struct srp_target_port *target)
 
 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->mr->lkey;
+	list.lkey   = target->srp_host->dev->mr->lkey;
 
 	wr.next     = NULL;
 	wr.sg_list  = &list;
@@ -805,12 +913,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
-	if (unlikely(target->req_lim < 1)) {
-		if (printk_ratelimit())
-			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
-			       target->req_lim);
-		return NULL;
-	}
+	if (unlikely(target->req_lim < 1))
+		++target->zero_req_lim;
 
 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
 }
@@ -828,7 +932,7 @@ static int __srp_post_send(struct srp_target_port *target,
 
 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->mr->lkey;
+	list.lkey   = target->srp_host->dev->mr->lkey;
 
 	wr.next  = NULL;
 	wr.wr_id = target->tx_head & SRP_SQ_SIZE;
@@ -870,8 +974,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	if (!iu)
 		goto err;
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
-				SRP_MAX_IU_LEN, DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
+				srp_max_iu_len, DMA_TO_DEVICE);
 
 	req = list_entry(target->free_reqs.next, struct srp_request, list);
 
@@ -903,8 +1007,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err_unmap;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
-				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);
+	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
+				   srp_max_iu_len, DMA_TO_DEVICE);
 
 	if (__srp_post_send(target, iu, len)) {
 		printk(KERN_ERR PFX "Send failed\n");
@@ -936,7 +1040,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 
 	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
-						  SRP_MAX_IU_LEN,
+						  srp_max_iu_len,
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
@@ -1107,11 +1211,10 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		srp_cm_rej_handler(cm_id, event, target);
 		break;
 
-	case IB_CM_MRA_RECEIVED:
-		printk(KERN_ERR PFX "MRA received\n");
-		break;
-
-	case IB_CM_DREP_RECEIVED:
+	case IB_CM_DREQ_RECEIVED:
+		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
+		if (ib_send_cm_drep(cm_id, NULL, 0))
+			printk(KERN_ERR PFX "Sending CM DREP failed\n");
 		break;
 
 	case IB_CM_TIMEWAIT_EXIT:
@@ -1121,6 +1224,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		target->status = 0;
 		break;
 
+	case IB_CM_MRA_RECEIVED:
+	case IB_CM_DREQ_ERROR:
+	case IB_CM_DREP_RECEIVED:
+		break;
+
 	default:
 		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
 		break;
@@ -1239,11 +1347,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 	spin_lock_irq(target->scsi_host->host_lock);
 
 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device) {
-			req->scmnd->result = DID_RESET << 16;
-			req->scmnd->scsi_done(req->scmnd);
-			srp_remove_req(target, req);
-		}
+		if (req->scmnd->device == scmnd->device)
+			srp_reset_req(target, req);
 
 	spin_unlock_irq(target->scsi_host->host_lock);
 
@@ -1329,11 +1434,23 @@ static ssize_t show_dgid(struct class_device *cdev, char *buf)
 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
 }
 
+static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
+	return sprintf(buf, "%d\n", target->zero_req_lim);
+}
+
 static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
 static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
 static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
 static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
 static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
+static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
 
 static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_id_ext,
@@ -1341,6 +1458,7 @@ static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_service_id,
 	&class_device_attr_pkey,
 	&class_device_attr_dgid,
+	&class_device_attr_zero_req_lim,
 	NULL
 };
 
1346 1464
@@ -1354,7 +1472,6 @@ static struct scsi_host_template srp_template = {
 	.eh_host_reset_handler = srp_reset_host,
 	.can_queue             = SRP_SQ_SIZE,
 	.this_id               = -1,
-	.sg_tablesize          = SRP_MAX_INDIRECT,
 	.cmd_per_lun           = SRP_SQ_SIZE,
 	.use_clustering        = ENABLE_CLUSTERING,
 	.shost_attrs           = srp_host_attrs
@@ -1365,18 +1482,17 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 	sprintf(target->target_name, "SRP.T10:%016llX",
 		 (unsigned long long) be64_to_cpu(target->id_ext));
 
-	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
+	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
 		return -ENODEV;
 
-	mutex_lock(&host->target_mutex);
+	spin_lock(&host->target_lock);
 	list_add_tail(&target->list, &host->target_list);
-	mutex_unlock(&host->target_mutex);
+	spin_unlock(&host->target_lock);
 
 	target->state = SRP_TARGET_LIVE;
 
-	/* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
 	scsi_scan_target(&target->scsi_host->shost_gendev,
-			 0, target->scsi_id, ~0, 0);
+			 0, target->scsi_id, SCAN_WILD_CARD, 0);
 
 	return 0;
 }
@@ -1410,6 +1526,8 @@ enum {
 	SRP_OPT_PKEY		= 1 << 3,
 	SRP_OPT_SERVICE_ID	= 1 << 4,
 	SRP_OPT_MAX_SECT	= 1 << 5,
+	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
+	SRP_OPT_IO_CLASS	= 1 << 7,
 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
 				   SRP_OPT_IOC_GUID	|
 				   SRP_OPT_DGID		|
@@ -1418,13 +1536,15 @@
 };
 
 static match_table_t srp_opt_tokens = {
 	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
 	{ SRP_OPT_DGID,			"dgid=%s"		},
 	{ SRP_OPT_PKEY,			"pkey=%x"		},
 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
 	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
-	{ SRP_OPT_ERR,			NULL			}
+	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
+	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
+	{ SRP_OPT_ERR,			NULL			}
 };
 
 static int srp_parse_options(const char *buf, struct srp_target_port *target)
@@ -1500,6 +1620,29 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 			target->scsi_host->max_sectors = token;
 			break;
 
+		case SRP_OPT_MAX_CMD_PER_LUN:
+			if (match_int(args, &token)) {
+				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
+				goto out;
+			}
+			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+			break;
+
+		case SRP_OPT_IO_CLASS:
+			if (match_hex(args, &token)) {
+				printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p);
+				goto out;
+			}
+			if (token != SRP_REV10_IB_IO_CLASS &&
+			    token != SRP_REV16A_IB_IO_CLASS) {
+				printk(KERN_WARNING PFX "unknown IO class parameter value"
+				       " %x specified (use %x or %x).\n",
+				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
+				goto out;
+			}
+			target->io_class = token;
+			break;
+
 		default:
 			printk(KERN_WARNING PFX "unknown parameter or missing value "
 			       "'%s' in target creation request\n", p);
@@ -1542,6 +1685,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	target = host_to_target(target_host);
 	memset(target, 0, sizeof *target);
 
+	target->io_class  = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host = target_host;
 	target->srp_host  = host;
 
@@ -1558,7 +1702,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (ret)
 		goto err;
 
-	ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);
+	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);
 
 	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
 	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
@@ -1579,7 +1723,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (ret)
 		goto err;
 
-	target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
+	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
 	if (IS_ERR(target->cm_id)) {
 		ret = PTR_ERR(target->cm_id);
 		goto err_free;
@@ -1619,7 +1763,7 @@ static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
 	struct srp_host *host =
 		container_of(class_dev, struct srp_host, class_dev);
 
-	return sprintf(buf, "%s\n", host->dev->name);
+	return sprintf(buf, "%s\n", host->dev->dev->name);
 }
 
 static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -1634,7 +1778,7 @@ static ssize_t show_port(struct class_device *class_dev, char *buf)
 
 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
 
-static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
+static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
 {
 	struct srp_host *host;
 
@@ -1643,32 +1787,21 @@ static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 		return NULL;
 
 	INIT_LIST_HEAD(&host->target_list);
-	mutex_init(&host->target_mutex);
+	spin_lock_init(&host->target_lock);
 	init_completion(&host->released);
 	host->dev  = device;
 	host->port = port;
 
 	host->initiator_port_id[7] = port;
-	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
-
-	host->pd = ib_alloc_pd(device);
-	if (IS_ERR(host->pd))
-		goto err_free;
-
-	host->mr = ib_get_dma_mr(host->pd,
-				 IB_ACCESS_LOCAL_WRITE |
-				 IB_ACCESS_REMOTE_READ |
-				 IB_ACCESS_REMOTE_WRITE);
-	if (IS_ERR(host->mr))
-		goto err_pd;
+	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);
 
 	host->class_dev.class = &srp_class;
-	host->class_dev.dev   = device->dma_device;
+	host->class_dev.dev   = device->dev->dma_device;
 	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
-		 device->name, port);
+		 device->dev->name, port);
 
 	if (class_device_register(&host->class_dev))
-		goto err_mr;
+		goto free_host;
 	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
 		goto err_class;
 	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
@@ -1681,13 +1814,7 @@ static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 err_class:
 	class_device_unregister(&host->class_dev);
 
-err_mr:
-	ib_dereg_mr(host->mr);
-
-err_pd:
-	ib_dealloc_pd(host->pd);
-
-err_free:
+free_host:
 	kfree(host);
 
 	return NULL;
@@ -1695,15 +1822,62 @@ err_free:
 
 static void srp_add_one(struct ib_device *device)
 {
-	struct list_head *dev_list;
+	struct srp_device *srp_dev;
+	struct ib_device_attr *dev_attr;
+	struct ib_fmr_pool_param fmr_param;
 	struct srp_host *host;
 	int s, e, p;
 
-	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
-	if (!dev_list)
+	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
+	if (!dev_attr)
 		return;
 
-	INIT_LIST_HEAD(dev_list);
+	if (ib_query_device(device, dev_attr)) {
+		printk(KERN_WARNING PFX "Query device failed for %s\n",
+		       device->name);
+		goto free_attr;
+	}
+
+	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+	if (!srp_dev)
+		goto free_attr;
+
+	/*
+	 * Use the smallest page size supported by the HCA, down to a
+	 * minimum of 512 bytes (which is the smallest sector that a
+	 * SCSI command will ever carry).
+	 */
+	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
+	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
+	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);
+
+	INIT_LIST_HEAD(&srp_dev->dev_list);
+
+	srp_dev->dev = device;
+	srp_dev->pd  = ib_alloc_pd(device);
+	if (IS_ERR(srp_dev->pd))
+		goto free_dev;
+
+	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
+				    IB_ACCESS_LOCAL_WRITE |
+				    IB_ACCESS_REMOTE_READ |
+				    IB_ACCESS_REMOTE_WRITE);
+	if (IS_ERR(srp_dev->mr))
+		goto err_pd;
+
+	memset(&fmr_param, 0, sizeof fmr_param);
+	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
+	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
+	fmr_param.cache		    = 1;
+	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
+	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
+	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
+				       IB_ACCESS_REMOTE_WRITE |
+				       IB_ACCESS_REMOTE_READ);
+
+	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
+	if (IS_ERR(srp_dev->fmr_pool))
+		srp_dev->fmr_pool = NULL;
 
 	if (device->node_type == IB_NODE_SWITCH) {
 		s = 0;
@@ -1714,25 +1888,35 @@ static void srp_add_one(struct ib_device *device)
 	}
 
 	for (p = s; p <= e; ++p) {
-		host = srp_add_port(device, p);
+		host = srp_add_port(srp_dev, p);
 		if (host)
-			list_add_tail(&host->list, dev_list);
+			list_add_tail(&host->list, &srp_dev->dev_list);
 	}
 
-	ib_set_client_data(device, &srp_client, dev_list);
+	ib_set_client_data(device, &srp_client, srp_dev);
+
+	goto free_attr;
+
+err_pd:
+	ib_dealloc_pd(srp_dev->pd);
+
+free_dev:
+	kfree(srp_dev);
+
+free_attr:
+	kfree(dev_attr);
 }
 
 static void srp_remove_one(struct ib_device *device)
 {
-	struct list_head *dev_list;
+	struct srp_device *srp_dev;
 	struct srp_host *host, *tmp_host;
 	LIST_HEAD(target_list);
 	struct srp_target_port *target, *tmp_target;
-	unsigned long flags;
 
-	dev_list = ib_get_client_data(device, &srp_client);
+	srp_dev = ib_get_client_data(device, &srp_client);
 
-	list_for_each_entry_safe(host, tmp_host, dev_list, list) {
+	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
 		class_device_unregister(&host->class_dev);
 		/*
 		 * Wait for the sysfs entry to go away, so that no new
@@ -1744,15 +1928,13 @@ static void srp_remove_one(struct ib_device *device)
 		 * Mark all target ports as removed, so we stop queueing
 		 * commands and don't try to reconnect.
 		 */
-		mutex_lock(&host->target_mutex);
-		list_for_each_entry_safe(target, tmp_target,
-					 &host->target_list, list) {
-			spin_lock_irqsave(target->scsi_host->host_lock, flags);
-			if (target->state != SRP_TARGET_REMOVED)
-				target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+		spin_lock(&host->target_lock);
+		list_for_each_entry(target, &host->target_list, list) {
+			spin_lock_irq(target->scsi_host->host_lock);
+			target->state = SRP_TARGET_REMOVED;
+			spin_unlock_irq(target->scsi_host->host_lock);
 		}
-		mutex_unlock(&host->target_mutex);
+		spin_unlock(&host->target_lock);
 
 		/*
 		 * Wait for any reconnection tasks that may have
@@ -1770,18 +1952,26 @@ static void srp_remove_one(struct ib_device *device)
 			scsi_host_put(target->scsi_host);
 		}
 
-		ib_dereg_mr(host->mr);
-		ib_dealloc_pd(host->pd);
 		kfree(host);
 	}
 
-	kfree(dev_list);
+	if (srp_dev->fmr_pool)
+		ib_destroy_fmr_pool(srp_dev->fmr_pool);
+	ib_dereg_mr(srp_dev->mr);
+	ib_dealloc_pd(srp_dev->pd);
+
+	kfree(srp_dev);
 }
 
 static int __init srp_init_module(void)
 {
 	int ret;
 
+	srp_template.sg_tablesize = srp_sg_tablesize;
+	srp_max_iu_len = (sizeof (struct srp_cmd) +
+			  sizeof (struct srp_indirect_buf) +
+			  srp_sg_tablesize * 16);
+
 	ret = class_register(&srp_class);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
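Note: srp_max_iu_len, computed in srp_init_module() above, sizes both the login request and the transmit ring IUs. Each scatter/gather entry costs one struct srp_direct_buf in the indirect descriptor table, 16 bytes (8-byte va, 4-byte key, 4-byte len), which is where the literal * 16 comes from. A worked sketch with the default table size:

	/* srp_sg_tablesize left at SRP_DEF_SG_TABLESIZE (12): */
	srp_max_iu_len = sizeof (struct srp_cmd)
		       + sizeof (struct srp_indirect_buf)
		       + 12 * sizeof (struct srp_direct_buf);	/* 12 * 16 bytes of descriptors */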
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c5cd43aae860..5b581fb8eb0d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -46,6 +46,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_sa.h>
 #include <rdma/ib_cm.h>
+#include <rdma/ib_fmr_pool.h>
 
 enum {
 	SRP_PATH_REC_TIMEOUT_MS = 1000,
@@ -55,20 +56,21 @@ enum {
 	SRP_DLID_REDIRECT	= 2,
 
 	SRP_MAX_LUN		= 512,
-	SRP_MAX_IU_LEN		= 256,
+	SRP_DEF_SG_TABLESIZE	= 12,
 
 	SRP_RQ_SHIFT		= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
 	SRP_SQ_SIZE		= SRP_RQ_SIZE - 1,
 	SRP_CQ_SIZE		= SRP_SQ_SIZE + SRP_RQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1)
+	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+
+	SRP_FMR_SIZE		= 256,
+	SRP_FMR_POOL_SIZE	= 1024,
+	SRP_FMR_DIRTY_SIZE	= SRP_FMR_POOL_SIZE / 4
 };
 
 #define SRP_OP_RECV		(1 << 31)
-#define SRP_MAX_INDIRECT	((SRP_MAX_IU_LEN -		\
-				  sizeof (struct srp_cmd) -	\
-				  sizeof (struct srp_indirect_buf)) / 16)
 
 enum srp_target_state {
 	SRP_TARGET_LIVE,
@@ -77,15 +79,24 @@ enum srp_target_state {
 	SRP_TARGET_REMOVED
 };
 
-struct srp_host {
-	u8			initiator_port_id[16];
+struct srp_device {
+	struct list_head	dev_list;
 	struct ib_device       *dev;
-	u8			port;
 	struct ib_pd	       *pd;
 	struct ib_mr	       *mr;
+	struct ib_fmr_pool     *fmr_pool;
+	int			fmr_page_shift;
+	int			fmr_page_size;
+	unsigned long		fmr_page_mask;
+};
+
+struct srp_host {
+	u8			initiator_port_id[16];
+	struct srp_device      *dev;
+	u8			port;
 	struct class_device	class_dev;
 	struct list_head	target_list;
-	struct mutex		target_mutex;
+	spinlock_t		target_lock;
 	struct completion	released;
 	struct list_head	list;
 };
@@ -95,6 +106,7 @@ struct srp_request {
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
 	struct srp_iu	       *tsk_mgmt;
+	struct ib_pool_fmr     *fmr;
 	/*
 	 * Fake scatterlist used when scmnd->use_sg==0.  Can be killed
 	 * when the SCSI midlayer no longer generates non-SG commands.
@@ -110,6 +122,7 @@ struct srp_target_port {
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
+	u16			io_class;
 	struct srp_host	       *srp_host;
 	struct Scsi_Host       *scsi_host;
 	char			target_name[32];
@@ -126,6 +139,8 @@ struct srp_target_port {
 	int			max_ti_iu_len;
 	s32			req_lim;
 
+	int			zero_req_lim;
+
 	unsigned		rx_head;
 	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 