Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/ipoib/ipoib.h           |   2
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        | 194
 drivers/infiniband/ulp/ipoib/ipoib_main.c      |  37
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  34
 drivers/infiniband/ulp/iser/Kconfig            |   2
 drivers/infiniband/ulp/iser/iscsi_iser.c       |   1
 drivers/infiniband/ulp/iser/iscsi_iser.h       |   7
 drivers/infiniband/ulp/iser/iser_memory.c      |  80
 drivers/infiniband/ulp/iser/iser_verbs.c       |  10
 drivers/infiniband/ulp/srp/ib_srp.c            |  43
 10 files changed, 262 insertions(+), 148 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 474aa214ab57..0b8a79d53a00 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,8 @@ static inline void ipoib_unregister_debugfs(void) { }
 extern int ipoib_sendq_size;
 extern int ipoib_recvq_size;
 
+extern struct ib_sa_client ipoib_sa_client;
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 extern int ipoib_debug_level;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5033666b1481..f426a69d9a43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	return 0;
 }
 
-static void ipoib_ib_handle_wc(struct net_device *dev,
-			       struct ib_wc *wc)
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id;
+	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+	struct sk_buff *skb;
+	dma_addr_t addr;
 
-	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
+	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
 		       wr_id, wc->opcode, wc->status);
 
-	if (wr_id & IPOIB_OP_RECV) {
-		wr_id &= ~IPOIB_OP_RECV;
-
-		if (wr_id < ipoib_recvq_size) {
-			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
-			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;
-
-			if (unlikely(wc->status != IB_WC_SUCCESS)) {
-				if (wc->status != IB_WC_WR_FLUSH_ERR)
-					ipoib_warn(priv, "failed recv event "
-						   "(status=%d, wrid=%d vend_err %x)\n",
-						   wc->status, wr_id, wc->vendor_err);
-				dma_unmap_single(priv->ca->dma_device, addr,
-						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-				dev_kfree_skb_any(skb);
-				priv->rx_ring[wr_id].skb = NULL;
-				return;
-			}
+	if (unlikely(wr_id >= ipoib_recvq_size)) {
+		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_recvq_size);
+		return;
+	}
 
-			/*
-			 * If we can't allocate a new RX buffer, dump
-			 * this packet and reuse the old buffer.
-			 */
-			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-				++priv->stats.rx_dropped;
-				goto repost;
-			}
+	skb  = priv->rx_ring[wr_id].skb;
+	addr = priv->rx_ring[wr_id].mapping;
 
-			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
-				       wc->byte_len, wc->slid);
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			ipoib_warn(priv, "failed recv event "
+				   "(status=%d, wrid=%d vend_err %x)\n",
+				   wc->status, wr_id, wc->vendor_err);
+		dma_unmap_single(priv->ca->dma_device, addr,
+				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		priv->rx_ring[wr_id].skb = NULL;
+		return;
+	}
 
-			dma_unmap_single(priv->ca->dma_device, addr,
-					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+	/*
+	 * If we can't allocate a new RX buffer, dump
+	 * this packet and reuse the old buffer.
+	 */
+	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+		++priv->stats.rx_dropped;
+		goto repost;
+	}
 
-			skb_put(skb, wc->byte_len);
-			skb_pull(skb, IB_GRH_BYTES);
+	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+		       wc->byte_len, wc->slid);
 
-			if (wc->slid != priv->local_lid ||
-			    wc->src_qp != priv->qp->qp_num) {
-				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-				skb->mac.raw = skb->data;
-				skb_pull(skb, IPOIB_ENCAP_LEN);
+	dma_unmap_single(priv->ca->dma_device, addr,
+			 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 
-				dev->last_rx = jiffies;
-				++priv->stats.rx_packets;
-				priv->stats.rx_bytes += skb->len;
+	skb_put(skb, wc->byte_len);
+	skb_pull(skb, IB_GRH_BYTES);
 
-				skb->dev = dev;
-				/* XXX get correct PACKET_ type here */
-				skb->pkt_type = PACKET_HOST;
-				netif_rx_ni(skb);
-			} else {
-				ipoib_dbg_data(priv, "dropping loopback packet\n");
-				dev_kfree_skb_any(skb);
-			}
+	if (wc->slid != priv->local_lid ||
+	    wc->src_qp != priv->qp->qp_num) {
+		skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+		skb->mac.raw = skb->data;
+		skb_pull(skb, IPOIB_ENCAP_LEN);
 
-		repost:
-			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
-				ipoib_warn(priv, "ipoib_ib_post_receive failed "
-					   "for buf %d\n", wr_id);
-		} else
-			ipoib_warn(priv, "completion event with wrid %d\n",
-				   wr_id);
+		dev->last_rx = jiffies;
+		++priv->stats.rx_packets;
+		priv->stats.rx_bytes += skb->len;
 
-	} else {
-		struct ipoib_tx_buf *tx_req;
-		unsigned long flags;
+		skb->dev = dev;
+		/* XXX get correct PACKET_ type here */
+		skb->pkt_type = PACKET_HOST;
+		netif_rx_ni(skb);
+	} else {
+		ipoib_dbg_data(priv, "dropping loopback packet\n");
+		dev_kfree_skb_any(skb);
+	}
 
-		if (wr_id >= ipoib_sendq_size) {
-			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
-				   wr_id, ipoib_sendq_size);
-			return;
-		}
+repost:
+	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+		ipoib_warn(priv, "ipoib_ib_post_receive failed "
+			   "for buf %d\n", wr_id);
+}
 
-		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id;
+	struct ipoib_tx_buf *tx_req;
+	unsigned long flags;
 
-		tx_req = &priv->tx_ring[wr_id];
+	ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
 
-		dma_unmap_single(priv->ca->dma_device,
-				 pci_unmap_addr(tx_req, mapping),
-				 tx_req->skb->len,
-				 DMA_TO_DEVICE);
+	if (unlikely(wr_id >= ipoib_sendq_size)) {
+		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_sendq_size);
+		return;
+	}
 
-		++priv->stats.tx_packets;
-		priv->stats.tx_bytes += tx_req->skb->len;
+	tx_req = &priv->tx_ring[wr_id];
 
-		dev_kfree_skb_any(tx_req->skb);
+	dma_unmap_single(priv->ca->dma_device,
+			 pci_unmap_addr(tx_req, mapping),
+			 tx_req->skb->len,
+			 DMA_TO_DEVICE);
 
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		++priv->tx_tail;
-		if (netif_queue_stopped(dev) &&
-		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
-		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
-			netif_wake_queue(dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+	++priv->stats.tx_packets;
+	priv->stats.tx_bytes += tx_req->skb->len;
 
-		if (wc->status != IB_WC_SUCCESS &&
-		    wc->status != IB_WC_WR_FLUSH_ERR)
-			ipoib_warn(priv, "failed send event "
-				   "(status=%d, wrid=%d vend_err %x)\n",
-				   wc->status, wr_id, wc->vendor_err);
-	}
+	dev_kfree_skb_any(tx_req->skb);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	++priv->tx_tail;
+	if (netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
+	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
+		netif_wake_queue(dev);
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	if (wc->status != IB_WC_SUCCESS &&
+	    wc->status != IB_WC_WR_FLUSH_ERR)
+		ipoib_warn(priv, "failed send event "
+			   "(status=%d, wrid=%d vend_err %x)\n",
+			   wc->status, wr_id, wc->vendor_err);
+}
+
+static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	if (wc->wr_id & IPOIB_OP_RECV)
+		ipoib_ib_handle_rx_wc(dev, wc);
+	else
+		ipoib_ib_handle_tx_wc(dev, wc);
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
@@ -320,7 +332,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	struct ipoib_tx_buf *tx_req;
 	dma_addr_t addr;
 
-	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
+	if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, dev->mtu + INFINIBAND_ALEN);
 		++priv->stats.tx_dropped;
@@ -619,8 +631,10 @@ void ipoib_ib_dev_flush(void *_dev)
 	 * The device could have been brought down between the start and when
 	 * we get here, don't bring it back up if it's not configured up
 	 */
-	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
+		ipoib_mcast_restart_task(dev);
+	}
 
 	mutex_lock(&priv->vlan_mutex);
 
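
The ipoib_ib.c change above splits the single completion handler in two, relying on a tagging convention: receive work requests carry a flag bit OR'ed into wr_id, above the range of any ring index, so one CQ handler can tell RX from TX completions apart. A minimal sketch of that convention (the flag value and helper names here are illustrative; the posting side is outside this hunk):

	#include <linux/types.h>

	#define IPOIB_OP_RECV (1ul << 31)	/* assumed: a bit above any ring index */

	/* Posting side: receive WRs are tagged with the flag. */
	static inline u64 rx_wr_id(unsigned int ring_index)
	{
		return ring_index | IPOIB_OP_RECV;
	}

	/* Completion side: demux on the flag, as the new ipoib_ib_handle_wc()
	 * does, then mask it off to recover the rx_ring index. */
	static inline int wr_id_is_recv(u64 wr_id)
	{
		return (wr_id & IPOIB_OP_RECV) != 0;
	}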
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf71d2a5515c..1eaf00e9862c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -40,7 +40,6 @@
 
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/kernel.h>
 
 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
@@ -82,6 +81,8 @@ static const u8 ipv4_bcast_addr[] = {
 
 struct workqueue_struct *ipoib_workqueue;
 
+struct ib_sa_client ipoib_sa_client;
+
 static void ipoib_add_one(struct ib_device *device);
 static void ipoib_remove_one(struct ib_device *device);
 
@@ -336,7 +337,8 @@ void ipoib_flush_paths(struct net_device *dev)
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irq(&priv->tx_lock);
+	spin_lock(&priv->lock);
 
 	list_splice(&priv->path_list, &remove_list);
 	INIT_LIST_HEAD(&priv->path_list);
@@ -347,12 +349,15 @@ void ipoib_flush_paths(struct net_device *dev)
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock_irq(&priv->lock);
+		spin_unlock(&priv->lock);
+		spin_unlock_irq(&priv->tx_lock);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->lock);
+		spin_lock_irq(&priv->tx_lock);
+		spin_lock(&priv->lock);
 	}
-	spin_unlock_irq(&priv->lock);
+	spin_unlock(&priv->lock);
+	spin_unlock_irq(&priv->tx_lock);
 }
 
 static void path_rec_completion(int status,
@@ -459,7 +464,7 @@ static int path_rec_start(struct net_device *dev,
 	init_completion(&path->done);
 
 	path->query_id =
-		ib_sa_path_rec_get(priv->ca, priv->port,
+		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
 				   &path->pathrec,
 				   IB_SA_PATH_REC_DGID |
 				   IB_SA_PATH_REC_SGID |
@@ -615,7 +620,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
+	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
 		return NETDEV_TX_LOCKED;
 
 	/*
@@ -628,7 +633,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (skb->dst && skb->dst->neighbour) {
+	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);
 			goto out;
@@ -1107,13 +1112,16 @@ static void ipoib_add_one(struct ib_device *device)
 	struct ipoib_dev_priv *priv;
 	int s, e, p;
 
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
 	if (!dev_list)
 		return;
 
 	INIT_LIST_HEAD(dev_list);
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		s = 0;
 		e = 0;
 	} else {
@@ -1137,6 +1145,9 @@ static void ipoib_remove_one(struct ib_device *device)
 	struct ipoib_dev_priv *priv, *tmp;
 	struct list_head *dev_list;
 
+	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+		return;
+
 	dev_list = ib_get_client_data(device, &ipoib_client);
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
@@ -1181,13 +1192,16 @@ static int __init ipoib_init_module(void)
 		goto err_fs;
 	}
 
+	ib_sa_register_client(&ipoib_sa_client);
+
 	ret = ib_register_client(&ipoib_client);
 	if (ret)
-		goto err_wq;
+		goto err_sa;
 
 	return 0;
 
-err_wq:
+err_sa:
+	ib_sa_unregister_client(&ipoib_sa_client);
 	destroy_workqueue(ipoib_workqueue);
 
 err_fs:
@@ -1199,6 +1213,7 @@ err_fs:
 static void __exit ipoib_cleanup_module(void)
 {
 	ib_unregister_client(&ipoib_client);
+	ib_sa_unregister_client(&ipoib_sa_client);
 	ipoib_unregister_debugfs();
 	destroy_workqueue(ipoib_workqueue);
 }
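
The init/exit changes follow the usual kernel unwind idiom: ib_sa_register_client() returns void and cannot fail, so it runs first, every later failure unwinds in reverse order, and cleanup mirrors init. A condensed sketch of the ordering this patch establishes (earlier error labels elided):

	static int __init example_init(void)
	{
		int ret;

		ib_sa_register_client(&ipoib_sa_client);	/* void: cannot fail */

		ret = ib_register_client(&ipoib_client);
		if (ret)
			goto err_sa;				/* unwind in reverse */

		return 0;

	err_sa:
		ib_sa_unregister_client(&ipoib_sa_client);
		return ret;
	}

	static void __exit example_exit(void)
	{
		ib_unregister_client(&ipoib_client);		/* reverse of init */
		ib_sa_unregister_client(&ipoib_sa_client);
	}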
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ec356ce7cdcd..3faa1820f0e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -361,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 
 	init_completion(&mcast->done);
 
-	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+	ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec,
 				     IB_SA_MCMEMBER_REC_MGID |
 				     IB_SA_MCMEMBER_REC_PORT_GID |
 				     IB_SA_MCMEMBER_REC_PKEY |
@@ -472,22 +472,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 
 	if (create) {
 		comp_mask |=
 			IB_SA_MCMEMBER_REC_QKEY |
-			IB_SA_MCMEMBER_REC_SL |
-			IB_SA_MCMEMBER_REC_FLOW_LABEL |
-			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+			IB_SA_MCMEMBER_REC_MTU |
+			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+			IB_SA_MCMEMBER_REC_RATE |
+			IB_SA_MCMEMBER_REC_SL |
+			IB_SA_MCMEMBER_REC_FLOW_LABEL |
+			IB_SA_MCMEMBER_REC_HOP_LIMIT;
 
 		rec.qkey          = priv->broadcast->mcmember.qkey;
+		rec.mtu_selector  = IB_SA_EQ;
+		rec.mtu           = priv->broadcast->mcmember.mtu;
+		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+		rec.rate_selector = IB_SA_EQ;
+		rec.rate          = priv->broadcast->mcmember.rate;
 		rec.sl            = priv->broadcast->mcmember.sl;
 		rec.flow_label    = priv->broadcast->mcmember.flow_label;
-		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+		rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
 	}
 
 	init_completion(&mcast->done);
 
-	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
-				     mcast->backoff * 1000, GFP_ATOMIC,
-				     ipoib_mcast_join_complete,
-				     mcast, &mcast->query);
+	ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port,
+				     &rec, comp_mask, mcast->backoff * 1000,
+				     GFP_ATOMIC, ipoib_mcast_join_complete,
+				     mcast, &mcast->query);
 
 	if (ret < 0) {
@@ -528,7 +538,7 @@ void ipoib_mcast_join_task(void *dev_ptr)
 			priv->local_rate = attr.active_speed *
 				ib_width_enum_to_int(attr.active_width);
 		} else
-		ipoib_warn(priv, "ib_query_port failed\n");
+			ipoib_warn(priv, "ib_query_port failed\n");
 	}
 
 	if (!priv->broadcast) {
@@ -681,7 +691,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	 * Just make one shot at leaving and don't wait for a reply;
 	 * if we fail, too bad.
 	 */
-	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
+	ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec,
 					IB_SA_MCMEMBER_REC_MGID |
 					IB_SA_MCMEMBER_REC_PORT_GID |
 					IB_SA_MCMEMBER_REC_PKEY |
@@ -795,7 +805,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 
 	if (priv->broadcast) {
-		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
 		list_add_tail(&priv->broadcast->list, &remove_list);
 		priv->broadcast = NULL;
 	}
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index fead87d1eff9..365a1b5f19e0 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_ISER
 	tristate "ISCSI RDMA Protocol"
-	depends on INFINIBAND && SCSI
+	depends on INFINIBAND && SCSI && INET
 	select SCSI_ISCSI_ATTRS
 	---help---
 	  Support for the ISCSI RDMA Protocol over InfiniBand.  This
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1437d7ee3b19..e9cf1a9f1e1c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -555,6 +555,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.queuecommand           = iscsi_queuecommand,
 	.can_queue              = ISCSI_XMIT_CMDS_MAX - 1,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
+	.max_sectors            = 1024,
 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_host_reset_handler  = iscsi_eh_host_reset,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 3350ba690cfe..7e1a411db2a3 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -82,8 +82,12 @@
 			 __func__ , ## arg);		\
 	} while (0)
 
+#define SHIFT_4K	12
+#define SIZE_4K	(1UL << SHIFT_4K)
+#define MASK_4K	(~(SIZE_4K-1))
+
 					/* support upto 512KB in one RDMA */
-#define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> PAGE_SHIFT)
+#define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN		256
 #define ISCSI_ISER_MAX_CMD_LEN		16
 
@@ -171,6 +175,7 @@ struct iser_mem_reg {
 	u64  va;
 	u64  len;
 	void *mem_h;
+	int  is_fmr;
 };
 
 struct iser_regd_buf {
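
Note that MASK_4K here is a page mask (high bits set), not the low-bit offset mask that iser_memory.c previously defined, which is why the IS_4K_ALIGNED test further down flips to testing & ~MASK_4K. A worked example of the arithmetic, as standalone userspace C assuming a 64-bit unsigned long:

	#include <stdio.h>

	#define SHIFT_4K 12
	#define SIZE_4K  (1UL << SHIFT_4K)	/* 0x1000 */
	#define MASK_4K  (~(SIZE_4K - 1))	/* ...fffff000: keeps the page bits */

	int main(void)
	{
		unsigned long addr = 0x12345;

		printf("page   = 0x%lx\n", addr & MASK_4K);	/* prints 0x12000 */
		printf("offset = 0x%lx\n", addr & ~MASK_4K);	/* prints 0x345 */
		return 0;
	}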
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 31950a522a1c..d0b03f426581 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -42,6 +42,7 @@
 #include "iscsi_iser.h"
 
 #define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+
 /**
  * Decrements the reference count for the
  * registered buffer & releases it
@@ -55,7 +56,7 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 	if ((atomic_read(&regd_buf->ref_count) == 0) ||
 	    atomic_dec_and_test(&regd_buf->ref_count)) {
 		/* if we used the dma mr, unreg is just NOP */
-		if (regd_buf->reg.rkey != 0)
+		if (regd_buf->reg.is_fmr)
 			iser_unreg_mem(&regd_buf->reg);
 
 		if (regd_buf->dma_addr) {
@@ -90,9 +91,9 @@ void iser_reg_single(struct iser_device *device,
 	BUG_ON(dma_mapping_error(dma_addr));
 
 	regd_buf->reg.lkey = device->mr->lkey;
-	regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
 	regd_buf->reg.len  = regd_buf->data_size;
 	regd_buf->reg.va   = dma_addr;
+	regd_buf->reg.is_fmr = 0;
 
 	regd_buf->dma_addr  = dma_addr;
 	regd_buf->direction = direction;
@@ -239,7 +240,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset;
+	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
 	for (i = 0; i < data->dma_nents; i++) {
 		total_sz += sg_dma_len(&sg[i]);
@@ -247,21 +248,30 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 		first_addr = sg_dma_address(&sg[i]);
 		last_addr  = first_addr + sg_dma_len(&sg[i]);
 
-		start_aligned = !(first_addr & ~PAGE_MASK);
-		end_aligned   = !(last_addr  & ~PAGE_MASK);
+		start_aligned = !(first_addr & ~MASK_4K);
+		end_aligned   = !(last_addr  & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
 			i++;
 			total_sz += sg_dma_len(&sg[i]);
 			last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
-			end_aligned = !(last_addr  & ~PAGE_MASK);
+			end_aligned = !(last_addr  & ~MASK_4K);
 		}
 
-		first_addr = first_addr & PAGE_MASK;
-
-		for (page = first_addr; page < last_addr; page += PAGE_SIZE)
-			page_vec->pages[cur_page++] = page;
+		/* handle the 1st page in the 1st DMA element */
+		if (cur_page == 0) {
+			page = first_addr & MASK_4K;
+			page_vec->pages[cur_page] = page;
+			cur_page++;
+			page += SIZE_4K;
+		} else
+			page = first_addr;
+
+		for (; page < last_addr; page += SIZE_4K) {
+			page_vec->pages[cur_page] = page;
+			cur_page++;
+		}
 
 	}
 	page_vec->data_size = total_sz;
@@ -269,8 +279,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	return cur_page;
 }
 
-#define MASK_4K			((1UL << 12) - 1) /* 0xFFF */
-#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & MASK_4K) == 0)
+#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
 
 /**
  * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -320,9 +329,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
 	int i;
 
-	for (i = 0; i < data->size; i++)
+	for (i = 0; i < data->dma_nents; i++)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
-			 "off:%d sz:%d dma_len:%d\n",
+			 "off:0x%x sz:0x%x dma_len:0x%x\n",
 			 i, (unsigned long)sg_dma_address(&sg[i]),
 			 sg[i].page, sg[i].offset,
 			 sg[i].length,sg_dma_len(&sg[i]));
@@ -352,7 +361,7 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 
 	page_vec->length = page_vec_len;
 
-	if (page_vec_len * PAGE_SIZE < page_vec->data_size) {
+	if (page_vec_len * SIZE_4K < page_vec->data_size) {
 		iser_err("page_vec too short to hold this SG\n");
 		iser_data_buf_dump(data);
 		iser_dump_page_vec(page_vec);
@@ -370,15 +379,18 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 			  enum iser_data_dir cmd_dir)
 {
 	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
+	struct iser_device   *device = ib_conn->device;
 	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
 	int err;
+	int i;
+	struct scatterlist *sg;
 
 	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem);
-	if (aligned_len != mem->size) {
+	if (aligned_len != mem->dma_nents) {
 		iser_err("rdma alignment violation %d/%d aligned\n",
 			 aligned_len, mem->size);
 		iser_data_buf_dump(mem);
@@ -389,10 +401,38 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		mem = &iser_ctask->data_copy[cmd_dir];
 	}
 
-	iser_page_vec_build(mem, ib_conn->page_vec);
-	err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
-	if (err)
-		return err;
+	/* if there a single dma entry, FMR is not needed */
+	if (mem->dma_nents == 1) {
+		sg = (struct scatterlist *)mem->buf;
+
+		regd_buf->reg.lkey = device->mr->lkey;
+		regd_buf->reg.rkey = device->mr->rkey;
+		regd_buf->reg.len  = sg_dma_len(&sg[0]);
+		regd_buf->reg.va   = sg_dma_address(&sg[0]);
+		regd_buf->reg.is_fmr = 0;
+
+		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
+			 "va: 0x%08lX sz: %ld]\n",
+			 (unsigned int)regd_buf->reg.lkey,
+			 (unsigned int)regd_buf->reg.rkey,
+			 (unsigned long)regd_buf->reg.va,
+			 (unsigned long)regd_buf->reg.len);
+	} else { /* use FMR for multiple dma entries */
+		iser_page_vec_build(mem, ib_conn->page_vec);
+		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+		if (err) {
+			iser_data_buf_dump(mem);
+			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
+				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
+			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+				 ib_conn->page_vec->offset);
+			for (i = 0; i < ib_conn->page_vec->length; i++)
+				iser_err("page_vec[%d] = 0x%llx\n", i,
+					 (unsigned long long)ib_conn->page_vec->pages[i]);
+			return err;
+		}
+	}
 
 	/* take a reference on this regd buf such that it will not be released *
 	 * (eg in send dto completion) before we get the scsi response          */
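
The iser_reg_rdma_mem() rework above adds a fast path: a buffer that maps to a single DMA entry is already covered by the device's global DMA memory region, so its lkey/rkey describe it directly and the FMR pool is bypassed; the new is_fmr flag then tells iser_regd_buff_release() whether there is anything to unregister. A condensed sketch of the decision (error paths and debug dumps elided; this mirrors the hunk above, it is not a complete function):

	if (mem->dma_nents == 1) {
		/* One contiguous mapping: describe it via the global DMA MR. */
		regd_buf->reg.lkey   = device->mr->lkey;
		regd_buf->reg.rkey   = device->mr->rkey;
		regd_buf->reg.va     = sg_dma_address(&sg[0]);
		regd_buf->reg.len    = sg_dma_len(&sg[0]);
		regd_buf->reg.is_fmr = 0;	/* nothing to unregister later */
	} else {
		/* Scattered mapping: build a 4K page vector, map through FMR. */
		iser_page_vec_build(mem, ib_conn->page_vec);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
	}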
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 72febf1f8ff8..ecdca7fc1e4c 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,8 +88,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 				     iser_cq_tasklet_fn,
 				     (unsigned long)device);
 
-	device->mr = ib_get_dma_mr(device->pd,
-				   IB_ACCESS_LOCAL_WRITE);
+	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
+				   IB_ACCESS_REMOTE_WRITE |
+				   IB_ACCESS_REMOTE_READ);
 	if (IS_ERR(device->mr))
 		goto dma_mr_err;
 
@@ -150,7 +151,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	}
 	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
 
-	params.page_shift        = PAGE_SHIFT;
+	params.page_shift        = SHIFT_4K;
 	/* when the first/last SG element are not start/end *
 	 * page aligned, the map whould be of N+1 pages    */
 	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
@@ -604,8 +605,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
 
 	mem_reg->lkey  = mem->fmr->lkey;
 	mem_reg->rkey  = mem->fmr->rkey;
-	mem_reg->len   = page_vec->length * PAGE_SIZE;
+	mem_reg->len   = page_vec->length * SIZE_4K;
 	mem_reg->va    = io_addr;
+	mem_reg->is_fmr = 1;
 	mem_reg->mem_h = (void *)mem;
 
 	mem_reg->va   += page_vec->offset;
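
Switching the FMR page size from PAGE_SHIFT to the fixed SHIFT_4K decouples iSER's RDMA page accounting from the CPU page size (PAGE_SHIFT is 12 on x86 but can be larger on other architectures). The sizing behind max_pages_per_fmr then works out as follows, using the constants from iscsi_iser.h:

	/* Worked sizing (illustrative):
	 *   ISCSI_ISER_SG_TABLESIZE = 0x80000 >> SHIFT_4K = 512 KB / 4 KB = 128
	 * A 512 KB transfer whose start is not 4K-aligned spills into one
	 * extra page, hence the +1:
	 *   params.max_pages_per_fmr = 128 + 1 = 129
	 */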
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fd8344cdc0db..44b9e5be6687 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -96,6 +96,8 @@ static struct ib_client srp_client = {
 	.remove = srp_remove_one
 };
 
+static struct ib_sa_client srp_sa_client;
+
 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
 {
 	return (struct srp_target_port *) host->hostdata;
@@ -267,7 +269,8 @@ static int srp_lookup_path(struct srp_target_port *target)
 
 	init_completion(&target->done);
 
-	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
+	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+						   target->srp_host->dev->dev,
 						   target->srp_host->port,
 						   &target->path,
 						   IB_SA_PATH_REC_DGID |
@@ -330,7 +333,7 @@ static int srp_send_req(struct srp_target_port *target)
 	req->priv.req_buf_fmt 	    = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
 	/*
-	 * In the published SRP specification (draft rev. 16a), the
+	 * In the published SRP specification (draft rev. 16a), the
 	 * port identifier format is 8 bytes of ID extension followed
 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
 	 * opposite order, so that the GUID comes first.
@@ -1449,12 +1452,28 @@ static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
 	return sprintf(buf, "%d\n", target->zero_req_lim);
 }
 
-static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
-static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
-static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
-static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
-static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
-static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
+static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	return sprintf(buf, "%d\n", target->srp_host->port);
+}
+
+static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
+}
+
+static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
+static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
+static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
+static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
+static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
+static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
+static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
+static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
 
 static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_id_ext,
@@ -1463,6 +1482,8 @@ static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_pkey,
 	&class_device_attr_dgid,
 	&class_device_attr_zero_req_lim,
+	&class_device_attr_local_ib_port,
+	&class_device_attr_local_ib_device,
 	NULL
 };
 
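
The two new attributes follow the standard read-only class_device attribute pattern of this era: a show routine plus a CLASS_DEVICE_ATTR declaration, wired into the host's attribute array. A minimal sketch ("example" is a hypothetical attribute name):

	static ssize_t show_example(struct class_device *cdev, char *buf)
	{
		struct srp_target_port *target = host_to_target(class_to_shost(cdev));

		return sprintf(buf, "%d\n", target->srp_host->port);
	}

	static CLASS_DEVICE_ATTR(example, S_IRUGO, show_example, NULL);
	/* ...and add &class_device_attr_example to srp_host_attrs[]. */

Once registered, the value can be read from sysfs, e.g. /sys/class/scsi_host/hostN/local_ib_port.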
@@ -1881,7 +1902,7 @@ static void srp_add_one(struct ib_device *device)
 	if (IS_ERR(srp_dev->fmr_pool))
 		srp_dev->fmr_pool = NULL;
 
-	if (device->node_type == IB_NODE_SWITCH) {
+	if (device->node_type == RDMA_NODE_IB_SWITCH) {
 		s = 0;
 		e = 0;
 	} else {
@@ -1980,9 +2001,12 @@ static int __init srp_init_module(void)
 		return ret;
 	}
 
+	ib_sa_register_client(&srp_sa_client);
+
 	ret = ib_register_client(&srp_client);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register IB client\n");
+		ib_sa_unregister_client(&srp_sa_client);
 		class_unregister(&srp_class);
 		return ret;
 	}
@@ -1993,6 +2017,7 @@ static int __init srp_init_module(void)
 static void __exit srp_cleanup_module(void)
 {
 	ib_unregister_client(&srp_client);
+	ib_sa_unregister_client(&srp_sa_client);
 	class_unregister(&srp_class);
 }
 