Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/ipoib/ipoib.h           |  4
 drivers/infiniband/ulp/ipoib/ipoib_main.c      | 29
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  3
 drivers/infiniband/ulp/iser/iser_memory.c      | 75
 drivers/infiniband/ulp/srp/Kconfig             |  1
 drivers/infiniband/ulp/srp/ib_srp.c            | 28
 6 files changed, 93 insertions(+), 47 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0a00ea0e887d..eb7edab0e836 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -348,6 +348,7 @@ struct ipoib_neigh {
 	struct sk_buff_head queue;
 
 	struct neighbour   *neighbour;
+	struct net_device  *dev;
 
 	struct list_head    list;
 };
@@ -364,7 +365,8 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
 				    INFINIBAND_ALEN, sizeof(void *));
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh,
+				      struct net_device *dev);
 void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
 
 extern struct workqueue_struct *ipoib_workqueue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ace2345960ee..a03a65ebcf0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -515,7 +515,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
 
-	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
@@ -690,9 +690,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
 	} else if (neigh->ah) {
-		if (unlikely(memcmp(&neigh->dgid.raw,
-				    skb->dst->neighbour->ha + 4,
-				    sizeof(union ib_gid)))) {
+		if (unlikely((memcmp(&neigh->dgid.raw,
+				     skb->dst->neighbour->ha + 4,
+				     sizeof(union ib_gid))) ||
+			     (neigh->dev != dev))) {
 			spin_lock(&priv->lock);
 			/*
 			 * It's safe to call ipoib_put_ah() inside
@@ -815,6 +816,13 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 	unsigned long flags;
 	struct ipoib_ah *ah = NULL;
 
+	neigh = *to_ipoib_neigh(n);
+	if (neigh) {
+		priv = netdev_priv(neigh->dev);
+		ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
+			  n->dev->name);
+	} else
+		return;
 	ipoib_dbg(priv,
 		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
 		  IPOIB_QPN(n->ha),
@@ -822,13 +830,10 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	neigh = *to_ipoib_neigh(n);
-	if (neigh) {
-		if (neigh->ah)
-			ah = neigh->ah;
-		list_del(&neigh->list);
-		ipoib_neigh_free(n->dev, neigh);
-	}
+	if (neigh->ah)
+		ah = neigh->ah;
+	list_del(&neigh->list);
+	ipoib_neigh_free(n->dev, neigh);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -836,7 +841,8 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 		ipoib_put_ah(ah);
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
+				      struct net_device *dev)
 {
 	struct ipoib_neigh *neigh;
 
@@ -845,6 +851,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
 		return NULL;
 
 	neigh->neighbour = neighbour;
+	neigh->dev = dev;
 	*to_ipoib_neigh(neighbour) = neigh;
 	skb_queue_head_init(&neigh->queue);
 	ipoib_cm_set(neigh, NULL);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 827820ec66d1..9bcfc7ad6aa6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -705,7 +705,8 @@ out:
 		if (skb->dst &&
 		    skb->dst->neighbour &&
 		    !*to_ipoib_neigh(skb->dst->neighbour)) {
-			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
+								      skb->dev);
 
 			if (neigh) {
 				kref_get(&mcast->ah->ref);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e05690e3592f..d68798061795 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sg = (struct scatterlist *)data->buf;
+		struct scatterlist *sgl = (struct scatterlist *)data->buf;
+		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
-		for (p = mem, i = 0; i < data->size; i++) {
-			from = kmap_atomic(sg[i].page, KM_USER0);
+		p = mem;
+		for_each_sg(sgl, sg, data->size, i) {
+			from = kmap_atomic(sg_page(sg), KM_USER0);
 			memcpy(p,
-			       from + sg[i].offset,
-			       sg[i].length);
+			       from + sg->offset,
+			       sg->length);
 			kunmap_atomic(from, KM_USER0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
-		struct scatterlist *sg;
+		struct scatterlist *sgl, *sg;
 		unsigned char *p, *to;
 		unsigned int sg_size;
 		int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
 		sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-		for (p = mem, i = 0; i < sg_size; i++){
-			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-			memcpy(to + sg[i].offset,
+		p = mem;
+		for_each_sg(sgl, sg, sg_size, i) {
+			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+			memcpy(to + sg->offset,
 			       p,
-			       sg[i].length);
+			       sg->length);
 			kunmap_atomic(to, KM_SOFTIRQ0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	for (i = 0; i < data->dma_nents; i++) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+	for_each_sg(sgl, sg, data->dma_nents, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, sg);
 		last_addr = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
 			i++;
-			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, sg);
 			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 					      struct ib_device *ibdev)
 {
-	struct scatterlist *sg;
+	struct scatterlist *sgl, *sg;
 	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
-	sg = (struct scatterlist *)data->buf;
+	sgl = (struct scatterlist *)data->buf;
 
-	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+	cnt = 0;
+	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg[i].page),
-		   (unsigned long)sg[i].offset,
-		   (unsigned long)sg[i].length); */
-		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-			   ib_sg_dma_len(ibdev, &sg[i]);
+		   (unsigned long)sg_phys(sg),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
 		/* iser_dbg("Checking sg iobuf end address "
 		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < data->dma_nents; i++)
+	for_each_sg(sgl, sg, data->dma_nents, i)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-			 sg[i].page, sg[i].offset,
-			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+			 sg_page(sg), sg->offset,
+			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
index 3432dce29520..c74ee9633041 100644
--- a/drivers/infiniband/ulp/srp/Kconfig
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -1,6 +1,7 @@
 config INFINIBAND_SRP
 	tristate "InfiniBand SCSI RDMA Protocol"
 	depends on SCSI
+	select SCSI_SRP_ATTRS
 	---help---
 	  Support for the SCSI RDMA Protocol over InfiniBand. This
 	  allows you to access storage devices that speak SRP over
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9ccc63886d92..950228fb009f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -47,6 +47,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/srp.h>
+#include <scsi/scsi_transport_srp.h>
 
 #include <rdma/ib_cache.h>
 
@@ -86,6 +87,8 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
+static struct scsi_transport_template *ib_srp_transport_template;
+
 static struct ib_client srp_client = {
 	.name = "srp",
 	.add = srp_add_one,
@@ -420,6 +423,7 @@ static void srp_remove_work(struct work_struct *work)
 	list_del(&target->list);
 	spin_unlock(&target->srp_host->target_lock);
 
+	srp_remove_host(target->scsi_host);
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
@@ -1544,12 +1548,24 @@ static struct scsi_host_template srp_template = {
 
 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 {
+	struct srp_rport_identifiers ids;
+	struct srp_rport *rport;
+
 	sprintf(target->target_name, "SRP.T10:%016llX",
 		 (unsigned long long) be64_to_cpu(target->id_ext));
 
 	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
 		return -ENODEV;
 
+	memcpy(ids.port_id, &target->id_ext, 8);
+	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
+	ids.roles = SRP_RPORT_ROLE_TARGET;
+	rport = srp_rport_add(target->scsi_host, &ids);
+	if (IS_ERR(rport)) {
+		scsi_remove_host(target->scsi_host);
+		return PTR_ERR(rport);
+	}
+
 	spin_lock(&host->target_lock);
 	list_add_tail(&target->list, &host->target_list);
 	spin_unlock(&host->target_lock);
@@ -1775,6 +1791,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (!target_host)
 		return -ENOMEM;
 
+	target_host->transportt = ib_srp_transport_template;
 	target_host->max_lun = SRP_MAX_LUN;
 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
@@ -2054,10 +2071,18 @@ static void srp_remove_one(struct ib_device *device)
 	kfree(srp_dev);
 }
 
+static struct srp_function_template ib_srp_transport_functions = {
+};
+
 static int __init srp_init_module(void)
 {
 	int ret;
 
+	ib_srp_transport_template =
+		srp_attach_transport(&ib_srp_transport_functions);
+	if (!ib_srp_transport_template)
+		return -ENOMEM;
+
 	srp_template.sg_tablesize = srp_sg_tablesize;
 	srp_max_iu_len = (sizeof (struct srp_cmd) +
 			  sizeof (struct srp_indirect_buf) +
@@ -2066,6 +2091,7 @@ static int __init srp_init_module(void)
 	ret = class_register(&srp_class);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
+		srp_release_transport(ib_srp_transport_template);
 		return ret;
 	}
 
@@ -2074,6 +2100,7 @@ static int __init srp_init_module(void)
 	ret = ib_register_client(&srp_client);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register IB client\n");
+		srp_release_transport(ib_srp_transport_template);
 		ib_sa_unregister_client(&srp_sa_client);
 		class_unregister(&srp_class);
 		return ret;
@@ -2087,6 +2114,7 @@ static void __exit srp_cleanup_module(void)
 	ib_unregister_client(&srp_client);
 	ib_sa_unregister_client(&srp_sa_client);
 	class_unregister(&srp_class);
+	srp_release_transport(ib_srp_transport_template);
 }
 
 module_init(srp_init_module);