Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c                   |  5
-rw-r--r--  drivers/infiniband/core/umem.c                  | 11
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h       |  2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_eq.c            |  6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c          | 36
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c          |  6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c         | 12
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c          |  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c     | 24
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       | 29
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  3
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c       | 75
-rw-r--r--  drivers/infiniband/ulp/srp/Kconfig              |  1
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c             | 28
15 files changed, 151 insertions, 93 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ee946cc2576b..0751697ef984 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2803,11 +2803,12 @@ static void cma_remove_one(struct ib_device *device)
 
 static int cma_init(void)
 {
-	int ret, low, high;
+	int ret, low, high, remaining;
 
 	get_random_bytes(&next_port, sizeof next_port);
 	inet_get_local_port_range(&low, &high);
-	next_port = ((unsigned int) next_port % (high - low)) + low;
+	remaining = (high - low) + 1;
+	next_port = ((unsigned int) next_port % remaining) + low;
 
 	cma_wq = create_singlethread_workqueue("rdma_cm");
 	if (!cma_wq)
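
The hunk above fixes an off-by-one: inet_get_local_port_range() reports an inclusive [low, high] range, so there are (high - low) + 1 candidate ports, and reducing the random seed modulo (high - low) could never select high itself. A minimal userspace sketch of the arithmetic, with hypothetical example values (32768 and 61000 are only illustrative):

#include <stdio.h>

int main(void)
{
	int low = 32768, high = 61000;    /* inclusive range, as reported */
	unsigned int seed = 28232;        /* stand-in for get_random_bytes() */

	/* old formula: modulo (high - low) == 28232, so 'high' is unreachable */
	int old_port = (seed % (high - low)) + low;          /* 32768 */
	/* fixed formula: modulo (high - low) + 1 covers the full range */
	int new_port = (seed % ((high - low) + 1)) + low;    /* 61000 */

	printf("old: %d  new: %d\n", old_port, new_port);
	return 0;
}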
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a6..14159ff29408 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	ib_dma_unmap_sg(dev, chunk->page_list,
 			chunk->nents, DMA_BIDIRECTIONAL);
 	for (i = 0; i < chunk->nents; ++i) {
+		struct page *page = sg_page(&chunk->page_list[i]);
+
 		if (umem->writable && dirty)
-			set_page_dirty_lock(chunk->page_list[i].page);
-		put_page(chunk->page_list[i].page);
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
 
 	kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		}
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+		sg_init_table(chunk->page_list, chunk->nents);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (vma_list &&
 			    !is_vm_hugetlb_page(vma_list[i + off]))
 				umem->hugetlb = 0;
-			chunk->page_list[i].page   = page_list[i + off];
+			sg_set_page(&chunk->page_list[i], page_list[i + off]);
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 					DMA_BIDIRECTIONAL);
 		if (chunk->nmap <= 0) {
 			for (i = 0; i < chunk->nents; ++i)
-				put_page(chunk->page_list[i].page);
+				put_page(sg_page(&chunk->page_list[i]));
 			kfree(chunk);
 
 			ret = -ENOMEM;
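
These umem.c hunks are part of the tree-wide scatterlist conversion: with sg chaining, the low bits of an entry's page_link field carry chain/termination markers, so the page pointer must be read and written through sg_page()/sg_set_page(), and lists must be initialized with sg_init_table(), instead of touching a raw .page member. Below is a conceptual sketch of what those helpers do, simplified from the 2.6.24-era <linux/scatterlist.h>; the _sketch names and the flag mask are illustrative, and note that sg_set_page() of this era took only (sg, page), with length and offset assigned separately as the hunk shows:

struct page;                              /* opaque here */

struct scatterlist_sketch {
	unsigned long page_link;          /* page pointer | marker bits */
	unsigned int  offset;
	unsigned int  length;
};

#define SG_MARKER_BITS 0x3UL              /* bit 0: chain, bit 1: last */

static inline void sg_set_page_sketch(struct scatterlist_sketch *sg,
				      struct page *page)
{
	/* keep the chain/end markers, replace only the page pointer */
	sg->page_link = (unsigned long) page |
			(sg->page_link & SG_MARKER_BITS);
}

static inline struct page *sg_page_sketch(struct scatterlist_sketch *sg)
{
	return (struct page *) (sg->page_link & ~SG_MARKER_BITS);
}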
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 365bc5dfcc88..2d660ae189e5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -107,7 +107,7 @@ struct ehca_sport {
 
 struct ehca_shca {
 	struct ib_device ib_device;
-	struct ibmebus_dev *ibmebus_dev;
+	struct of_device *ofdev;
 	u8 num_ports;
 	int hw_level;
 	struct list_head shca_list;
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 1d41faa7a337..b4ac617a70e6 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -123,7 +123,7 @@ int ehca_create_eq(struct ehca_shca *shca,
 
 	/* register interrupt handlers and initialize work queues */
 	if (type == EHCA_EQ) {
-		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
+		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
 					  IRQF_DISABLED, "ehca_eq",
 					  (void *)shca);
 		if (ret < 0)
@@ -131,7 +131,7 @@ int ehca_create_eq(struct ehca_shca *shca,
 
 		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
 	} else if (type == EHCA_NEQ) {
-		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
+		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
 					  IRQF_DISABLED, "ehca_neq",
 					  (void *)shca);
 		if (ret < 0)
@@ -171,7 +171,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
 	u64 h_ret;
 
 	spin_lock_irqsave(&eq->spinlock, flags);
-	ibmebus_free_irq(NULL, eq->ist, (void *)shca);
+	ibmebus_free_irq(eq->ist, (void *)shca);
 
 	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 2f51c13d2f49..c6cd38c5321f 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -418,7 +418,7 @@ int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.node_type = RDMA_NODE_IB_CA;
 	shca->ib_device.phys_port_cnt = shca->num_ports;
 	shca->ib_device.num_comp_vectors = 1;
-	shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
+	shca->ib_device.dma_device = &shca->ofdev->dev;
 	shca->ib_device.query_device = ehca_query_device;
 	shca->ib_device.query_port = ehca_query_port;
 	shca->ib_device.query_gid = ehca_query_gid;
@@ -593,12 +593,12 @@ static ssize_t ehca_show_##name(struct device *dev, \
 								\
 	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);		\
 	if (!rblock) {						\
-		dev_err(dev, "Can't allocate rblock memory.");	\
+		dev_err(dev, "Can't allocate rblock memory.\n");	\
 		return 0;					\
 	}							\
 								\
 	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {	\
-		dev_err(dev, "Can't query device properties");	\
+		dev_err(dev, "Can't query device properties\n");	\
 		ehca_free_fw_ctrlblock(rblock);			\
 		return 0;					\
 	}							\
@@ -672,7 +672,7 @@ static struct attribute_group ehca_dev_attr_grp = {
 	.attrs = ehca_dev_attrs
 };
 
-static int __devinit ehca_probe(struct ibmebus_dev *dev,
+static int __devinit ehca_probe(struct of_device *dev,
 				const struct of_device_id *id)
 {
 	struct ehca_shca *shca;
@@ -680,16 +680,16 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 	struct ib_pd *ibpd;
 	int ret;
 
-	handle = of_get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
+	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
 	if (!handle) {
 		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
-			     dev->ofdev.node->full_name);
+			     dev->node->full_name);
 		return -ENODEV;
 	}
 
 	if (!(*handle)) {
 		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
-			     dev->ofdev.node->full_name);
+			     dev->node->full_name);
 		return -ENODEV;
 	}
 
@@ -700,9 +700,9 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 	}
 	mutex_init(&shca->modify_mutex);
 
-	shca->ibmebus_dev = dev;
+	shca->ofdev = dev;
 	shca->ipz_hca_handle.handle = *handle;
-	dev->ofdev.dev.driver_data = shca;
+	dev->dev.driver_data = shca;
 
 	ret = ehca_sense_attributes(shca);
 	if (ret < 0) {
@@ -778,7 +778,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		}
 	}
 
-	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+	ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
 	if (ret) /* only complain; we can live without attributes */
 		ehca_err(&shca->ib_device,
 			 "Cannot create device attributes ret=%d", ret);
@@ -828,12 +828,12 @@ probe1:
 	return -EINVAL;
 }
 
-static int __devexit ehca_remove(struct ibmebus_dev *dev)
+static int __devexit ehca_remove(struct of_device *dev)
 {
-	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
+	struct ehca_shca *shca = dev->dev.driver_data;
 	int ret;
 
-	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
 
 	if (ehca_open_aqp1 == 1) {
 		int i;
@@ -884,11 +884,11 @@ static struct of_device_id ehca_device_table[] =
 	{},
 };
 
-static struct ibmebus_driver ehca_driver = {
+static struct of_platform_driver ehca_driver = {
 	.name     = "ehca",
-	.id_table = ehca_device_table,
+	.match_table = ehca_device_table,
 	.probe    = ehca_probe,
 	.remove   = ehca_remove,
 };
 
 void ehca_poll_eqs(unsigned long data)
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index bb9791555f49..e239bbf54da1 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1769,7 +1769,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 	list_for_each_entry_continue(
 		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
 		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-			pgaddr = page_to_pfn(chunk->page_list[i].page)
+			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
 				<< PAGE_SHIFT ;
 			*kpage = phys_to_abs(pgaddr +
 					     (pginfo->next_hwpage *
@@ -1825,7 +1825,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
 {
 	int t;
 	for (t = start_idx; t <= end_idx; t++) {
-		u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
+		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
 		ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
 			     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
 		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
@@ -1860,7 +1860,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
 		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
 			if (nr_kpages == kpages_per_hwpage) {
-				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
+				pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
 					   << PAGE_SHIFT );
 				*kpage = phys_to_abs(pgaddr);
 				if ( !(*kpage) ) {
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index f87f003e3ef8..e90a0ea538a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "ipath_verbs.h"
@@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction direction)
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+			int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	u64 addr;
 	int i;
 	int ret = nents;
 
 	BUG_ON(!valid_dma_direction(direction));
 
-	for (i = 0; i < nents; i++) {
-		addr = (u64) page_address(sg[i].page);
+	for_each_sg(sgl, sg, nents, i) {
+		addr = (u64) page_address(sg_page(sg));
 		/* TODO: handle highmem pages */
 		if (!addr) {
 			ret = 0;
@@ -125,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
 
 static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
 {
-	u64 addr = (u64) page_address(sg->page);
+	u64 addr = (u64) page_address(sg_page(sg));
 
 	if (addr)
 		addr += sg->offset;
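
The ipath conversion shows the companion iteration change: indexing sg[i] assumes the entries form one flat array, which is no longer true once scatterlists may be chained, so walks go through for_each_sg(), which advances with sg_next() and transparently hops chain links. The macro is essentially the loop below (paraphrased from the scatterlist header of this era):

/* equivalent shape of for_each_sg(); sg_next() skips over chain entries */
#define for_each_sg_sketch(sglist, sg, nents, i)			\
	for ((i) = 0, (sg) = (sglist); (i) < (nents);			\
	     (i)++, (sg) = sg_next(sg))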
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e442470a2375..db4ba92f79fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			for (i = 0; i < chunk->nents; i++) {
 				void *vaddr;
 
-				vaddr = page_address(chunk->page_list[i].page);
+				vaddr = page_address(sg_page(&chunk->page_list[i]));
 				if (!vaddr) {
 					ret = ERR_PTR(-EINVAL);
 					goto bail;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e626980..007b38157fc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
 				 PCI_DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
-		__free_pages(chunk->mem[i].page,
+		__free_pages(sg_page(&chunk->mem[i]),
 			     get_order(chunk->mem[i].length));
 }
 
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
 
 	for (i = 0; i < chunk->npages; ++i) {
 		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-				  lowmem_page_address(chunk->mem[i].page),
+				  lowmem_page_address(sg_page(&chunk->mem[i])),
 				  sg_dma_address(&chunk->mem[i]));
 	}
 }
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
 
 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-	mem->page = alloc_pages(gfp_mask, order);
-	if (!mem->page)
+	struct page *page;
+
+	page = alloc_pages(gfp_mask, order);
+	if (!page)
 		return -ENOMEM;
 
+	sg_set_page(mem, page);
 	mem->length = PAGE_SIZE << order;
 	mem->offset = 0;
 	return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 		if (!chunk)
 			goto fail;
 
+		sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
 		chunk->npages = 0;
 		chunk->nsg    = 0;
 		list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
 		 * so if we found the page, dma_handle has already
 		 * been assigned to. */
 		if (chunk->mem[i].length > offset) {
-			page = chunk->mem[i].page;
+			page = sg_page(&chunk->mem[i]);
 			goto out;
 		}
 		offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
 {
+	struct page *pages[1];
 	int ret = 0;
 	u8 status;
 	int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	}
 
 	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-			     &db_tab->page[i].mem.page, NULL);
+			     pages, NULL);
 	if (ret < 0)
 		goto out;
 
+	sg_set_page(&db_tab->page[i].mem, pages[0]);
 	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
 	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
 
 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 	if (ret < 0) {
-		put_page(db_tab->page[i].mem.page);
+		put_page(pages[0]);
 		goto out;
 	}
 
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		ret = -EINVAL;
 	if (ret) {
 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-		put_page(db_tab->page[i].mem.page);
+		put_page(sg_page(&db_tab->page[i].mem));
 		goto out;
 	}
 
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-			put_page(db_tab->page[i].mem.page);
+			put_page(sg_page(&db_tab->page[i].mem));
 		}
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0a00ea0e887d..eb7edab0e836 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -348,6 +348,7 @@ struct ipoib_neigh {
 	struct sk_buff_head queue;
 
 	struct neighbour *neighbour;
+	struct net_device *dev;
 
 	struct list_head list;
 };
@@ -364,7 +365,8 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
 				     INFINIBAND_ALEN, sizeof(void *));
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh,
+				      struct net_device *dev);
 void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
 
 extern struct workqueue_struct *ipoib_workqueue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ace2345960ee..a03a65ebcf0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -515,7 +515,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
 
-	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
@@ -690,9 +690,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
 	} else if (neigh->ah) {
-		if (unlikely(memcmp(&neigh->dgid.raw,
-				    skb->dst->neighbour->ha + 4,
-				    sizeof(union ib_gid)))) {
+		if (unlikely((memcmp(&neigh->dgid.raw,
+				     skb->dst->neighbour->ha + 4,
+				     sizeof(union ib_gid))) ||
+			     (neigh->dev != dev))) {
 			spin_lock(&priv->lock);
 			/*
 			 * It's safe to call ipoib_put_ah() inside
@@ -815,6 +816,13 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 	unsigned long flags;
 	struct ipoib_ah *ah = NULL;
 
+	neigh = *to_ipoib_neigh(n);
+	if (neigh) {
+		priv = netdev_priv(neigh->dev);
+		ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
+			  n->dev->name);
+	} else
+		return;
 	ipoib_dbg(priv,
 		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
 		  IPOIB_QPN(n->ha),
@@ -822,13 +830,10 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	neigh = *to_ipoib_neigh(n);
-	if (neigh) {
-		if (neigh->ah)
-			ah = neigh->ah;
-		list_del(&neigh->list);
-		ipoib_neigh_free(n->dev, neigh);
-	}
+	if (neigh->ah)
+		ah = neigh->ah;
+	list_del(&neigh->list);
+	ipoib_neigh_free(n->dev, neigh);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -836,7 +841,8 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 		ipoib_put_ah(ah);
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
+				      struct net_device *dev)
 {
 	struct ipoib_neigh *neigh;
 
@@ -845,6 +851,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
 		return NULL;
 
 	neigh->neighbour = neighbour;
+	neigh->dev = dev;
 	*to_ipoib_neigh(neighbour) = neigh;
 	skb_queue_head_init(&neigh->queue);
 	ipoib_cm_set(neigh, NULL);
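
The ipoib hunks above add bonding support: each ipoib_neigh now records the net_device it was created on, so both the transmit path and the neighbour destructor can tell when a bonding failover has moved traffic to a different slave and the cached address handle is stale. A hedged sketch of the check the xmit path now performs (neigh_is_stale_sketch() is a hypothetical helper, not a function in the driver):

static int neigh_is_stale_sketch(const struct ipoib_neigh *neigh,
				 const struct net_device *dev,
				 const unsigned char *ha)
{
	/* the destination GID lives at offset 4 of the hardware address */
	return memcmp(&neigh->dgid.raw, ha + 4, sizeof(union ib_gid)) ||
	       neigh->dev != dev;
}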
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 827820ec66d1..9bcfc7ad6aa6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -705,7 +705,8 @@ out:
 	if (skb->dst            &&
 	    skb->dst->neighbour &&
 	    !*to_ipoib_neigh(skb->dst->neighbour)) {
-		struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+		struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
+							      skb->dev);
 
 		if (neigh) {
 			kref_get(&mcast->ah->ref);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e05690e3592f..d68798061795 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sg = (struct scatterlist *)data->buf;
+		struct scatterlist *sgl = (struct scatterlist *)data->buf;
+		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
-		for (p = mem, i = 0; i < data->size; i++) {
-			from = kmap_atomic(sg[i].page, KM_USER0);
+		p = mem;
+		for_each_sg(sgl, sg, data->size, i) {
+			from = kmap_atomic(sg_page(sg), KM_USER0);
 			memcpy(p,
-			       from + sg[i].offset,
-			       sg[i].length);
+			       from + sg->offset,
+			       sg->length);
 			kunmap_atomic(from, KM_USER0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
-		struct scatterlist *sg;
+		struct scatterlist *sgl, *sg;
 		unsigned char *p, *to;
 		unsigned int sg_size;
 		int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
 		sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-		for (p = mem, i = 0; i < sg_size; i++){
-			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-			memcpy(to + sg[i].offset,
+		p = mem;
+		for_each_sg(sgl, sg, sg_size, i) {
+			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+			memcpy(to + sg->offset,
 			       p,
-			       sg[i].length);
+			       sg->length);
 			kunmap_atomic(to, KM_SOFTIRQ0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
199 202
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	for (i = 0; i < data->dma_nents; i++) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+	for_each_sg(sgl, sg, data->dma_nents, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, sg);
 		last_addr  = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
 			i++;
-			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, sg);
 			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 
255 260
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 					      struct ib_device *ibdev)
 {
-	struct scatterlist *sg;
+	struct scatterlist *sgl, *sg;
 	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
-	sg = (struct scatterlist *)data->buf;
+	sgl = (struct scatterlist *)data->buf;
 
-	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+	cnt = 0;
+	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg[i].page),
-		   (unsigned long)sg[i].offset,
-		   (unsigned long)sg[i].length); */
-		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-			   ib_sg_dma_len(ibdev, &sg[i]);
+		   (unsigned long)sg_phys(sg),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
 		/* iser_dbg("Checking sg iobuf end address "
 		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < data->dma_nents; i++)
+	for_each_sg(sgl, sg, data->dma_nents, i)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-			 sg[i].page, sg[i].offset,
-			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+			 sg_page(sg), sg->offset,
+			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
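
Two details in the iser conversion are easy to miss: lookahead, where &sg[i+1] becomes sg_next(sg), and the inner while loop, where bumping i by hand must now be paired with an explicit sg = sg_next(sg). A hedged sketch of the resulting pattern, with the hypothetical helper name count_contiguous_runs() used purely for illustration:

static int count_contiguous_runs(struct ib_device *ibdev,
				 struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, runs = 0;

	for_each_sg(sgl, sg, nents, i) {
		u64 end = ib_sg_dma_address(ibdev, sg) +
			  ib_sg_dma_len(ibdev, sg);

		/* sg_next(sg), never &sg[i + 1]: the next entry may live
		 * in a different chained array */
		if (i + 1 < nents &&
		    end == ib_sg_dma_address(ibdev, sg_next(sg)))
			continue;
		runs++;
	}
	return runs;
}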
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
index 3432dce29520..c74ee9633041 100644
--- a/drivers/infiniband/ulp/srp/Kconfig
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -1,6 +1,7 @@
 config INFINIBAND_SRP
 	tristate "InfiniBand SCSI RDMA Protocol"
 	depends on SCSI
+	select SCSI_SRP_ATTRS
 	---help---
 	  Support for the SCSI RDMA Protocol over InfiniBand.  This
 	  allows you to access storage devices that speak SRP over
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9ccc63886d92..950228fb009f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -47,6 +47,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/srp.h>
+#include <scsi/scsi_transport_srp.h>
 
 #include <rdma/ib_cache.h>
 
@@ -86,6 +87,8 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
+static struct scsi_transport_template *ib_srp_transport_template;
+
 static struct ib_client srp_client = {
 	.name   = "srp",
 	.add    = srp_add_one,
@@ -420,6 +423,7 @@ static void srp_remove_work(struct work_struct *work)
 	list_del(&target->list);
 	spin_unlock(&target->srp_host->target_lock);
 
+	srp_remove_host(target->scsi_host);
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
@@ -1544,12 +1548,24 @@ static struct scsi_host_template srp_template = {
 
 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 {
+	struct srp_rport_identifiers ids;
+	struct srp_rport *rport;
+
 	sprintf(target->target_name, "SRP.T10:%016llX",
 		 (unsigned long long) be64_to_cpu(target->id_ext));
 
 	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
 		return -ENODEV;
 
+	memcpy(ids.port_id, &target->id_ext, 8);
+	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
+	ids.roles = SRP_RPORT_ROLE_TARGET;
+	rport = srp_rport_add(target->scsi_host, &ids);
+	if (IS_ERR(rport)) {
+		scsi_remove_host(target->scsi_host);
+		return PTR_ERR(rport);
+	}
+
 	spin_lock(&host->target_lock);
 	list_add_tail(&target->list, &host->target_list);
 	spin_unlock(&host->target_lock);
@@ -1775,6 +1791,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (!target_host)
 		return -ENOMEM;
 
+	target_host->transportt  = ib_srp_transport_template;
 	target_host->max_lun     = SRP_MAX_LUN;
 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
@@ -2054,10 +2071,18 @@ static void srp_remove_one(struct ib_device *device)
 	kfree(srp_dev);
 }
 
+static struct srp_function_template ib_srp_transport_functions = {
+};
+
 static int __init srp_init_module(void)
 {
 	int ret;
 
+	ib_srp_transport_template =
+		srp_attach_transport(&ib_srp_transport_functions);
+	if (!ib_srp_transport_template)
+		return -ENOMEM;
+
 	srp_template.sg_tablesize = srp_sg_tablesize;
 	srp_max_iu_len = (sizeof (struct srp_cmd) +
 			  sizeof (struct srp_indirect_buf) +
@@ -2066,6 +2091,7 @@ static int __init srp_init_module(void)
 	ret = class_register(&srp_class);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
+		srp_release_transport(ib_srp_transport_template);
 		return ret;
 	}
 
2071 2097
@@ -2074,6 +2100,7 @@ static int __init srp_init_module(void)
 	ret = ib_register_client(&srp_client);
 	if (ret) {
 		printk(KERN_ERR PFX "couldn't register IB client\n");
+		srp_release_transport(ib_srp_transport_template);
 		ib_sa_unregister_client(&srp_sa_client);
 		class_unregister(&srp_class);
 		return ret;
@@ -2087,6 +2114,7 @@ static void __exit srp_cleanup_module(void)
 	ib_unregister_client(&srp_client);
 	ib_sa_unregister_client(&srp_sa_client);
 	class_unregister(&srp_class);
+	srp_release_transport(ib_srp_transport_template);
 }
 
 module_init(srp_init_module);
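
Taken together, the ib_srp changes wire the driver into the SRP SCSI transport class. A minimal sketch of that lifecycle under the same API: attach a (possibly empty) srp_function_template at module init, point each Scsi_Host at the returned template before scsi_add_host(), register one remote port per target, and release everything in reverse order on every error path and at exit. Names prefixed my_ are hypothetical:

#include <linux/err.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

static struct srp_function_template my_srp_ft = {
	/* no callbacks are required just to get rport sysfs attributes */
};
static struct scsi_transport_template *my_tt;

static int my_attach_transport(void)
{
	my_tt = srp_attach_transport(&my_srp_ft);
	return my_tt ? 0 : -ENOMEM;
}

static int my_add_target(struct Scsi_Host *shost, u64 id_ext, u64 ioc_guid)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	/* shost->transportt must already point at my_tt here */
	memcpy(ids.port_id, &id_ext, 8);
	memcpy(ids.port_id + 8, &ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;

	rport = srp_rport_add(shost, &ids);
	return IS_ERR(rport) ? PTR_ERR(rport) : 0;
}

static void my_detach_transport(void)
{
	/* mirrors srp_cleanup_module(); also call srp_remove_host()
	 * before scsi_remove_host() when tearing down a target */
	srp_release_transport(my_tt);
}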