author	Linus Torvalds <torvalds@g5.osdl.org>	2006-11-02 17:36:05 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-11-02 17:36:05 -0500
commit	40eb006685387b2861bd7196be0ab7144c5d5b71 (patch)
tree	c43ba94aa8632127ce9433dce0832e4e2734a2b4
parent	e957b00a8caece90cbc2afd0a4cb4c4d61b9efa8 (diff)
parent	7a118df3ea23820b9922a1b51cd2f24e464f4c17 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/addr: Use client registration to fix module unload race
  IB/mthca: Fix MAD extended header format for MAD_IFC firmware command
  IB/uverbs: Return sq_draining value in query_qp response
  IB/amso1100: Fix incorrect pr_debug()
  IB/amso1100: Use dma_alloc_coherent() instead of kmalloc/dma_map_single
  IB/ehca: Fix eHCA driver compilation for uniprocessor
  RDMA/cma: rdma_bind_addr() leaks a cma_dev reference count
  IB/iser: Start connection after enabling iSER
-rw-r--r--	drivers/infiniband/core/addr.c	28
-rw-r--r--	drivers/infiniband/core/cma.c	31
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	2
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_alloc.c	13
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_cq.c	18
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_rnic.c	56
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_tools.h	1
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_cmd.c	14
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c	4
-rw-r--r--	include/rdma/ib_addr.h	20
-rw-r--r--	include/rdma/ib_user_verbs.h	2
11 files changed, 114 insertions(+), 75 deletions(-)
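
For context on the first changelog entry above (RDMA/addr: Use client registration to fix module unload race): the merge adds struct rdma_addr_client with rdma_addr_register_client()/rdma_addr_unregister_client(), and rdma_resolve_ip() now takes the client as its first argument so that unregistering waits for all outstanding requests. The following is a minimal caller-side sketch modeled on the cma.c hunks in this diff; the module name, handler, and timeout value are illustrative only, not part of this merge.

#include <linux/module.h>
#include <linux/socket.h>
#include <rdma/ib_addr.h>

/* Hypothetical consumer of the ib_addr service (illustrative name). */
static struct rdma_addr_client my_addr_client;

static void my_addr_handler(int status, struct sockaddr *src_addr,
			    struct rdma_dev_addr *addr, void *context)
{
	/* Called when resolution completes; status == 0 means success. */
}

/* The client is now the first argument of rdma_resolve_ip(). */
static int my_resolve(struct sockaddr *src, struct sockaddr *dst,
		      struct rdma_dev_addr *dev_addr, void *ctx)
{
	return rdma_resolve_ip(&my_addr_client, src, dst, dev_addr,
			       2000, my_addr_handler, ctx);
}

static int __init my_init(void)
{
	/* Initializes the client's refcount and completion. */
	rdma_addr_register_client(&my_addr_client);
	return 0;
}

static void __exit my_exit(void)
{
	/*
	 * Drops the initial reference and waits until every request
	 * issued with this client has called back, so the module cannot
	 * be unloaded while address-resolution callbacks are pending.
	 */
	rdma_addr_unregister_client(&my_addr_client);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
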
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 60d3fbdd216c..e11187ecc931 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -47,6 +47,7 @@ struct addr_req {
 	struct sockaddr src_addr;
 	struct sockaddr dst_addr;
 	struct rdma_dev_addr *addr;
+	struct rdma_addr_client *client;
 	void *context;
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
@@ -61,6 +62,26 @@ static LIST_HEAD(req_list);
 static DECLARE_WORK(work, process_req, NULL);
 static struct workqueue_struct *addr_wq;
 
+void rdma_addr_register_client(struct rdma_addr_client *client)
+{
+	atomic_set(&client->refcount, 1);
+	init_completion(&client->comp);
+}
+EXPORT_SYMBOL(rdma_addr_register_client);
+
+static inline void put_client(struct rdma_addr_client *client)
+{
+	if (atomic_dec_and_test(&client->refcount))
+		complete(&client->comp);
+}
+
+void rdma_addr_unregister_client(struct rdma_addr_client *client)
+{
+	put_client(client);
+	wait_for_completion(&client->comp);
+}
+EXPORT_SYMBOL(rdma_addr_unregister_client);
+
 int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 		   const unsigned char *dst_dev_addr)
 {
@@ -229,6 +250,7 @@ static void process_req(void *data)
 		list_del(&req->list);
 		req->callback(req->status, &req->src_addr, req->addr,
 			      req->context);
+		put_client(req->client);
 		kfree(req);
 	}
 }
@@ -264,7 +286,8 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
 	return ret;
 }
 
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct rdma_addr_client *client,
+		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		    struct rdma_dev_addr *addr, int timeout_ms,
 		    void (*callback)(int status, struct sockaddr *src_addr,
 				     struct rdma_dev_addr *addr, void *context),
@@ -285,6 +308,8 @@ int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
 	req->addr = addr;
 	req->callback = callback;
 	req->context = context;
+	req->client = client;
+	atomic_inc(&client->refcount);
 
 	src_in = (struct sockaddr_in *) &req->src_addr;
 	dst_in = (struct sockaddr_in *) &req->dst_addr;
@@ -305,6 +330,7 @@ int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		break;
 	default:
 		ret = req->status;
+		atomic_dec(&client->refcount);
 		kfree(req);
 		break;
 	}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9ae4f3a67c70..845090b0859c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -63,6 +63,7 @@ static struct ib_client cma_client = {
 };
 
 static struct ib_sa_client sa_client;
+static struct rdma_addr_client addr_client;
 static LIST_HEAD(dev_list);
 static LIST_HEAD(listen_any_list);
 static DEFINE_MUTEX(lock);
@@ -1625,8 +1626,8 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&id->route.addr.src_addr, dst_addr,
-				      &id->route.addr.dev_addr,
+		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
 		goto err;
@@ -1762,22 +1763,29 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	if (!cma_any_addr(addr)) {
 		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
-		if (!ret) {
-			mutex_lock(&lock);
-			ret = cma_acquire_dev(id_priv);
-			mutex_unlock(&lock);
-		}
 		if (ret)
-			goto err;
+			goto err1;
+
+		mutex_lock(&lock);
+		ret = cma_acquire_dev(id_priv);
+		mutex_unlock(&lock);
+		if (ret)
+			goto err1;
 	}
 
 	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
 	ret = cma_get_port(id_priv);
 	if (ret)
-		goto err;
+		goto err2;
 
 	return 0;
-err:
+err2:
+	if (!cma_any_addr(addr)) {
+		mutex_lock(&lock);
+		cma_detach_from_dev(id_priv);
+		mutex_unlock(&lock);
+	}
+err1:
 	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
 	return ret;
 }
@@ -2210,6 +2218,7 @@ static int cma_init(void)
 		return -ENOMEM;
 
 	ib_sa_register_client(&sa_client);
+	rdma_addr_register_client(&addr_client);
 
 	ret = ib_register_client(&cma_client);
 	if (ret)
@@ -2217,6 +2226,7 @@ static int cma_init(void)
 	return 0;
 
 err:
+	rdma_addr_unregister_client(&addr_client);
 	ib_sa_unregister_client(&sa_client);
 	destroy_workqueue(cma_wq);
 	return ret;
@@ -2225,6 +2235,7 @@ err:
 static void cma_cleanup(void)
 {
 	ib_unregister_client(&cma_client);
+	rdma_addr_unregister_client(&addr_client);
 	ib_sa_unregister_client(&sa_client);
 	destroy_workqueue(cma_wq);
 	idr_destroy(&sdp_ps);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b72c7f69ca90..743247ec065e 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1214,7 +1214,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.qp_access_flags        = attr->qp_access_flags;
 	resp.pkey_index             = attr->pkey_index;
 	resp.alt_pkey_index         = attr->alt_pkey_index;
-	resp.en_sqd_async_notify    = attr->en_sqd_async_notify;
+	resp.sq_draining            = attr->sq_draining;
 	resp.max_rd_atomic          = attr->max_rd_atomic;
 	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
 	resp.min_rnr_timer          = attr->min_rnr_timer;
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 028a60bbfca9..0315f99e4191 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
 		root = next;
 	}
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 9d7bcc5ade93..05c9154d46f4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
-
-	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
-			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
-	free_pages((unsigned long) mq->msg_pool.host,
-		   get_order(mq->q_size * mq->msg_size));
+	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 			   int msg_size)
 {
-	unsigned long pool_start;
+	u8 *pool_start;
 
-	pool_start = __get_free_pages(GFP_KERNEL,
-				      get_order(q_size * msg_size));
+	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+					&mq->host_dma, GFP_KERNEL);
 	if (!pool_start)
 		return -ENOMEM;
 
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		       0,		/* index (currently unknown) */
 		       q_size,
 		       msg_size,
-		       (u8 *) pool_start,
+		       pool_start,
 		       NULL,	/* peer (currently unknown) */
 		       C2_MQ_HOST_TARGET);
 
-	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
-				      (void *)pool_start,
-				      q_size * msg_size, DMA_FROM_DEVICE);
 	pci_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 30409e179606..21d9612a56cc 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-						(void *)q1_pages, qsize * msgsize,
-						DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,17 +538,15 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					     (void *)q2_pages, qsize * msgsize,
-					     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
-		 (unsigned long long) c2dev->rep_vq.host_dma);
+	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
+		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
 		       2,
 		       qsize,
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
 bail4:
 	vq_term(c2dev);
 bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
 bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
 bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
 bail0:
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
 
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 809da3ef706b..973c4b591545 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -63,6 +63,7 @@
 #include <asm/ibmebus.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
+#include <asm/hvcall.h>
 
 extern int ehca_debug_level;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 99a94d710935..768df7265b81 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1820,11 +1820,11 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 
 #define MAD_IFC_BOX_SIZE      0x400
 #define MAD_IFC_MY_QPN_OFFSET 0x100
-#define MAD_IFC_RQPN_OFFSET   0x104
-#define MAD_IFC_SL_OFFSET     0x108
-#define MAD_IFC_G_PATH_OFFSET 0x109
-#define MAD_IFC_RLID_OFFSET   0x10a
-#define MAD_IFC_PKEY_OFFSET   0x10e
+#define MAD_IFC_RQPN_OFFSET   0x108
+#define MAD_IFC_SL_OFFSET     0x10c
+#define MAD_IFC_G_PATH_OFFSET 0x10d
+#define MAD_IFC_RLID_OFFSET   0x10e
+#define MAD_IFC_PKEY_OFFSET   0x112
 #define MAD_IFC_GRH_OFFSET    0x140
 
 	inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1862,7 +1862,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 
 		val = in_wc->dlid_path_bits |
 			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
-		MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET);
+		MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
 
 		MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
 		MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
@@ -1870,7 +1870,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 		if (in_grh)
 			memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
 
-		op_modifier |= 0x10;
+		op_modifier |= 0x4;
 
 		in_modifier |= in_wc->slid << 16;
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index eb6f98d82289..9b2041e25d59 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -363,11 +363,11 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	int err;
 
-	err = iscsi_conn_start(cls_conn);
+	err = iser_conn_set_full_featured_mode(conn);
 	if (err)
 		return err;
 
-	return iser_conn_set_full_featured_mode(conn);
+	return iscsi_conn_start(cls_conn);
 }
 
 static struct iscsi_transport iscsi_iser_transport;
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 81b62307621d..c094e5012862 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -36,6 +36,22 @@
 #include <linux/socket.h>
 #include <rdma/ib_verbs.h>
 
+struct rdma_addr_client {
+	atomic_t refcount;
+	struct completion comp;
+};
+
+/**
+ * rdma_addr_register_client - Register an address client.
+ */
+void rdma_addr_register_client(struct rdma_addr_client *client);
+
+/**
+ * rdma_addr_unregister_client - Deregister an address client.
+ * @client: Client object to deregister.
+ */
+void rdma_addr_unregister_client(struct rdma_addr_client *client);
+
 struct rdma_dev_addr {
 	unsigned char src_dev_addr[MAX_ADDR_LEN];
 	unsigned char dst_dev_addr[MAX_ADDR_LEN];
@@ -52,6 +68,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr);
 /**
  * rdma_resolve_ip - Resolve source and destination IP addresses to
  *   RDMA hardware addresses.
+ * @client: Address client associated with request.
  * @src_addr: An optional source address to use in the resolution.  If a
  *   source address is not provided, a usable address will be returned via
  *   the callback.
@@ -64,7 +81,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr);
  *   or been canceled.  A status of 0 indicates success.
  * @context: User-specified context associated with the call.
  */
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct rdma_addr_client *client,
+		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		    struct rdma_dev_addr *addr, int timeout_ms,
 		    void (*callback)(int status, struct sockaddr *src_addr,
 				     struct rdma_dev_addr *addr, void *context),
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index db1b814b62cc..64a721fcbc1c 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -458,7 +458,7 @@ struct ib_uverbs_query_qp_resp {
 	__u8 cur_qp_state;
 	__u8 path_mtu;
 	__u8 path_mig_state;
-	__u8 en_sqd_async_notify;
+	__u8 sq_draining;
 	__u8 max_rd_atomic;
 	__u8 max_dest_rd_atomic;
 	__u8 min_rnr_timer;