author    Paul Mackerras <paulus@samba.org>  2007-05-07 23:37:51 -0400
committer Paul Mackerras <paulus@samba.org>  2007-05-07 23:37:51 -0400
commit    02bbc0f09c90cefdb2837605c96a66c5ce4ba2e1
tree      04ef573cd4de095c500c9fc3477f4278c0b36300  /drivers/infiniband
parent    7487a2245b8841c77ba9db406cf99a483b9334e9
parent    5b94f675f57e4ff16c8fda09088d7480a84dcd91
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                  |  1
-rw-r--r--  drivers/infiniband/core/fmr_pool.c            | 32
-rw-r--r--  drivers/infiniband/core/iwcm.c                |  1
-rw-r--r--  drivers/infiniband/core/mad.c                 |  2
-rw-r--r--  drivers/infiniband/core/mad_priv.h            |  1
-rw-r--r--  drivers/infiniband/core/multicast.c           |  1
-rw-r--r--  drivers/infiniband/core/sa_query.c            |  1
-rw-r--r--  drivers/infiniband/core/user_mad.c            |  1
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c          |  1
-rw-r--r--  drivers/infiniband/core/uverbs_main.c         |  2
-rw-r--r--  drivers/infiniband/core/verbs.c               |  4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h           |  2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c        | 16
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c  |  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        |  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h         |  1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c         | 19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h         |  6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   | 14
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c         | 69
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c          |  2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h      |  4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c        |  3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c        | 14
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.h        |  8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c        | 68
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c        |  1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c     |  1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mmap.c      | 64
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c        | 52
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c        | 55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c       | 55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c     |  2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c     |  1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c     |  4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h     | 24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h   |  1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        | 13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h          |  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c       | 14
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c       | 89
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c     |  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c    |  2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  |  2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      |  2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           | 27
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h           |  1
50 files changed, 470 insertions(+), 241 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 842cd0b53e91..eff591deeb46 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -40,7 +40,6 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 1d796e7c8199..a06bcc65a871 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -43,6 +43,8 @@
 
 #include "core_priv.h"
 
+#define PFX "fmr_pool: "
+
 enum {
         IB_FMR_MAX_REMAPS = 32,
 
@@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
         if (fmr->ref_count !=0) {
-                printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d",
+                printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d",
                        fmr, fmr->ref_count);
         }
 #endif
@@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
         ret = ib_unmap_fmr(&fmr_list);
         if (ret)
-                printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);
+                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret);
 
         spin_lock_irq(&pool->pool_lock);
         list_splice(&unmap_list, &pool->free_list);
@@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
         device = pd->device;
         if (!device->alloc_fmr    || !device->dealloc_fmr  ||
             !device->map_phys_fmr || !device->unmap_fmr) {
-                printk(KERN_WARNING "Device %s does not support fast memory regions",
+                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                        device->name);
                 return ERR_PTR(-ENOSYS);
         }
 
         attr = kmalloc(sizeof *attr, GFP_KERNEL);
         if (!attr) {
-                printk(KERN_WARNING "couldn't allocate device attr struct");
+                printk(KERN_WARNING PFX "couldn't allocate device attr struct");
                 return ERR_PTR(-ENOMEM);
         }
 
         ret = ib_query_device(device, attr);
         if (ret) {
-                printk(KERN_WARNING "couldn't query device");
+                printk(KERN_WARNING PFX "couldn't query device: %d", ret);
                 kfree(attr);
                 return ERR_PTR(ret);
         }
@@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
         pool = kmalloc(sizeof *pool, GFP_KERNEL);
         if (!pool) {
-                printk(KERN_WARNING "couldn't allocate pool struct");
+                printk(KERN_WARNING PFX "couldn't allocate pool struct");
                 return ERR_PTR(-ENOMEM);
         }
 
@@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                         kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                 GFP_KERNEL);
                 if (!pool->cache_bucket) {
-                        printk(KERN_WARNING "Failed to allocate cache in pool");
+                        printk(KERN_WARNING PFX "Failed to allocate cache in pool");
                         ret = -ENOMEM;
                         goto out_free_pool;
                 }
@@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
294 "ib_fmr(%s)", 296 "ib_fmr(%s)",
295 device->name); 297 device->name);
296 if (IS_ERR(pool->thread)) { 298 if (IS_ERR(pool->thread)) {
297 printk(KERN_WARNING "couldn't start cleanup thread"); 299 printk(KERN_WARNING PFX "couldn't start cleanup thread");
298 ret = PTR_ERR(pool->thread); 300 ret = PTR_ERR(pool->thread);
299 goto out_free_pool; 301 goto out_free_pool;
300 } 302 }
@@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
         fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                       GFP_KERNEL);
         if (!fmr) {
-                printk(KERN_WARNING "failed to allocate fmr struct "
-                       "for FMR %d", i);
+                printk(KERN_WARNING PFX "failed to allocate fmr "
+                       "struct for FMR %d", i);
                 goto out_fail;
         }
 
@@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
                 fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                 if (IS_ERR(fmr->fmr)) {
-                        printk(KERN_WARNING "fmr_create failed for FMR %d", i);
+                        printk(KERN_WARNING PFX "fmr_create failed "
+                               "for FMR %d", i);
                         kfree(fmr);
                         goto out_fail;
                 }
@@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
         }
 
         if (i < pool->pool_size)
-                printk(KERN_WARNING "pool still has %d regions registered",
+                printk(KERN_WARNING PFX "pool still has %d regions registered",
                        pool->pool_size - i);
 
         kfree(pool->cache_bucket);
@@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                 list_add(&fmr->list, &pool->free_list);
                 spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-                printk(KERN_WARNING "fmr_map returns %d\n",
-                       result);
+                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
 
                 return ERR_PTR(result);
         }
@@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
         if (fmr->ref_count < 0)
-                printk(KERN_WARNING "FMR %p has ref count %d < 0",
+                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0",
                        fmr, fmr->ref_count);
 #endif
 
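
[The fmr_pool.c hunks above all apply the same idiom: a file-local PFX macro concatenated into each printk so every message identifies its source. A minimal sketch of the pattern, with an illustrative message:]

        #define PFX "fmr_pool: "

        /* Adjacent string literals concatenate at compile time, so this
         * emits "fmr_pool: couldn't start cleanup thread". */
        printk(KERN_WARNING PFX "couldn't start cleanup thread");
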
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 891d1fa7b2eb..223b1aa7d92b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -39,7 +39,6 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 6edfecf1be72..85ccf13b8041 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2771,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device,
         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
         port_priv->cq = ib_create_cq(port_priv->device,
                                      ib_mad_thread_completion_handler,
-                                     NULL, port_priv, cq_size);
+                                     NULL, port_priv, cq_size, 0);
         if (IS_ERR(port_priv->cq)) {
                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                 ret = PTR_ERR(port_priv->cq);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index de89717f49fe..9be5cc00a3a9 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -39,7 +39,6 @@
 
 #include <linux/completion.h>
 #include <linux/err.h>
-#include <linux/pci.h>
 #include <linux/workqueue.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 4a579b3a1c90..1e13ab42b70b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/random.h>
 
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9a7eaadb1688..6469406ea9d8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -40,7 +40,6 @@
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/kref.h>
 #include <linux/idr.h>
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8199b83052a9..d97ded25c4ff 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -40,7 +40,6 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/cdev.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/poll.h>
 #include <linux/rwsem.h>
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4fd75afa6a3a..bab66769be14 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -802,6 +802,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         INIT_LIST_HEAD(&obj->async_list);
 
         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
+                                             cmd.comp_vector,
                                              file->ucontext, &udata);
         if (IS_ERR(cq)) {
                 ret = PTR_ERR(cq);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index f8bc822a3cc3..d44e54799651 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -752,7 +752,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
         spin_unlock(&map_lock);
 
         uverbs_dev->ib_dev = device;
-        uverbs_dev->num_comp_vectors = 1;
+        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
         uverbs_dev->dev = cdev_alloc();
         if (!uverbs_dev->dev)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ccdf93d30b01..86ed8af9c7e6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -609,11 +609,11 @@ EXPORT_SYMBOL(ib_destroy_qp);
 struct ib_cq *ib_create_cq(struct ib_device *device,
                            ib_comp_handler comp_handler,
                            void (*event_handler)(struct ib_event *, void *),
-                           void *cq_context, int cqe)
+                           void *cq_context, int cqe, int comp_vector)
 {
         struct ib_cq *cq;
 
-        cq = device->create_cq(device, cqe, NULL, NULL);
+        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
 
         if (!IS_ERR(cq)) {
                 cq->device = device;
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 04a9db5de881..fa58200217a1 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
 extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 
 /* CM */
 extern int c2_llp_connect(struct iw_cm_id *cm_id,
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 5175c99ee586..d2b3366786d6 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
         return npolled;
 }
 
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 {
         struct c2_mq_shared __iomem *shared;
         struct c2_cq *cq;
+        unsigned long flags;
+        int ret = 0;
 
         cq = to_c2cq(ibcq);
         shared = cq->mq.peer;
 
-        if (notify == IB_CQ_NEXT_COMP)
+        if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-        else if (notify == IB_CQ_SOLICITED)
+        else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
         else
                 return -EINVAL;
@@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
          */
         readb(&shared->armed);
 
-        return 0;
+        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+                spin_lock_irqsave(&cq->lock, flags);
+                ret = !c2_mq_empty(&cq->mq);
+                spin_unlock_irqrestore(&cq->lock, flags);
+        }
+
+        return ret;
 }
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
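
[The IB_CQ_REPORT_MISSED_EVENTS flag lets a consumer close the race between draining a CQ and re-arming it: a positive return means completions arrived after the last poll, so the CQ should be polled again before sleeping. A sketch of the resulting re-arm loop on the consumer side; poll_and_process() is a hypothetical helper that drains the CQ with ib_poll_cq():]

        do {
                poll_and_process(cq);
                /* > 0 means events were missed; go around again. */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
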
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 607c09bf764c..109166223c09 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
         return 0;
 }
 
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
 {
@@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev)
         memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
         memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
         dev->ibdev.phys_port_cnt = 1;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &dev->pcidev->dev;
         dev->ibdev.query_device = c2_query_device;
         dev->ibdev.query_port = c2_query_port;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index f5e9aeec6f6e..76049afc7655 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
                                 return -EIO;
                         }
                 }
+
+                return 1;
         }
+
         return 0;
 }
 
120 123
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 90d7b8972cb4..ff7290eacefb 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -38,6 +38,7 @@
 #include "firmware_exports.h"
 
 #define T3_MAX_SGE      4
+#define T3_MAX_INLINE   64
 
 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
 #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3b4b0acd707f..b2faff5abce8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 
         PDBG("%s ep %p\n", __FUNCTION__, ep);
 
+        /*
+         * We get 2 abort replies from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
+                ep->flags |= ABORT_REQ_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         close_complete_upcall(ep);
         state_set(&ep->com, DEAD);
         release_ep_resources(ep);
@@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
         }
         req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+        req->cpu_idx = 0;
         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
         skb->priority = 1;
         ep->com.tdev->send(ep->com.tdev, skb);
@@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         int ret;
         int state;
 
+        /*
+         * We get 2 peer aborts from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
+                ep->flags |= PEER_ABORT_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         if (is_neg_adv_abort(req->status)) {
                 PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
                      ep->hwtid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 0c6f281bd4a0..21a388c313cf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -143,6 +143,11 @@ enum iwch_ep_state {
         DEAD,
 };
 
+enum iwch_ep_flags {
+        PEER_ABORT_IN_PROGRESS  = (1 << 0),
+        ABORT_REQ_IN_PROGRESS   = (1 << 1),
+};
+
 struct iwch_ep_common {
         struct iw_cm_id *cm_id;
         struct iwch_qp *qp;
@@ -181,6 +186,7 @@ struct iwch_ep {
         u16 plen;
         u32 ird;
         u32 ord;
+        u32 flags;
 };
 
 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index af28a317016d..a891493fd340 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
         return 0;
 }
 
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                                     struct ib_ucontext *ib_context,
                                     struct ib_udata *udata)
 {
@@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 #endif
 }
 
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
         struct iwch_dev *rhp;
         struct iwch_cq *chp;
@@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
         chp = to_iwch_cq(ibcq);
         rhp = chp->rhp;
-        if (notify == IB_CQ_SOLICITED)
+        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 cq_op = CQ_ARM_SE;
         else
                 cq_op = CQ_ARM_AN;
@@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
         PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
         err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
         spin_unlock_irqrestore(&chp->lock, flag);
-        if (err)
+        if (err < 0)
                 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
                        chp->cq.cqid);
+        if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+                err = 0;
         return err;
 }
 
@@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
         if (rqsize > T3_MAX_RQ_SIZE)
                 return ERR_PTR(-EINVAL);
 
+        if (attrs->cap.max_inline_data > T3_MAX_INLINE)
+                return ERR_PTR(-EINVAL);
+
         /*
          * NOTE: The SQ and total WQ sizes don't need to be
          * a power of two.  However, all the code assumes
@@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev)
         dev->ibdev.node_type = RDMA_NODE_RNIC;
         memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
         dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
         dev->ibdev.query_device = iwch_query_device;
         dev->ibdev.query_port = iwch_query_port;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0a472c9b44db..714dddbc9a98 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp,
         return err;
 }
 
-static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
+static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
+                                    u8 *layer_type, u8 *ecode)
 {
-        switch (t3err) {
+        int status = TPT_ERR_INTERNAL_ERR;
+        int tagged = 0;
+        int opcode = -1;
+        int rqtype = 0;
+        int send_inv = 0;
+
+        if (rsp_msg) {
+                status = CQE_STATUS(rsp_msg->cqe);
+                opcode = CQE_OPCODE(rsp_msg->cqe);
+                rqtype = RQ_TYPE(rsp_msg->cqe);
+                send_inv = (opcode == T3_SEND_WITH_INV) ||
+                           (opcode == T3_SEND_WITH_SE_INV);
+                tagged = (opcode == T3_RDMA_WRITE) ||
+                         (rqtype && (opcode == T3_READ_RESP));
+        }
+
+        switch (status) {
         case TPT_ERR_STAG:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_INV_STAG;
-                } else if (tagged == 2) {
+                if (send_inv) {
+                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+                        *ecode = RDMAP_CANT_INV_STAG;
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_INV_STAG;
                 }
                 break;
         case TPT_ERR_PDID:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                if ((opcode == T3_SEND_WITH_INV) ||
+                    (opcode == T3_SEND_WITH_SE_INV))
+                        *ecode = RDMAP_CANT_INV_STAG;
+                else
+                        *ecode = RDMAP_STAG_NOT_ASSOC;
+                break;
         case TPT_ERR_QPID:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_STAG_NOT_ASSOC;
+                break;
         case TPT_ERR_ACCESS:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_STAG_NOT_ASSOC;
-                } else if (tagged == 2) {
-                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
-                        *ecode = RDMAP_STAG_NOT_ASSOC;
-                }
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_ACC_VIOL;
                 break;
         case TPT_ERR_WRAP:
                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                 *ecode = RDMAP_TO_WRAP;
                 break;
         case TPT_ERR_BOUND:
-                if (tagged == 1) {
+                if (tagged) {
                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                         *ecode = DDPT_BASE_BOUNDS;
-                } else if (tagged == 2) {
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_BASE_BOUNDS;
-                } else {
-                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
-                        *ecode = DDPU_MSG_TOOBIG;
                 }
                 break;
         case TPT_ERR_INVALIDATE_SHARED_MR:
@@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 {
         union t3_wr *wqe;
         struct terminate_message *term;
-        int status;
-        int tagged = 0;
         struct sk_buff *skb;
 
         PDBG("%s %d\n", __FUNCTION__, __LINE__);
@@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 
         /* immediate data starts here. */
         term = (struct terminate_message *)wqe->send.sgl;
-        if (rsp_msg) {
-                status = CQE_STATUS(rsp_msg->cqe);
-                if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
-                        tagged = 1;
-                if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
-                    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
-                        tagged = 2;
-        } else {
-                status = TPT_ERR_INTERNAL_ERR;
-        }
-        build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
+        build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
         build_fw_riwrh((void *)wqe, T3_WR_SEND,
                        T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
                        qhp->ep->hwtid, 5);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index e2cdc1a16fe9..67f0670fe3b1 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
         return ret;
 }
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
 {
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 95fd59fb4528..e14b029332c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);
 
@@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 
 int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
 
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
 
 struct ib_qp *ehca_create_qp(struct ib_pd *pd,
                              struct ib_qp_init_attr *init_attr,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 50e426592d02..fe90e7454560 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca)
 
         shca->ib_device.node_type           = RDMA_NODE_IB_CA;
         shca->ib_device.phys_port_cnt       = shca->num_ports;
+        shca->ib_device.num_comp_vectors    = 1;
         shca->ib_device.dma_device          = &shca->ibmebus_dev->ofdev.dev;
         shca->ib_device.query_device        = ehca_query_device;
         shca->ib_device.query_port          = ehca_query_port;
@@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
                 return -EPERM;
         }
 
-        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
+        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
         if (IS_ERR(ibcq)) {
                 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
                 return PTR_ERR(ibcq);
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 08d3f892d9f3..caec9dee09e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -634,11 +634,13 @@ poll_cq_exit0:
         return ret;
 }
 
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
 {
         struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+        unsigned long spl_flags;
+        int ret = 0;
 
-        switch (cq_notify) {
+        switch (notify_flags & IB_CQ_SOLICITED_MASK) {
         case IB_CQ_SOLICITED:
                 hipz_set_cqx_n0(my_cq, 1);
                 break;
@@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
                 return -EINVAL;
         }
 
-        return 0;
+        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+                spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+                ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
+                spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+        }
+
+        return ret;
 }
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 8199c45768a3..57f141a36bce 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
140 return cqe; 140 return cqe;
141} 141}
142 142
143static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
144{
145 struct ehca_cqe *cqe = ipz_qeit_get(queue);
146 u32 cqe_flags = cqe->cqe_flags;
147
148 return cqe_flags >> 7 == (queue->toggle_state & 1);
149}
150
143/* 151/*
144 * returns and resets Queue Entry iterator 152 * returns and resets Queue Entry iterator
145 * returns address (kv) of first Queue Entry 153 * returns address (kv) of first Queue Entry
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index ea78e6dddc90..3e9241badba0 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -204,7 +204,7 @@ static void send_complete(unsigned long data)
  *
  * Called by ib_create_cq() in the generic verbs code.
  */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
 {
@@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
          * See ipath_mmap() for details.
          */
         if (udata && udata->outlen >= sizeof(__u64)) {
-                struct ipath_mmap_info *ip;
-                __u64 offset = (__u64) wc;
                 int err;
+                u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-                if (err) {
-                        ret = ERR_PTR(err);
+                cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+                if (!cq->ip) {
+                        ret = ERR_PTR(-ENOMEM);
                         goto bail_wc;
                 }
 
-                /* Allocate info for ipath_mmap(). */
-                ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                if (!ip) {
-                        ret = ERR_PTR(-ENOMEM);
-                        goto bail_wc;
+                err = ib_copy_to_udata(udata, &cq->ip->offset,
+                                       sizeof(cq->ip->offset));
+                if (err) {
+                        ret = ERR_PTR(err);
+                        goto bail_ip;
                 }
-                cq->ip = ip;
-                ip->context = context;
-                ip->obj = wc;
-                kref_init(&ip->ref);
-                ip->mmap_cnt = 0;
-                ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                      sizeof(struct ib_wc) * entries);
-                spin_lock_irq(&dev->pending_lock);
-                ip->next = dev->pending_mmaps;
-                dev->pending_mmaps = ip;
-                spin_unlock_irq(&dev->pending_lock);
         } else
                 cq->ip = NULL;
 
@@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
         if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                 spin_unlock(&dev->n_cqs_lock);
                 ret = ERR_PTR(-ENOMEM);
-                goto bail_wc;
+                goto bail_ip;
         }
 
         dev->n_cqs_allocated++;
         spin_unlock(&dev->n_cqs_lock);
 
+        if (cq->ip) {
+                spin_lock_irq(&dev->pending_lock);
+                list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+                spin_unlock_irq(&dev->pending_lock);
+        }
+
         /*
          * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
          * The number of entries should be >= the number requested or return
@@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
         goto done;
 
+bail_ip:
+        kfree(cq->ip);
 bail_wc:
         vfree(wc);
-
 bail_cq:
         kfree(cq);
-
 done:
         return ret;
 }
@@ -340,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
 /**
  * ipath_req_notify_cq - change the notification type for a completion queue
  * @ibcq: the completion queue
- * @notify: the type of notification to request
+ * @notify_flags: the type of notification to request
  *
  * Returns 0 for success.
  *
  * This may be called from interrupt context.  Also called by
  * ib_req_notify_cq() in the generic verbs code.
  */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 {
         struct ipath_cq *cq = to_icq(ibcq);
         unsigned long flags;
+        int ret = 0;
 
         spin_lock_irqsave(&cq->lock, flags);
         /*
@@ -358,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
          * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
          */
         if (cq->notify != IB_CQ_NEXT_COMP)
-                cq->notify = notify;
+                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+        if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+            cq->queue->head != cq->queue->tail)
+                ret = 1;
+
         spin_unlock_irqrestore(&cq->lock, flags);
-        return 0;
+
+        return ret;
 }
 
 /**
@@ -443,13 +444,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
         if (cq->ip) {
                 struct ipath_ibdev *dev = to_idev(ibcq->device);
                 struct ipath_mmap_info *ip = cq->ip;
+                u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-                ip->obj = wc;
-                ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                      sizeof(struct ib_wc) * cqe);
+                ipath_update_mmap_info(dev, ip, s, wc);
                 spin_lock_irq(&dev->pending_lock);
-                ip->next = dev->pending_mmaps;
-                dev->pending_mmaps = ip;
+                if (list_empty(&ip->pending_mmaps))
+                        list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                 spin_unlock_irq(&dev->pending_lock);
         }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index ed55979bfd34..036ed1ef1796 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -38,7 +38,6 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/namei.h>
-#include <linux/pci.h>
 
 #include "ipath_kernel.h"
 
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index e46aa4ed2a7e..05a1d2b01d9d 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -37,7 +37,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/pci.h>
 #include <asm/byteorder.h>
 
 #include "ipath_kernel.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index a82157db4689..937bc3396b53 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
 {
         struct ipath_mmap_info *ip =
                 container_of(ref, struct ipath_mmap_info, ref);
+        struct ipath_ibdev *dev = to_idev(ip->context->device);
+
+        spin_lock_irq(&dev->pending_lock);
+        list_del(&ip->pending_mmaps);
+        spin_unlock_irq(&dev->pending_lock);
 
         vfree(ip->obj);
         kfree(ip);
@@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
         struct ipath_mmap_info *ip = vma->vm_private_data;
 
         kref_get(&ip->ref);
-        ip->mmap_cnt++;
 }
 
 static void ipath_vma_close(struct vm_area_struct *vma)
 {
         struct ipath_mmap_info *ip = vma->vm_private_data;
 
-        ip->mmap_cnt--;
         kref_put(&ip->ref, ipath_release_mmap_info);
 }
 
@@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
         struct ipath_ibdev *dev = to_idev(context->device);
         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
         unsigned long size = vma->vm_end - vma->vm_start;
-        struct ipath_mmap_info *ip, **pp;
+        struct ipath_mmap_info *ip, *pp;
         int ret = -EINVAL;
 
         /*
@@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
          * CQ, QP, or SRQ is soon followed by a call to mmap().
          */
         spin_lock_irq(&dev->pending_lock);
-        for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+        list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+                                 pending_mmaps) {
                 /* Only the creator is allowed to mmap the object */
-                if (context != ip->context || (void *) offset != ip->obj)
+                if (context != ip->context || (__u64) offset != ip->offset)
                         continue;
                 /* Don't allow a mmap larger than the object. */
                 if (size > ip->size)
                         break;
 
-                *pp = ip->next;
+                list_del_init(&ip->pending_mmaps);
                 spin_unlock_irq(&dev->pending_lock);
 
                 ret = remap_vmalloc_range(vma, ip->obj, 0);
@@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 done:
         return ret;
 }
+
+/*
+ * Allocate information for ipath_mmap
+ */
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                               u32 size,
+                                               struct ib_ucontext *context,
+                                               void *obj) {
+        struct ipath_mmap_info *ip;
+
+        ip = kmalloc(sizeof *ip, GFP_KERNEL);
+        if (!ip)
+                goto bail;
+
+        size = PAGE_ALIGN(size);
+
+        spin_lock_irq(&dev->mmap_offset_lock);
+        if (dev->mmap_offset == 0)
+                dev->mmap_offset = PAGE_SIZE;
+        ip->offset = dev->mmap_offset;
+        dev->mmap_offset += size;
+        spin_unlock_irq(&dev->mmap_offset_lock);
+
+        INIT_LIST_HEAD(&ip->pending_mmaps);
+        ip->size = size;
+        ip->context = context;
+        ip->obj = obj;
+        kref_init(&ip->ref);
+
+bail:
+        return ip;
+}
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                            struct ipath_mmap_info *ip,
+                            u32 size, void *obj) {
+        size = PAGE_ALIGN(size);
+
+        spin_lock_irq(&dev->mmap_offset_lock);
+        if (dev->mmap_offset == 0)
+                dev->mmap_offset = PAGE_SIZE;
+        ip->offset = dev->mmap_offset;
+        dev->mmap_offset += size;
+        spin_unlock_irq(&dev->mmap_offset_lock);
+
+        ip->size = size;
+        ip->obj = obj;
+}
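
[The new helpers replace pointer-valued mmap keys with driver-assigned offsets: ipath_create_mmap_info() carves a unique offset out of dev->mmap_offset, and that offset, copied back to userspace through udata, becomes the mmap() key that ipath_mmap() later matches against. A sketch of the assumed userspace counterpart; verbs_fd, resp.offset, and queue_size are illustrative names, not part of this patch:]

        /* Map the queue the kernel just created; the offset returned
         * through udata identifies it to the driver's mmap handler. */
        void *buf = mmap(NULL, queue_size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, verbs_fd, resp.offset);
        if (buf == MAP_FAILED)
                return -errno;
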
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 16db9ac0b402..bfef08ecd342 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
          * See ipath_mmap() for details.
          */
         if (udata && udata->outlen >= sizeof(__u64)) {
-                struct ipath_mmap_info *ip;
-                __u64 offset = (__u64) qp->r_rq.wq;
                 int err;
 
-                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-                if (err) {
-                        ret = ERR_PTR(err);
-                        goto bail_rwq;
-                }
+                if (!qp->r_rq.wq) {
+                        __u64 offset = 0;
 
-                if (qp->r_rq.wq) {
-                        /* Allocate info for ipath_mmap(). */
-                        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                        if (!ip) {
+                        err = ib_copy_to_udata(udata, &offset,
+                                               sizeof(offset));
+                        if (err) {
+                                ret = ERR_PTR(err);
+                                goto bail_rwq;
+                        }
+                } else {
+                        u32 s = sizeof(struct ipath_rwq) +
+                                qp->r_rq.size * sz;
+
+                        qp->ip =
+                            ipath_create_mmap_info(dev, s,
+                                                   ibpd->uobject->context,
+                                                   qp->r_rq.wq);
+                        if (!qp->ip) {
                                 ret = ERR_PTR(-ENOMEM);
                                 goto bail_rwq;
                         }
-                        qp->ip = ip;
-                        ip->context = ibpd->uobject->context;
-                        ip->obj = qp->r_rq.wq;
-                        kref_init(&ip->ref);
-                        ip->mmap_cnt = 0;
-                        ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                              qp->r_rq.size * sz);
-                        spin_lock_irq(&dev->pending_lock);
-                        ip->next = dev->pending_mmaps;
-                        dev->pending_mmaps = ip;
-                        spin_unlock_irq(&dev->pending_lock);
+
+                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
+                                               sizeof(qp->ip->offset));
+                        if (err) {
+                                ret = ERR_PTR(err);
+                                goto bail_ip;
+                        }
                 }
         }
 
@@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
         dev->n_qps_allocated++;
         spin_unlock(&dev->n_qps_lock);
 
+        if (qp->ip) {
+                spin_lock_irq(&dev->pending_lock);
+                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+                spin_unlock_irq(&dev->pending_lock);
+        }
+
         ret = &qp->ibqp;
         goto bail;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0b53f5..1915771fd038 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
         case OP(RDMA_READ_RESPONSE_LAST):
         case OP(RDMA_READ_RESPONSE_ONLY):
         case OP(ATOMIC_ACKNOWLEDGE):
-                qp->s_ack_state = OP(ACKNOWLEDGE);
+                /*
+                 * We can increment the tail pointer now that the last
+                 * response has been sent instead of only being
+                 * constructed.
+                 */
+                if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+                        qp->s_tail_ack_queue = 0;
                 /* FALLTHROUGH */
+        case OP(SEND_ONLY):
         case OP(ACKNOWLEDGE):
                 /* Check for no next entry in the queue. */
                 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
                         if (qp->s_flags & IPATH_S_ACK_PENDING)
                                 goto normal;
+                        qp->s_ack_state = OP(ACKNOWLEDGE);
                         goto bail;
                 }
 
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                 if (len > pmtu) {
                         len = pmtu;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                } else {
+                } else
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-                        if (++qp->s_tail_ack_queue >
-                            IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
-                }
                 ohdr->u.aeth = ipath_compute_aeth(qp);
                 hwords++;
                 qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                                 cpu_to_be32(e->atomic_data);
                         hwords += sizeof(ohdr->u.at) / sizeof(u32);
                         bth2 = e->psn;
-                        if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
                 }
                 bth0 = qp->s_ack_state << 24;
                 break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                         ohdr->u.aeth = ipath_compute_aeth(qp);
                         hwords++;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-                        if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
                 }
                 bth0 = qp->s_ack_state << 24;
                 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                  * the ACK before setting s_ack_state to ACKNOWLEDGE
                  * (see above).
                  */
-                qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+                qp->s_ack_state = OP(SEND_ONLY);
                 qp->s_flags &= ~IPATH_S_ACK_PENDING;
                 qp->s_cur_sge = NULL;
                 if (qp->s_nak_state)
@@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp,
         /* Sending responses has higher priority over sending requests. */
         if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
              (qp->s_flags & IPATH_S_ACK_PENDING) ||
-             qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+             qp->s_ack_state != OP(ACKNOWLEDGE)) &&
             ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
                 goto done;
 
         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
-            qp->s_rnr_timeout)
+            qp->s_rnr_timeout || qp->s_wait_credit)
                 goto bail;
 
         /* Limit the number of packets sent without an ACK. */
         if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
                 qp->s_wait_credit = 1;
                 dev->n_rc_stalls++;
-                spin_lock(&dev->pending_lock);
-                if (list_empty(&qp->timerwait))
-                        list_add_tail(&qp->timerwait,
-                                      &dev->pending[dev->pending_index]);
-                spin_unlock(&dev->pending_lock);
                 goto bail;
         }
 
@@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp)
         u32 hwords;
         struct ipath_ib_header hdr;
         struct ipath_other_headers *ohdr;
+        unsigned long flags;
 
         /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-        if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+        if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+            (qp->s_flags & IPATH_S_ACK_PENDING) ||
+            qp->s_ack_state != OP(ACKNOWLEDGE))
                 goto queue_ack;
 
         /* Construct the header. */
@@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp)
640 dev->n_rc_qacks++; 638 dev->n_rc_qacks++;
641 639
642queue_ack: 640queue_ack:
643 spin_lock_irq(&qp->s_lock); 641 spin_lock_irqsave(&qp->s_lock, flags);
644 qp->s_flags |= IPATH_S_ACK_PENDING; 642 qp->s_flags |= IPATH_S_ACK_PENDING;
645 qp->s_nak_state = qp->r_nak_state; 643 qp->s_nak_state = qp->r_nak_state;
646 qp->s_ack_psn = qp->r_ack_psn; 644 qp->s_ack_psn = qp->r_ack_psn;
647 spin_unlock_irq(&qp->s_lock); 645 spin_unlock_irqrestore(&qp->s_lock, flags);
648 646
649 /* Call ipath_do_rc_send() in another thread. */ 647 /* Call ipath_do_rc_send() in another thread. */
650 tasklet_hi_schedule(&qp->s_task); 648 tasklet_hi_schedule(&qp->s_task);
@@ -1261,6 +1259,7 @@ ack_err:
1261 wc.dlid_path_bits = 0; 1259 wc.dlid_path_bits = 0;
1262 wc.port_num = 0; 1260 wc.port_num = 0;
1263 ipath_sqerror_qp(qp, &wc); 1261 ipath_sqerror_qp(qp, &wc);
1262 spin_unlock_irqrestore(&qp->s_lock, flags);
1264bail: 1263bail:
1265 return; 1264 return;
1266} 1265}
@@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1294 struct ipath_ack_entry *e; 1293 struct ipath_ack_entry *e;
1295 u8 i, prev; 1294 u8 i, prev;
1296 int old_req; 1295 int old_req;
1296 unsigned long flags;
1297 1297
1298 if (diff > 0) { 1298 if (diff > 0) {
1299 /* 1299 /*
@@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1327 psn &= IPATH_PSN_MASK; 1327 psn &= IPATH_PSN_MASK;
1328 e = NULL; 1328 e = NULL;
1329 old_req = 1; 1329 old_req = 1;
1330 spin_lock_irq(&qp->s_lock); 1330 spin_lock_irqsave(&qp->s_lock, flags);
1331 for (i = qp->r_head_ack_queue; ; i = prev) { 1331 for (i = qp->r_head_ack_queue; ; i = prev) {
1332 if (i == qp->s_tail_ack_queue) 1332 if (i == qp->s_tail_ack_queue)
1333 old_req = 0; 1333 old_req = 0;
@@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1425 * after all the previous RDMA reads and atomics. 1425 * after all the previous RDMA reads and atomics.
1426 */ 1426 */
1427 if (i == qp->r_head_ack_queue) { 1427 if (i == qp->r_head_ack_queue) {
1428 spin_unlock_irq(&qp->s_lock); 1428 spin_unlock_irqrestore(&qp->s_lock, flags);
1429 qp->r_nak_state = 0; 1429 qp->r_nak_state = 0;
1430 qp->r_ack_psn = qp->r_psn - 1; 1430 qp->r_ack_psn = qp->r_psn - 1;
1431 goto send_ack; 1431 goto send_ack;
@@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1439 break; 1439 break;
1440 } 1440 }
1441 qp->r_nak_state = 0; 1441 qp->r_nak_state = 0;
1442 spin_unlock_irq(&qp->s_lock);
1443 tasklet_hi_schedule(&qp->s_task); 1442 tasklet_hi_schedule(&qp->s_task);
1444 1443
1445unlock_done: 1444unlock_done:
1446 spin_unlock_irq(&qp->s_lock); 1445 spin_unlock_irqrestore(&qp->s_lock, flags);
1447done: 1446done:
1448 return 1; 1447 return 1;
1449 1448
@@ -1453,10 +1452,12 @@ send_ack:
1453 1452
1454static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) 1453static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
1455{ 1454{
1456 spin_lock_irq(&qp->s_lock); 1455 unsigned long flags;
1456
1457 spin_lock_irqsave(&qp->s_lock, flags);
1457 qp->state = IB_QPS_ERR; 1458 qp->state = IB_QPS_ERR;
1458 ipath_error_qp(qp, err); 1459 ipath_error_qp(qp, err);
1459 spin_unlock_irq(&qp->s_lock); 1460 spin_unlock_irqrestore(&qp->s_lock, flags);
1460} 1461}
1461 1462
1462/** 1463/**
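
The recurring change in the ipath_rc.c hunks above converts spin_lock_irq()/spin_unlock_irq() to the irqsave variants. A minimal sketch of the pattern, using a hypothetical helper name, shows why: the unconditional unlock re-enables interrupts even if the caller had them disabled, whereas irqsave/irqrestore preserves the caller's interrupt state, making the path safe to enter from both process and interrupt context.

#include <linux/spinlock.h>

/* Hypothetical helper illustrating the conversion applied above. */
static void mark_ack_pending(spinlock_t *lock, u32 *s_flags, u32 pending_bit)
{
	unsigned long flags;			/* captures caller's IRQ state */

	spin_lock_irqsave(lock, flags);		/* safe in any context */
	*s_flags |= pending_bit;
	spin_unlock_irqrestore(lock, flags);	/* restores, not re-enables */
}
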
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 94033503400c..03acae66ba81 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) srq->rq.wq;
 		int err;
+		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
+		srq->ip =
+		    ipath_create_mmap_info(dev, s,
+					   ibpd->uobject->context,
+					   srq->rq.wq);
+		if (!srq->ip) {
+			ret = ERR_PTR(-ENOMEM);
 			goto bail_wq;
 		}
 
-		/* Allocate info for ipath_mmap(). */
-		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-		if (!ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wq;
-		}
-		srq->ip = ip;
-		ip->context = ibpd->uobject->context;
-		ip->obj = srq->rq.wq;
-		kref_init(&ip->ref);
-		ip->mmap_cnt = 0;
-		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-				      srq->rq.size * sz);
-		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
-		spin_unlock_irq(&dev->pending_lock);
+		err = ib_copy_to_udata(udata, &srq->ip->offset,
+				       sizeof(srq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
+		}
 	} else
 		srq->ip = NULL;
 
@@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
 		spin_unlock(&dev->n_srqs_lock);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail_wq;
+		goto bail_ip;
 	}
 
 	dev->n_srqs_allocated++;
 	spin_unlock(&dev->n_srqs_lock);
 
+	if (srq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	ret = &srq->ibsrq;
 	goto done;
 
+bail_ip:
+	kfree(srq->ip);
 bail_wq:
 	vfree(srq->rq.wq);
-
 bail_srq:
 	kfree(srq);
-
 done:
 	return ret;
 }
@@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		if (srq->ip) {
 			struct ipath_mmap_info *ip = srq->ip;
 			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+			u32 s = sizeof(struct ipath_rwq) + size * sz;
 
-			ip->obj = wq;
-			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-					      size * sz);
+			ipath_update_mmap_info(dev, ip, s, wq);
 			spin_lock_irq(&dev->pending_lock);
-			ip->next = dev->pending_mmaps;
-			dev->pending_mmaps = ip;
+			if (list_empty(&ip->pending_mmaps))
+				list_add(&ip->pending_mmaps,
+					 &dev->pending_mmaps);
 			spin_unlock_irq(&dev->pending_lock);
 		}
 	} else if (attr_mask & IB_SRQ_LIMIT) {
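
The ipath_srq.c hunks above replace the open-coded ipath_mmap_info setup with the new ipath_create_mmap_info()/ipath_update_mmap_info() helpers and a list_head-based pending_mmaps list. The helper bodies are not part of these hunks (per the diffstat they live in ipath_mmap.c); a plausible sketch of ipath_create_mmap_info(), inferred only from the fields the removed open-coded version initialized plus the new mmap_offset/mmap_offset_lock device members, might be:

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj)
{
	struct ipath_mmap_info *ip;

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);	/* sketch; body inferred */
	if (!ip)
		return NULL;

	size = PAGE_ALIGN(size);

	/* Hand out a unique token userspace passes back through mmap(). */
	spin_lock_irq(&dev->mmap_offset_lock);
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	ip->context = context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
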
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index 9307f7187ca5..d8b5e4cefe25 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/pci.h>
-
 #include "ipath_kernel.h"
 
 struct infinipath_stats ipath_stats;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index ffa6318ad0cc..4dc398d5e011 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/ctype.h>
-#include <linux/pci.h>
 
 #include "ipath_kernel.h"
 #include "ipath_common.h"
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 18c6df2052c2..12933e77c7e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		ret = -ENOMEM;
 		goto err_lk;
 	}
+	INIT_LIST_HEAD(&idev->pending_mmaps);
 	spin_lock_init(&idev->pending_lock);
+	idev->mmap_offset = PAGE_SIZE;
+	spin_lock_init(&idev->mmap_offset_lock);
 	INIT_LIST_HEAD(&idev->pending[0]);
 	INIT_LIST_HEAD(&idev->pending[1]);
 	INIT_LIST_HEAD(&idev->pending[2]);
@@ -1558,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
 	dev->node_type = RDMA_NODE_IB_CA;
 	dev->phys_port_cnt = 1;
+	dev->num_comp_vectors = 1;
 	dev->dma_device = &dd->pcidev->dev;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 7c4929f1cb5b..7064fc222727 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -173,12 +173,12 @@ struct ipath_ah {
  * this as its vm_private_data.
  */
 struct ipath_mmap_info {
-	struct ipath_mmap_info *next;
+	struct list_head pending_mmaps;
 	struct ib_ucontext *context;
 	void *obj;
+	__u64 offset;
 	struct kref ref;
 	unsigned size;
-	unsigned mmap_cnt;
 };
 
 /*
@@ -422,7 +422,7 @@ struct ipath_qp {
 #define IPATH_S_RDMAR_PENDING	0x04
 #define IPATH_S_ACK_PENDING	0x08
 
-#define IPATH_PSN_CREDIT	2048
+#define IPATH_PSN_CREDIT	512
 
 /*
  * Since struct ipath_swqe is not a fixed size, we can't simply index into
@@ -485,9 +485,10 @@ struct ipath_opcode_stats {
 
 struct ipath_ibdev {
 	struct ib_device ibdev;
-	struct list_head dev_list;
 	struct ipath_devdata *dd;
-	struct ipath_mmap_info *pending_mmaps;
+	struct list_head pending_mmaps;
+	spinlock_t mmap_offset_lock;
+	u32 mmap_offset;
 	int ib_unit;	/* This is the device number */
 	u16 sm_lid;	/* in host order */
 	u8 sm_sl;
@@ -734,13 +735,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq);
 
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
 
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
 			      struct ib_ucontext *context,
 			      struct ib_udata *udata);
 
 int ipath_destroy_cq(struct ib_cq *ibcq);
 
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 
@@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
 
 void ipath_release_mmap_info(struct kref *ref);
 
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+					       u32 size,
+					       struct ib_ucontext *context,
+					       void *obj);
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+			    struct ipath_mmap_info *ip,
+			    u32 size, void *obj);
+
 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
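
The struct changes above convert the hand-rolled singly linked pending_mmaps chain (the old ->next pointer) to a standard list_head. Beyond removing boilerplate, list_head gives O(1) unlink from the middle of the list and an idempotent "queue only if not already queued" test, which the ipath_modify_srq() hunk relies on. A small sketch of the unlink side, using a hypothetical helper name:

static void drop_pending_mmap(struct ipath_ibdev *dev,
			      struct ipath_mmap_info *ip)
{
	spin_lock_irq(&dev->pending_lock);
	if (!list_empty(&ip->pending_mmaps))
		list_del_init(&ip->pending_mmaps);	/* safe to repeat */
	spin_unlock_irq(&dev->pending_lock);
}
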
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index efd79ef109a6..cf0868f6e965 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -726,11 +726,12 @@ repoll:
 	return err == 0 || err == -EAGAIN ? npolled : err;
 }
 
-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
 {
 	__be32 doorbell[2];
 
-	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
+	doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
+				   IB_CQ_SOLICITED ?
 				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
 				   MTHCA_TAVOR_CQ_DB_REQ_NOT) |
 				  to_mcq(cq)->cqn);
@@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
 	return 0;
 }
 
-int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct mthca_cq *cq = to_mcq(ibcq);
 	__be32 doorbell[2];
@@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 	doorbell[0] = ci;
 	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
-				  (notify == IB_CQ_SOLICITED ? 1 : 2));
+				  ((flags & IB_CQ_SOLICITED_MASK) ==
+				   IB_CQ_SOLICITED ? 1 : 2));
 
 	mthca_write_db_rec(doorbell, cq->arm_db);
 
@@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 	wmb();
 
 	doorbell[0] = cpu_to_be32((sn << 28) |
-				  (notify == IB_CQ_SOLICITED ?
+				  ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
 				   MTHCA_ARBEL_CQ_DB_REQ_NOT) |
 				  cq->cqn);
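
The mthca_cq.c hunks above are the provider side of the enum ib_cq_notify to enum ib_cq_notify_flags conversion: the trigger type is now extracted with IB_CQ_SOLICITED_MASK so that option bits can be OR'ed in alongside it. A hedged consumer-side sketch of the resulting convention (the wrapper name is hypothetical):

static int rearm_cq(struct ib_cq *cq)
{
	int ret;

	/* Re-arm for any completion and ask to be told about completions
	 * that raced with the re-arm request. */
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				   IB_CQ_REPORT_MISSED_EVENTS);
	if (ret > 0)
		return 1;	/* missed events: caller should poll again */
	return ret;		/* 0 on success, negative errno on failure */
}
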
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index b7e42efaf43d..9bae3cc60603 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev);
 
 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 		  struct ib_wc *entry);
-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
-int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 int mthca_init_cq(struct mthca_dev *dev, int nent,
 		  struct mthca_ucontext *ctx, u32 pdn,
 		  struct mthca_cq *cq);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 594144145f45..a1ab06847b75 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -38,7 +38,6 @@
 #define MTHCA_MEMFREE_H
 
 #include <linux/list.h>
-#include <linux/pci.h>
 #include <linux/mutex.h>
 
 #define MTHCA_ICM_CHUNK_LEN \
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 47e6fd46d9c2..1c05486c3c68 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp)
 }
 
 static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+				     int comp_vector,
 				     struct ib_ucontext *context,
 				     struct ib_udata *udata)
 {
@@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
+	dev->ib_dev.num_comp_vectors = 1;
 	dev->ib_dev.dma_device = &dev->pdev->dev;
 	dev->ib_dev.query_device = mthca_query_device;
 	dev->ib_dev.query_port = mthca_query_port;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8fe6fee7a97a..fee60c852d14 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -701,6 +701,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
 
+	if (ibqp->qp_type == IB_QPT_RC &&
+	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
+
+		if (mthca_is_memfree(dev))
+			qp_context->rlkey_arbel_sched_queue |= sched_queue;
+		else
+			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);
+
+		qp_param->opt_param_mask |=
+			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
+	}
+
 	if (attr_mask & IB_QP_TIMEOUT) {
 		qp_context->pri_path.ackto = attr->timeout << 3;
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fd558267d1cb..87310eeb6df0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -41,7 +41,6 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
-#include <linux/pci.h>
 #include <linux/kref.h>
 #include <linux/if_infiniband.h>
 #include <linux/mutex.h>
@@ -311,6 +310,7 @@ extern struct workqueue_struct *ipoib_workqueue;
 
 /* functions */
 
+int ipoib_poll(struct net_device *dev, int *budget);
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0c4e59b906cd..785bc8505f2a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -370,7 +370,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
 		p = wc->qp->qp_context;
-		if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
+		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
 			spin_lock_irqsave(&priv->lock, flags);
 			p->jiffies = jiffies;
 			/* Move this entry to list head, but do
@@ -416,7 +416,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
 	skb->pkt_type = PACKET_HOST;
-	netif_rx_ni(skb);
+	netif_receive_skb(skb);
 
 repost:
 	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
@@ -592,7 +592,9 @@ int ipoib_cm_dev_open(struct net_device *dev)
 	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
 	if (IS_ERR(priv->cm.id)) {
 		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
-		return IS_ERR(priv->cm.id);
+		ret = PTR_ERR(priv->cm.id);
+		priv->cm.id = NULL;
+		return ret;
 	}
 
 	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
@@ -601,6 +603,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
 		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
 		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
 		ib_destroy_cm_id(priv->cm.id);
+		priv->cm.id = NULL;
 		return ret;
 	}
 	return 0;
@@ -611,10 +614,11 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_rx *p;
 
-	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
+	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
 		return;
 
 	ib_destroy_cm_id(priv->cm.id);
+	priv->cm.id = NULL;
 	spin_lock_irq(&priv->lock);
 	while (!list_empty(&priv->cm.passive_ids)) {
 		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
@@ -789,7 +793,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	}
 
 	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
-			     ipoib_sendq_size + 1);
+			     ipoib_sendq_size + 1, 0);
 	if (IS_ERR(p->cq)) {
 		ret = PTR_ERR(p->cq);
 		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
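
One of the ipoib_cm.c hunks above fixes a classic error-path bug in ipoib_cm_dev_open(): IS_ERR() yields a boolean, so "return IS_ERR(ptr);" propagated 1 instead of a negative errno. A minimal sketch of the correct pattern, with a hypothetical constructor:

#include <linux/err.h>

struct thing;
struct thing *create_thing(void);	/* hypothetical constructor */

static int open_thing(struct thing **out)
{
	struct thing *t = create_thing();

	if (IS_ERR(t))
		return PTR_ERR(t);	/* the encoded -errno, not IS_ERR()'s 1 */

	*out = t;
	return 0;
}
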
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 1bdb9101911a..68d72c6f7ffb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -226,7 +226,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		skb->dev = dev;
 		/* XXX get correct PACKET_ type here */
 		skb->pkt_type = PACKET_HOST;
-		netif_rx_ni(skb);
+		netif_receive_skb(skb);
 	} else {
 		ipoib_dbg_data(priv, "dropping loopback packet\n");
 		dev_kfree_skb_any(skb);
@@ -280,28 +280,63 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   wc->status, wr_id, wc->vendor_err);
 }
 
-static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+int ipoib_poll(struct net_device *dev, int *budget)
 {
-	if (wc->wr_id & IPOIB_CM_OP_SRQ)
-		ipoib_cm_handle_rx_wc(dev, wc);
-	else if (wc->wr_id & IPOIB_OP_RECV)
-		ipoib_ib_handle_rx_wc(dev, wc);
-	else
-		ipoib_ib_handle_tx_wc(dev, wc);
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int max = min(*budget, dev->quota);
+	int done;
+	int t;
+	int empty;
+	int n, i;
+
+	done  = 0;
+	empty = 0;
+
+	while (max) {
+		t = min(IPOIB_NUM_WC, max);
+		n = ib_poll_cq(priv->cq, t, priv->ibwc);
+
+		for (i = 0; i < n; ++i) {
+			struct ib_wc *wc = priv->ibwc + i;
+
+			if (wc->wr_id & IPOIB_CM_OP_SRQ) {
+				++done;
+				--max;
+				ipoib_cm_handle_rx_wc(dev, wc);
+			} else if (wc->wr_id & IPOIB_OP_RECV) {
+				++done;
+				--max;
+				ipoib_ib_handle_rx_wc(dev, wc);
+			} else
+				ipoib_ib_handle_tx_wc(dev, wc);
+		}
+
+		if (n != t) {
+			empty = 1;
+			break;
+		}
+	}
+
+	dev->quota -= done;
+	*budget    -= done;
+
+	if (empty) {
+		netif_rx_complete(dev);
+		if (unlikely(ib_req_notify_cq(priv->cq,
+					      IB_CQ_NEXT_COMP |
+					      IB_CQ_REPORT_MISSED_EVENTS)) &&
+		    netif_rx_reschedule(dev, 0))
+			return 1;
+
+		return 0;
+	}
+
+	return 1;
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 {
-	struct net_device *dev = (struct net_device *) dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int n, i;
-
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	do {
-		n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
-		for (i = 0; i < n; ++i)
-			ipoib_ib_handle_wc(dev, priv->ibwc + i);
-	} while (n == IPOIB_NUM_WC);
+	netif_rx_schedule(dev_ptr);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
@@ -514,9 +549,10 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
-	int i;
+	int i, n;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+	netif_poll_disable(dev);
 
 	ipoib_cm_dev_stop(dev);
 
@@ -568,6 +604,18 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 			goto timeout;
 		}
 
+		do {
+			n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+			for (i = 0; i < n; ++i) {
+				if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
+					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+				else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
+					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+				else
+					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+			}
+		} while (n == IPOIB_NUM_WC);
+
 		msleep(1);
 	}
 
@@ -596,6 +644,9 @@ timeout:
 		msleep(1);
 	}
 
+	netif_poll_enable(dev);
+	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
+
 	return 0;
 }
 
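
The ipoib_ib.c hunks above move IPoIB from polling the CQ directly in the completion handler to the (pre-2.6.24) NAPI ->poll contract: the handler consumes at most min(*budget, dev->quota) units of work, decrements both counters, returns 1 while work remains, and only calls netif_rx_complete() and re-arms the CQ once drained. A skeleton of that contract, with hypothetical helpers standing in for the device-specific parts:

static int example_poll(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done  = do_rx_work(dev, limit);	/* hypothetical: returns <= limit */

	dev->quota -= done;
	*budget    -= done;

	if (!rx_work_pending(dev)) {		/* hypothetical check */
		netif_rx_complete(dev);
		enable_rx_events(dev);		/* hypothetical re-arm */
		return 0;			/* done; leave the poll list */
	}
	return 1;				/* poll me again */
}
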
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b4c380c5a3ba..0a428f2b05c7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -948,6 +948,8 @@ static void ipoib_setup(struct net_device *dev)
 	dev->hard_header = ipoib_hard_header;
 	dev->set_multicast_list = ipoib_set_mcast_list;
 	dev->neigh_setup = ipoib_neigh_setup_dev;
+	dev->poll = ipoib_poll;
+	dev->weight = 100;
 
 	dev->watchdog_timeo = HZ;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 7f3ec205e35f..5c3c6a43a52b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	if (!ret)
 		size += ipoib_recvq_size;
 
-	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size);
+	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
 	if (IS_ERR(priv->cq)) {
 		printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
 		goto out_free_mr;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 278fcbccc2d9..3651072f6c1f 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -201,7 +201,7 @@ static int iser_post_receive_control(struct iscsi_conn *conn)
 	 * what's common for both schemes is that the connection is not started
 	 */
 	if (conn->c_stage != ISCSI_CONN_STARTED)
-		rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
+		rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
 	else /* FIXME till user space sets conn->max_recv_dlength correctly */
 		rx_data_size = 128;
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1fc967464a28..89d6008bb673 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -76,7 +76,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 				  iser_cq_callback,
 				  iser_cq_event_callback,
 				  (void *)device,
-				  ISER_MAX_CQ_LEN);
+				  ISER_MAX_CQ_LEN, 0);
 	if (IS_ERR(device->cq))
 		goto cq_err;
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5e8ac577f0ad..39bf057fbc43 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -197,7 +197,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
 		return -ENOMEM;
 
 	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
-				  NULL, target, SRP_CQ_SIZE);
+				  NULL, target, SRP_CQ_SIZE, 0);
 	if (IS_ERR(target->cq)) {
 		ret = PTR_ERR(target->cq);
 		goto out;
@@ -1468,6 +1468,25 @@ static ssize_t show_dgid(struct class_device *cdev, char *buf)
 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
 }
 
+static ssize_t show_orig_dgid(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
+	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+		       be16_to_cpu(target->orig_dgid[0]),
+		       be16_to_cpu(target->orig_dgid[1]),
+		       be16_to_cpu(target->orig_dgid[2]),
+		       be16_to_cpu(target->orig_dgid[3]),
+		       be16_to_cpu(target->orig_dgid[4]),
+		       be16_to_cpu(target->orig_dgid[5]),
+		       be16_to_cpu(target->orig_dgid[6]),
+		       be16_to_cpu(target->orig_dgid[7]));
+}
+
 static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
@@ -1498,6 +1517,7 @@ static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
 static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
 static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
 static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
+static CLASS_DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
 static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
 static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
 static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
@@ -1508,6 +1528,7 @@ static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_service_id,
 	&class_device_attr_pkey,
 	&class_device_attr_dgid,
+	&class_device_attr_orig_dgid,
 	&class_device_attr_zero_req_lim,
 	&class_device_attr_local_ib_port,
 	&class_device_attr_local_ib_device,
@@ -1516,7 +1537,8 @@ static struct class_device_attribute *srp_host_attrs[] = {
 
 static struct scsi_host_template srp_template = {
 	.module = THIS_MODULE,
-	.name = DRV_NAME,
+	.name = "InfiniBand SRP initiator",
+	.proc_name = DRV_NAME,
 	.info = srp_target_info,
 	.queuecommand = srp_queuecommand,
 	.eh_abort_handler = srp_abort,
@@ -1662,6 +1684,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 			target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
 		}
 		kfree(p);
+		memcpy(target->orig_dgid, target->path.dgid.raw, 16);
 		break;
 
 	case SRP_OPT_PKEY:
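
The ib_srp.c hunks above snapshot the user-specified destination GID into orig_dgid before SA path lookups can rewrite path.dgid, and export it as a read-only sysfs attribute. A hedged userspace sketch of reading it (the host number in the path below is illustrative):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/scsi_host/host0/orig_dgid", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("original DGID: %s", buf);
	fclose(f);
	return 0;
}
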
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 2f3319c719a5..1d53c7bc368f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -129,6 +129,7 @@ struct srp_target_port {
 	unsigned int scsi_id;
 
 	struct ib_sa_path_rec path;
+	__be16 orig_dgid[8];
 	struct ib_sa_query *path_query;
 	int path_query_id;
 