author		Linus Torvalds <torvalds@g5.osdl.org>	2005-11-04 19:31:54 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-04 19:31:54 -0500
commit		ba77df570c6710c9c19b31e0e48e4bcdf31cefe8
tree		997c210135b5578982df256dd5a0140e04d972d1
parent		602d4a7e2f4b843d1a67375d4d7104073495b758
parent		d09e32764176b61c4afee9fd5e7fe04713bfa56f

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
 drivers/infiniband/core/agent.c                 |  3
 drivers/infiniband/core/cm.c                    |  6
 drivers/infiniband/core/device.c                | 10
 drivers/infiniband/core/mad.c                   | 31
 drivers/infiniband/core/sysfs.c                 |  6
 drivers/infiniband/core/ucm.c                   |  9
 drivers/infiniband/core/user_mad.c              | 80
 drivers/infiniband/core/uverbs.h                |  1
 drivers/infiniband/core/uverbs_cmd.c            |  1
 drivers/infiniband/core/uverbs_main.c           | 13
 drivers/infiniband/hw/mthca/mthca_cq.c          | 31
 drivers/infiniband/hw/mthca/mthca_dev.h         |  4
 drivers/infiniband/hw/mthca/mthca_eq.c          |  4
 drivers/infiniband/hw/mthca/mthca_main.c        |  2
 drivers/infiniband/hw/mthca/mthca_mr.c          |  4
 drivers/infiniband/hw/mthca/mthca_profile.c     |  4
 drivers/infiniband/hw/mthca/mthca_provider.c    |  2
 drivers/infiniband/hw/mthca/mthca_qp.c          |  7
 drivers/infiniband/hw/mthca/mthca_srq.c         | 13
 drivers/infiniband/ulp/ipoib/ipoib.h            |  3
 drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 13
 drivers/infiniband/ulp/ipoib/ipoib_main.c       | 24
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  8
 drivers/infiniband/ulp/ipoib/ipoib_verbs.c      |  4
 include/rdma/ib_user_cm.h                       | 19
 25 files changed, 178 insertions(+), 124 deletions(-)
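Most of the core, ucm, mthca, and ipoib changes in this merge are one mechanical cleanup: every kmalloc() followed by memset(..., 0, ...) collapses into a single kzalloc() call. A minimal sketch of the pattern (hypothetical pointer f, not taken from any one hunk):

        /* Before: allocate, then zero -- two places to get the size right. */
        f = kmalloc(sizeof *f, GFP_KERNEL);
        if (!f)
                return NULL;
        memset(f, 0, sizeof *f);

        /* After: kzalloc() returns already-zeroed memory, or NULL. */
        f = kzalloc(sizeof *f, GFP_KERNEL);
        if (!f)
                return NULL;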
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0c3c6952faae..7545775d38ef 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -155,13 +155,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
         int ret;
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
                 ret = -ENOMEM;
                 goto error1;
         }
-        memset(port_priv, 0, sizeof *port_priv);
 
         /* Obtain send only MAD agent for SMI QP */
         port_priv->agent[0] = ib_register_mad_agent(device, port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 580c3a2bb102..02110e00d145 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -544,11 +544,10 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
         struct cm_id_private *cm_id_priv;
         int ret;
 
-        cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
+        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
         if (!cm_id_priv)
                 return ERR_PTR(-ENOMEM);
 
-        memset(cm_id_priv, 0, sizeof *cm_id_priv);
         cm_id_priv->id.state = IB_CM_IDLE;
         cm_id_priv->id.device = device;
         cm_id_priv->id.cm_handler = cm_handler;
@@ -621,10 +620,9 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
         struct cm_timewait_info *timewait_info;
 
-        timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
+        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
         if (!timewait_info)
                 return ERR_PTR(-ENOMEM);
-        memset(timewait_info, 0, sizeof *timewait_info);
 
         timewait_info->work.local_id = local_id;
         INIT_WORK(&timewait_info->work.work, cm_work_handler,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5a6e44976405..e169e798354b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -161,17 +161,9 @@ static int alloc_name(char *name)
  */
 struct ib_device *ib_alloc_device(size_t size)
 {
-        void *dev;
-
         BUG_ON(size < sizeof (struct ib_device));
 
-        dev = kmalloc(size, GFP_KERNEL);
-        if (!dev)
-                return NULL;
-
-        memset(dev, 0, size);
-
-        return dev;
+        return kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ib_alloc_device);
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 88f9f8c9eacc..3d8175e5f054 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -255,12 +255,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
         }
 
         /* Allocate structures */
-        mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
+        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
         if (!mad_agent_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
-        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 
         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                  IB_ACCESS_LOCAL_WRITE);
@@ -448,14 +447,13 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                 goto error1;
         }
         /* Allocate structures */
-        mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
         if (!mad_snoop_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
 
         /* Now, fill in the various structures */
-        memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
         mad_snoop_priv->agent.device = device;
         mad_snoop_priv->agent.recv_handler = recv_handler;
@@ -794,10 +792,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
             (!rmpp_active && buf_size > sizeof(struct ib_mad)))
                 return ERR_PTR(-EINVAL);
 
-        buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
+        buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
-        memset(buf, 0, sizeof *mad_send_wr + buf_size);
 
         mad_send_wr = buf + buf_size;
         mad_send_wr->send_buf.mad = buf;
@@ -1039,14 +1036,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
         /* Allocate management method table */
-        *method = kmalloc(sizeof **method, GFP_ATOMIC);
+        *method = kzalloc(sizeof **method, GFP_ATOMIC);
         if (!*method) {
                 printk(KERN_ERR PFX "No memory for "
                        "ib_mad_mgmt_method_table\n");
                 return -ENOMEM;
         }
-        /* Clear management method table */
-        memset(*method, 0, sizeof **method);
 
         return 0;
 }
@@ -1137,15 +1132,14 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
         if (!*class) {
                 /* Allocate management class table for "new" class version */
-                *class = kmalloc(sizeof **class, GFP_ATOMIC);
+                *class = kzalloc(sizeof **class, GFP_ATOMIC);
                 if (!*class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_class_table\n");
                         ret = -ENOMEM;
                         goto error1;
                 }
-                /* Clear management class table */
-                memset(*class, 0, sizeof(**class));
+
                 /* Allocate method table for this management class */
                 method = &(*class)->method_table[mgmt_class];
                 if ((ret = allocate_method_table(method)))
@@ -1209,25 +1203,24 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                         mad_reg_req->mgmt_class_version].vendor;
         if (!*vendor_table) {
                 /* Allocate mgmt vendor class table for "new" class version */
-                vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
+                vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
                 if (!vendor) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class_table\n");
                         goto error1;
                 }
-                /* Clear management vendor class table */
-                memset(vendor, 0, sizeof(*vendor));
+
                 *vendor_table = vendor;
         }
         if (!(*vendor_table)->vendor_class[vclass]) {
                 /* Allocate table for this management vendor class */
-                vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
+                vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
                 if (!vendor_class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class\n");
                         goto error2;
                 }
-                memset(vendor_class, 0, sizeof(*vendor_class));
+
                 (*vendor_table)->vendor_class[vclass] = vendor_class;
         }
         for (i = 0; i < MAX_MGMT_OUI; i++) {
@@ -2524,12 +2517,12 @@ static int ib_mad_port_open(struct ib_device *device,
         char name[sizeof "ib_mad123"];
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                 return -ENOMEM;
         }
-        memset(port_priv, 0, sizeof *port_priv);
+
         port_priv->device = device;
         port_priv->port_num = port_num;
         spin_lock_init(&port_priv->reg_lock);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7ce7a6c782fa..b8120650e711 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -307,14 +307,13 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
         if (!p->ibdev->process_mad)
                 return sprintf(buf, "N/A (no PMA)\n");
 
-        in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
         out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
         if (!in_mad || !out_mad) {
                 ret = -ENOMEM;
                 goto out;
         }
 
-        memset(in_mad, 0, sizeof *in_mad);
         in_mad->mad_hdr.base_version = 1;
         in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
         in_mad->mad_hdr.class_version = 1;
@@ -508,10 +507,9 @@ static int add_port(struct ib_device *device, int port_num)
         if (ret)
                 return ret;
 
-        p = kmalloc(sizeof *p, GFP_KERNEL);
+        p = kzalloc(sizeof *p, GFP_KERNEL);
         if (!p)
                 return -ENOMEM;
-        memset(p, 0, sizeof *p);
 
         p->ibdev = device;
         p->port_num = port_num;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 28477565ecba..6e15787d1de1 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -172,11 +172,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
         struct ib_ucm_context *ctx;
         int result;
 
-        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
         if (!ctx)
                 return NULL;
 
-        memset(ctx, 0, sizeof *ctx);
         atomic_set(&ctx->ref, 1);
         init_waitqueue_head(&ctx->wait);
         ctx->file = file;
@@ -386,11 +385,10 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
 
         ctx = cm_id->context;
 
-        uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
+        uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
         if (!uevent)
                 goto err1;
 
-        memset(uevent, 0, sizeof(*uevent));
         uevent->ctx = ctx;
         uevent->cm_id = cm_id;
         uevent->resp.uid = ctx->uid;
@@ -1345,11 +1343,10 @@ static void ib_ucm_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+        ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
         if (!ucm_dev)
                 return;
 
-        memset(ucm_dev, 0, sizeof *ucm_dev);
         ucm_dev->ib_dev = device;
 
         ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 97128e25f78b..aed5ca23fb22 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -94,6 +94,9 @@ struct ib_umad_port {
         struct class_device *sm_class_dev;
         struct semaphore sm_sem;
 
+        struct rw_semaphore mutex;
+        struct list_head file_list;
+
         struct ib_device *ib_dev;
         struct ib_umad_device *umad_dev;
         int dev_num;
@@ -108,10 +111,10 @@ struct ib_umad_device {
 
 struct ib_umad_file {
         struct ib_umad_port *port;
-        spinlock_t recv_lock;
         struct list_head recv_list;
+        struct list_head port_list;
+        spinlock_t recv_lock;
         wait_queue_head_t recv_wait;
-        struct rw_semaphore agent_mutex;
         struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
         struct ib_mr *mr[IB_UMAD_MAX_AGENTS];
 };
@@ -148,7 +151,7 @@ static int queue_packet(struct ib_umad_file *file,
 {
         int ret = 1;
 
-        down_read(&file->agent_mutex);
+        down_read(&file->port->mutex);
         for (packet->mad.hdr.id = 0;
              packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
              packet->mad.hdr.id++)
@@ -161,7 +164,7 @@ static int queue_packet(struct ib_umad_file *file,
                         break;
                 }
 
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
         return ret;
 }
@@ -322,7 +325,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                 goto err;
         }
 
-        down_read(&file->agent_mutex);
+        down_read(&file->port->mutex);
 
         agent = file->agent[packet->mad.hdr.id];
         if (!agent) {
@@ -419,7 +422,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         if (ret)
                 goto err_msg;
 
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
         return count;
 
@@ -430,7 +433,7 @@ err_ah:
         ib_destroy_ah(ah);
 
 err_up:
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
 err:
         kfree(packet);
@@ -460,7 +463,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
         int agent_id;
         int ret;
 
-        down_write(&file->agent_mutex);
+        down_write(&file->port->mutex);
+
+        if (!file->port->ib_dev) {
+                ret = -EPIPE;
+                goto out;
+        }
 
         if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
                 ret = -EFAULT;
@@ -522,7 +530,7 @@ err:
         ib_unregister_mad_agent(agent);
 
 out:
-        up_write(&file->agent_mutex);
+        up_write(&file->port->mutex);
         return ret;
 }
 
@@ -531,7 +539,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
         u32 id;
         int ret = 0;
 
-        down_write(&file->agent_mutex);
+        down_write(&file->port->mutex);
 
         if (get_user(id, (u32 __user *) arg)) {
                 ret = -EFAULT;
@@ -548,7 +556,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
         file->agent[id] = NULL;
 
 out:
-        up_write(&file->agent_mutex);
+        up_write(&file->port->mutex);
         return ret;
 }
 
@@ -569,6 +577,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
         struct ib_umad_port *port;
         struct ib_umad_file *file;
+        int ret = 0;
 
         spin_lock(&port_lock);
         port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
@@ -579,21 +588,32 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
         if (!port)
                 return -ENXIO;
 
+        down_write(&port->mutex);
+
+        if (!port->ib_dev) {
+                ret = -ENXIO;
+                goto out;
+        }
+
         file = kzalloc(sizeof *file, GFP_KERNEL);
         if (!file) {
                 kref_put(&port->umad_dev->ref, ib_umad_release_dev);
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto out;
         }
 
         spin_lock_init(&file->recv_lock);
-        init_rwsem(&file->agent_mutex);
         INIT_LIST_HEAD(&file->recv_list);
         init_waitqueue_head(&file->recv_wait);
 
         file->port = port;
         filp->private_data = file;
 
-        return 0;
+        list_add_tail(&file->port_list, &port->file_list);
+
+out:
+        up_write(&port->mutex);
+        return ret;
 }
 
 static int ib_umad_close(struct inode *inode, struct file *filp)
@@ -603,6 +623,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
         struct ib_umad_packet *packet, *tmp;
         int i;
 
+        down_write(&file->port->mutex);
         for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                 if (file->agent[i]) {
                         ib_dereg_mr(file->mr[i]);
@@ -612,6 +633,9 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
         list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
                 kfree(packet);
 
+        list_del(&file->port_list);
+        up_write(&file->port->mutex);
+
         kfree(file);
 
         kref_put(&dev->ref, ib_umad_release_dev);
@@ -680,9 +704,13 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
         struct ib_port_modify props = {
                 .clr_port_cap_mask = IB_PORT_SM
         };
-        int ret;
+        int ret = 0;
+
+        down_write(&port->mutex);
+        if (port->ib_dev)
+                ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+        up_write(&port->mutex);
 
-        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
         up(&port->sm_sem);
 
         kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -745,6 +773,8 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
         port->ib_dev   = device;
         port->port_num = port_num;
         init_MUTEX(&port->sm_sem);
+        init_rwsem(&port->mutex);
+        INIT_LIST_HEAD(&port->file_list);
 
         port->dev = cdev_alloc();
         if (!port->dev)
@@ -813,6 +843,9 @@ err_cdev:
 
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
+        struct ib_umad_file *file;
+        int id;
+
         class_set_devdata(port->class_dev, NULL);
         class_set_devdata(port->sm_class_dev, NULL);
 
@@ -826,6 +859,21 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
         umad_port[port->dev_num] = NULL;
         spin_unlock(&port_lock);
 
+        down_write(&port->mutex);
+
+        port->ib_dev = NULL;
+
+        list_for_each_entry(file, &port->file_list, port_list)
+                for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
+                        if (!file->agent[id])
+                                continue;
+                        ib_dereg_mr(file->mr[id]);
+                        ib_unregister_mad_agent(file->agent[id]);
+                        file->agent[id] = NULL;
+                }
+
+        up_write(&port->mutex);
+
         clear_bit(port->dev_num, dev_map);
 }
 
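The user_mad changes above are a hot-unplug rework: the per-file agent_mutex becomes a per-port rw_semaphore plus a file_list, so ib_umad_kill_port() can revoke the device from every open file when the underlying IB device goes away, while opens and registrations racing with removal fail cleanly (-ENXIO / -EPIPE) once port->ib_dev is NULL. A simplified sketch of the teardown side, with hypothetical struct and helper names (port, file_ctx, release_agent), not the literal driver code:

        static void port_kill(struct port *port)
        {
                struct file_ctx *f;
                int id;

                down_write(&port->mutex);
                port->ib_dev = NULL;            /* new opens now fail with -ENXIO */
                list_for_each_entry(f, &port->file_list, port_list)
                        for (id = 0; id < MAX_AGENTS; ++id)
                                release_agent(f, id);   /* hypothetical: dereg MR,
                                                           unregister MAD agent */
                up_write(&port->mutex);
        }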
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 031cdf3c066d..ecb830127865 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -113,6 +113,7 @@ struct ib_uevent_object {
 
 struct ib_ucq_object {
         struct ib_uobject       uobject;
+        struct ib_uverbs_file  *uverbs_file;
         struct list_head        comp_list;
         struct list_head        async_list;
         u32                     comp_events_reported;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8c89abc8c764..63a74151c60b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -602,6 +602,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 
         uobj->uobject.user_handle = cmd.user_handle;
         uobj->uobject.context     = file->ucontext;
+        uobj->uverbs_file         = file;
         uobj->comp_events_reported  = 0;
         uobj->async_events_reported = 0;
         INIT_LIST_HEAD(&uobj->comp_list);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0eb38f479b39..de6581d7cb8d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -442,13 +442,10 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-        struct ib_uverbs_event_file *ev_file = context_ptr;
-        struct ib_ucq_object *uobj;
+        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
+                                                  struct ib_ucq_object, uobject);
 
-        uobj = container_of(event->element.cq->uobject,
-                            struct ib_ucq_object, uobject);
-
-        ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
+        ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
                                 event->event, &uobj->async_list,
                                 &uobj->async_events_reported);
 
@@ -728,12 +725,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        uverbs_dev = kmalloc(sizeof *uverbs_dev, GFP_KERNEL);
+        uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
         if (!uverbs_dev)
                 return;
 
-        memset(uverbs_dev, 0, sizeof *uverbs_dev);
-
         kref_init(&uverbs_dev->ref);
 
         spin_lock(&map_lock);
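The uverbs change fixes CQ async events: the handler used to reach through the completion-channel context for the owning uverbs file, which is not valid for CQs created without a completion channel; now the owning file is recorded in the ib_ucq_object at create time and the handler recovers the containing object with container_of(). A self-contained userspace demo of the container_of() idiom (the in-kernel macro additionally type-checks ptr):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct uobject { int user_handle; };

        struct ucq_object {
                struct uobject uobject;   /* embedded; core hands back &uobject */
                void *uverbs_file;        /* owning file, stashed at create time */
        };

        int main(void)
        {
                struct ucq_object cq = { .uobject = { .user_handle = 42 } };
                struct uobject *uobj = &cq.uobject;
                struct ucq_object *back =
                        container_of(uobj, struct ucq_object, uobject);

                printf("handle %d, recovered original: %s\n",
                       back->uobject.user_handle, back == &cq ? "yes" : "no");
                return 0;
        }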
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 8600b6c3e0c2..f98e23555826 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -208,7 +208,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
         }
 }
 
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
 {
         struct mthca_cq *cq;
 
@@ -224,6 +224,35 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
         cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+                    enum ib_event_type event_type)
+{
+        struct mthca_cq *cq;
+        struct ib_event event;
+
+        spin_lock(&dev->cq_table.lock);
+
+        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+        if (cq)
+                atomic_inc(&cq->refcount);
+        spin_unlock(&dev->cq_table.lock);
+
+        if (!cq) {
+                mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+                return;
+        }
+
+        event.device     = &dev->ib_dev;
+        event.event      = event_type;
+        event.element.cq = &cq->ibcq;
+        if (cq->ibcq.event_handler)
+                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+        if (atomic_dec_and_test(&cq->refcount))
+                wake_up(&cq->wait);
+}
+
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                     struct mthca_srq *srq)
 {
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7e68bd4a3780..e7e5d3b4f004 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -460,7 +460,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                   struct mthca_cq *cq);
 void mthca_free_cq(struct mthca_dev *dev,
                    struct mthca_cq *cq);
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+                    enum ib_event_type event_type);
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                     struct mthca_srq *srq);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e5a047a6dbeb..34d68e5a72d8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -292,7 +292,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
         case MTHCA_EVENT_TYPE_COMP:
                 disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                 disarm_cq(dev, eq->eqn, disarm_cqn);
-                mthca_cq_event(dev, disarm_cqn);
+                mthca_cq_completion(dev, disarm_cqn);
                 break;
 
         case MTHCA_EVENT_TYPE_PATH_MIG:
@@ -364,6 +364,8 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                           eqe->event.cq_err.syndrome == 1 ?
                           "overrun" : "access violation",
                           be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+                mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                               IB_EVENT_CQ_ERR);
                 break;
 
         case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
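With the new mthca_cq_event() dispatching IB_EVENT_CQ_ERR, a CQ overrun or access violation now reaches the consumer's event handler instead of only being logged, while the fast completion path keeps its own entry point (renamed mthca_cq_completion()). The lookup follows the usual pin-then-deliver pattern, sketched here in condensed form (cq_lookup is a hypothetical stand-in for the mthca_array_get() call):

        spin_lock(&dev->cq_table.lock);
        cq = cq_lookup(dev, cqn);               /* find CQ by number */
        if (cq)
                atomic_inc(&cq->refcount);      /* pin: destroy must wait for us */
        spin_unlock(&dev->cq_table.lock);

        if (!cq)
                return;                         /* stale event: CQ already gone */

        if (cq->ibcq.event_handler)
                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);             /* last ref: unblock mthca_free_cq() */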
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 883d1e5a79bc..45c6328e780c 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1057,7 +1057,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
                 goto err_cmd;
 
         if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
-                mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
+                mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
                            (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
                            (int) (mdev->fw_ver & 0xffff),
                            (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 1f97a44477f5..e995e2aa016d 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
         buddy->max_order = max_order;
         spin_lock_init(&buddy->lock);
 
-        buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                               GFP_KERNEL);
         if (!buddy->bits)
                 goto err_out;
 
-        memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
         for (i = 0; i <= buddy->max_order; ++i) {
                 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                 buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index bd1338682074..08a909371b0a 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -82,12 +82,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
         struct mthca_resource tmp;
         int i, j;
 
-        profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+        profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
         if (!profile)
                 return -ENOMEM;
 
-        memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
         profile[MTHCA_RES_QP].size   = dev_lim->qpc_entry_sz;
         profile[MTHCA_RES_EEC].size  = dev_lim->eec_entry_sz;
         profile[MTHCA_RES_SRQ].size  = dev_lim->srq_entry_sz;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1b9477edbd7b..6b0166668269 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1028,7 +1028,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
         struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
-        return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
+        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                        (int) (dev->fw_ver >> 16) & 0xffff,
                        (int) dev->fw_ver & 0xffff);
 }
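Both mthca version strings switch from %x to %d: fw_ver packs major.minor.subminor into bits 63:32, 31:16, and 15:0, and the fields are decimal-meaningful, so hex output misreports anything above 9. A standalone decode for reference:

        #include <stdint.h>
        #include <stdio.h>

        static void print_fw_ver(uint64_t fw_ver)
        {
                printf("%d.%d.%d\n",
                       (int) (fw_ver >> 32),            /* major:    bits 63:32 */
                       (int) (fw_ver >> 16) & 0xffff,   /* minor:    bits 31:16 */
                       (int) (fw_ver & 0xffff));        /* subminor: bits 15:0  */
        }

        int main(void)
        {
                /* 4.7.600: with "%x" the subminor would have shown as 258 (0x258). */
                print_fw_ver(((uint64_t) 4 << 32) | (7 << 16) | 600);
                return 0;
        }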
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 7c9afde5ace5..8852ea477c21 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -584,6 +584,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                 return -EINVAL;
         }
 
+        if ((attr_mask & IB_QP_PKEY_INDEX) &&
+            attr->pkey_index >= dev->limits.pkey_table_len) {
+                mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
+                          attr->pkey_index, dev->limits.pkey_table_len - 1);
+                return -EINVAL;
+        }
+
         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 64f70aa1b3c0..292f55be8cbd 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -75,15 +75,16 @@ static void *get_wqe(struct mthca_srq *srq, int n)
 
 /*
  * Return a pointer to the location within a WQE that we're using as a
- * link when the WQE is in the free list.  We use an offset of 4
- * because in the Tavor case, posting a WQE may overwrite the first
- * four bytes of the previous WQE.  The offset avoids corrupting our
- * free list if the WQE has already completed and been put on the free
- * list when we post the next WQE.
+ * link when the WQE is in the free list.  We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field.  This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
  */
 static inline int *wqe_to_link(void *wqe)
 {
-        return (int *) (wqe + 4);
+        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
 }
 
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
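wqe_to_link() now derives the free-list link offset from the structure layout rather than the magic constant 4, and the new comment states the real invariant: receive WQEs never write the imm field, so it stays valid even when Tavor hardware rewrites the start of the previous WQE. A userspace sketch of the offsetof() idiom (field layout illustrative, not the real struct mthca_next_seg):

        #include <stddef.h>
        #include <stdio.h>

        struct next_seg {
                unsigned int nda_op;    /* hardware may rewrite these first     */
                unsigned int ee_nds;    /*   words when the next WQE is posted  */
                unsigned int flags;
                unsigned int imm;       /* untouched for receive WQEs: safe link */
        };

        static int *wqe_to_link(void *wqe)
        {
                return (int *) ((char *) wqe + offsetof(struct next_seg, imm));
        }

        int main(void)
        {
                struct next_seg wqe = {0};

                *wqe_to_link(&wqe) = 123;       /* store free-list index in imm */
                printf("link at offset %zu, value %u\n",
                       offsetof(struct next_seg, imm), wqe.imm);
                return 0;
        }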
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index c994a916a58a..0095acc0fbbe 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -235,6 +235,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
         kref_put(&ah->ref, ipoib_free_ah);
 }
 
+int ipoib_open(struct net_device *dev);
 int ipoib_add_pkey_attr(struct net_device *dev);
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -267,6 +268,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
@@ -276,6 +278,7 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
                            unsigned int *queuelen,
                            unsigned int *complete,
                            unsigned int *send_only);
+#endif
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
                        union ib_gid *mgid);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 192fef884e21..54ef2fea530f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -486,15 +486,16 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ib_qp_attr qp_attr;
-        int attr_mask;
         unsigned long begin;
         struct ipoib_tx_buf *tx_req;
         int i;
 
-        /* Kill the existing QP and allocate a new one */
+        /*
+         * Move our QP to the error state and then reinitialize
+         * when all work requests have completed or have been flushed.
+         */
         qp_attr.qp_state = IB_QPS_ERR;
-        attr_mask = IB_QP_STATE;
-        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                 ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
 
         /* Wait for all sends and receives to complete */
@@ -541,8 +542,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 
 timeout:
         qp_attr.qp_state = IB_QPS_RESET;
-        attr_mask = IB_QP_STATE;
-        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                 ipoib_warn(priv, "Failed to modify QP to RESET state\n");
 
         /* Wait for all AHs to be reaped */
@@ -636,7 +636,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * Bug #2507.  This implementation will probably be removed when the P_Key
  * change async notification is available.
  */
-int ipoib_open(struct net_device *dev);
 
 static void ipoib_pkey_dev_check_presence(struct net_device *dev)
 {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cd4f42328dbe..ce0296273e76 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -356,18 +356,15 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_path *path;
 
-        path = kmalloc(sizeof *path, GFP_ATOMIC);
+        path = kzalloc(sizeof *path, GFP_ATOMIC);
         if (!path)
                 return NULL;
 
         path->dev = dev;
-        path->pathrec.dlid = 0;
-        path->ah = NULL;
 
         skb_queue_head_init(&path->queue);
 
         INIT_LIST_HEAD(&path->neigh_list);
-        path->query = NULL;
         init_completion(&path->done);
 
         memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
@@ -551,11 +548,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct ipoib_neigh *neigh;
         unsigned long flags;
 
-        local_irq_save(flags);
-        if (!spin_trylock(&priv->tx_lock)) {
-                local_irq_restore(flags);
+        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                 return NETDEV_TX_LOCKED;
-        }
 
         /*
          * Check if our queue is stopped.  Since we have the LLTX bit
@@ -732,25 +726,21 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 
         /* Allocate RX/TX "rings" to hold queued skbs */
 
-        priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+        priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
                                 GFP_KERNEL);
         if (!priv->rx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                        ca->name, IPOIB_RX_RING_SIZE);
                 goto out;
         }
-        memset(priv->rx_ring, 0,
-               IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf));
 
-        priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+        priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
                                 GFP_KERNEL);
         if (!priv->tx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                        ca->name, IPOIB_TX_RING_SIZE);
                 goto out_rx_ring_cleanup;
         }
-        memset(priv->tx_ring, 0,
-               IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf));
 
         /* priv->tx_head & tx_tail are already 0 */
 
@@ -807,10 +797,6 @@ static void ipoib_setup(struct net_device *dev)
 
         dev->watchdog_timeo      = HZ;
 
-        dev->rebuild_header      = NULL;
-        dev->set_mac_address     = NULL;
-        dev->header_cache_update = NULL;
-
         dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
 
         /*
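Besides the kzalloc conversions and dropping NULL assignments that are redundant because alloc_netdev() hands back zeroed memory, the ipoib transmit path now uses spin_trylock_irqsave(), which restores interrupt state itself when the lock is contended. A sketch of the before/after on a hypothetical tx_lock:

        /* Before: three calls, with an explicit restore on the failure path. */
        local_irq_save(flags);
        if (!spin_trylock(&priv->tx_lock)) {
                local_irq_restore(flags);
                return NETDEV_TX_LOCKED;
        }

        /* After: one call; IRQ state is already restored if the lock is busy. */
        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                return NETDEV_TX_LOCKED;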
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 36ce29836bf2..3ecf78a9493a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,10 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 {
         struct ipoib_mcast *mcast;
 
-        mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+        mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
         if (!mcast)
                 return NULL;
 
-        memset(mcast, 0, sizeof (*mcast));
-
         init_completion(&mcast->done);
 
         mcast->dev = dev;
@@ -919,6 +917,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
         ipoib_mcast_start_thread(dev);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
 {
         struct ipoib_mcast_iter *iter;
@@ -991,3 +991,5 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
         *complete = iter->complete;
         *send_only = iter->send_only;
 }
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b5902a7ec240..e829e10400e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -41,7 +41,6 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ib_qp_attr *qp_attr;
-        int attr_mask;
         int ret;
         u16 pkey_index;
 
@@ -59,8 +58,7 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 
         /* set correct QKey for QP */
         qp_attr->qkey = priv->qkey;
-        attr_mask = IB_QP_QKEY;
-        ret = ib_modify_qp(priv->qp, qp_attr, attr_mask);
+        ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
         if (ret) {
                 ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
                 goto out;
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
index 3037588b8464..19be116047f6 100644
--- a/include/rdma/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -38,7 +38,7 @@
 
 #include <linux/types.h>
 
-#define IB_USER_CM_ABI_VERSION 3
+#define IB_USER_CM_ABI_VERSION 4
 
 enum {
         IB_USER_CM_CMD_CREATE_ID,
@@ -84,6 +84,7 @@ struct ib_ucm_create_id_resp {
 struct ib_ucm_destroy_id {
         __u64 response;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_destroy_id_resp {
@@ -93,6 +94,7 @@ struct ib_ucm_destroy_id_resp {
 struct ib_ucm_attr_id {
         __u64 response;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_attr_id_resp {
@@ -164,6 +166,7 @@ struct ib_ucm_listen {
         __be64 service_id;
         __be64 service_mask;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_establish {
@@ -219,7 +222,7 @@ struct ib_ucm_req {
         __u8 rnr_retry_count;
         __u8 max_cm_retries;
         __u8 srq;
-        __u8 reserved[1];
+        __u8 reserved[5];
 };
 
 struct ib_ucm_rep {
@@ -236,6 +239,7 @@ struct ib_ucm_rep {
         __u8 flow_control;
         __u8 rnr_retry_count;
         __u8 srq;
+        __u8 reserved[4];
 };
 
 struct ib_ucm_info {
@@ -245,7 +249,7 @@ struct ib_ucm_info {
         __u64 data;
         __u8 info_len;
         __u8 data_len;
-        __u8 reserved[2];
+        __u8 reserved[6];
 };
 
 struct ib_ucm_mra {
@@ -273,6 +277,7 @@ struct ib_ucm_sidr_req {
         __u16 pkey;
         __u8 len;
         __u8 max_cm_retries;
+        __u8 reserved[4];
 };
 
 struct ib_ucm_sidr_rep {
@@ -284,7 +289,7 @@ struct ib_ucm_sidr_rep {
         __u64 data;
         __u8 info_len;
         __u8 data_len;
-        __u8 reserved[2];
+        __u8 reserved[6];
 };
 /*
  * event notification ABI structures.
@@ -295,7 +300,7 @@ struct ib_ucm_event_get {
         __u64 info;
         __u8 data_len;
         __u8 info_len;
-        __u8 reserved[2];
+        __u8 reserved[6];
 };
 
 struct ib_ucm_req_event_resp {
@@ -315,6 +320,7 @@ struct ib_ucm_req_event_resp {
         __u8 rnr_retry_count;
         __u8 srq;
         __u8 port;
+        __u8 reserved[7];
 };
 
 struct ib_ucm_rep_event_resp {
@@ -329,7 +335,7 @@ struct ib_ucm_rep_event_resp {
         __u8 flow_control;
         __u8 rnr_retry_count;
         __u8 srq;
-        __u8 reserved[1];
+        __u8 reserved[5];
 };
 
 struct ib_ucm_rej_event_resp {
@@ -374,6 +380,7 @@ struct ib_ucm_event_resp {
         __u32 id;
         __u32 event;
         __u32 present;
+        __u32 reserved;
         union {
                 struct ib_ucm_req_event_resp req_resp;
                 struct ib_ucm_rep_event_resp rep_resp;
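The ib_user_cm.h changes bump the ABI to version 4 purely to add explicit reserved padding: structures shared with userspace must have identical layout for 32-bit and 64-bit processes, and a struct whose last member is a __u32 following a __u64 carries 4 trailing padding bytes on x86_64 (8-byte alignment of __u64) but none on i386 (4-byte alignment). A standalone illustration of the fix, using one of the structs above:

        #include <stdint.h>
        #include <stdio.h>

        struct destroy_id_v3 {          /* old layout */
                uint64_t response;
                uint32_t id;
        };                              /* 12 bytes on i386, 16 on x86_64 */

        struct destroy_id_v4 {          /* new layout with explicit padding */
                uint64_t response;
                uint32_t id;
                uint32_t reserved;      /* both ABIs now agree on 16 bytes */
        };

        int main(void)
        {
                printf("v3: %zu bytes, v4: %zu bytes\n",
                       sizeof(struct destroy_id_v3), sizeof(struct destroy_id_v4));
                return 0;
        }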