-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/infiniband/core/ucma.c | 3
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs.h | 1
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 158
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 29
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | 13
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_wc_x86_64.c | 15
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 38
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 38
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 312
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 12
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 68
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 241
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.h | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 183
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_common.h | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_diag.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c | 198
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 26
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 52
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 12
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 20
-rw-r--r--  drivers/infiniband/hw/qib/qib_mmap.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_mr.c | 10
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 10
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 13
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 28
-rw-r--r--  drivers/infiniband/hw/qib/qib_twsi.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_tx.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs_mcast.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_x86_64.c | 7
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 16
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 9
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 27
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h | 23
63 files changed, 1211 insertions, 559 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index d66a97dd3a12..d2357a1da410 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8450,7 +8450,7 @@ S: Maintained
 F:	drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:	Bart Van Assche <bvanassche@acm.org>
+M:	Bart Van Assche <bart.vanassche@sandisk.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.openfabrics.org
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 56a4b7ca7ee3..45d67e9228d7 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
 	if (!optlen)
 		return -EINVAL;
 
+	memset(&sa_path, 0, sizeof(sa_path));
+	sa_path.vlan_id = 0xffff;
+
 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
 	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
 	if (ret)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6095872549e7..8b8cc6fa0ab0 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
 	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
 		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
 				   &context->umem_tree);
-	if (likely(!atomic_read(&context->notifier_count)))
+	if (likely(!atomic_read(&context->notifier_count)) ||
+	    context->odp_mrs_count == 1)
 		umem->odp_data->mn_counters_active = true;
 	else
 		list_add(&umem->odp_data->no_private_counters,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a5..b716b0815644 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7943ff16ed3..a9f048990dfc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,6 +400,52 @@ err:
 	return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+				  struct ib_uverbs_query_device_resp *resp,
+				  struct ib_device_attr *attr)
+{
+	resp->fw_ver = attr->fw_ver;
+	resp->node_guid = file->device->ib_dev->node_guid;
+	resp->sys_image_guid = attr->sys_image_guid;
+	resp->max_mr_size = attr->max_mr_size;
+	resp->page_size_cap = attr->page_size_cap;
+	resp->vendor_id = attr->vendor_id;
+	resp->vendor_part_id = attr->vendor_part_id;
+	resp->hw_ver = attr->hw_ver;
+	resp->max_qp = attr->max_qp;
+	resp->max_qp_wr = attr->max_qp_wr;
+	resp->device_cap_flags = attr->device_cap_flags;
+	resp->max_sge = attr->max_sge;
+	resp->max_sge_rd = attr->max_sge_rd;
+	resp->max_cq = attr->max_cq;
+	resp->max_cqe = attr->max_cqe;
+	resp->max_mr = attr->max_mr;
+	resp->max_pd = attr->max_pd;
+	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
+	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
+	resp->max_res_rd_atom = attr->max_res_rd_atom;
+	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
+	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
+	resp->atomic_cap = attr->atomic_cap;
+	resp->max_ee = attr->max_ee;
+	resp->max_rdd = attr->max_rdd;
+	resp->max_mw = attr->max_mw;
+	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
+	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
+	resp->max_mcast_grp = attr->max_mcast_grp;
+	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
+	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+	resp->max_ah = attr->max_ah;
+	resp->max_fmr = attr->max_fmr;
+	resp->max_map_per_fmr = attr->max_map_per_fmr;
+	resp->max_srq = attr->max_srq;
+	resp->max_srq_wr = attr->max_srq_wr;
+	resp->max_srq_sge = attr->max_srq_sge;
+	resp->max_pkeys = attr->max_pkeys;
+	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
+	resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 			       const char __user *buf,
 			       int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 		return ret;
 
 	memset(&resp, 0, sizeof resp);
-
-	resp.fw_ver = attr.fw_ver;
-	resp.node_guid = file->device->ib_dev->node_guid;
-	resp.sys_image_guid = attr.sys_image_guid;
-	resp.max_mr_size = attr.max_mr_size;
-	resp.page_size_cap = attr.page_size_cap;
-	resp.vendor_id = attr.vendor_id;
-	resp.vendor_part_id = attr.vendor_part_id;
-	resp.hw_ver = attr.hw_ver;
-	resp.max_qp = attr.max_qp;
-	resp.max_qp_wr = attr.max_qp_wr;
-	resp.device_cap_flags = attr.device_cap_flags;
-	resp.max_sge = attr.max_sge;
-	resp.max_sge_rd = attr.max_sge_rd;
-	resp.max_cq = attr.max_cq;
-	resp.max_cqe = attr.max_cqe;
-	resp.max_mr = attr.max_mr;
-	resp.max_pd = attr.max_pd;
-	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
-	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
-	resp.max_res_rd_atom = attr.max_res_rd_atom;
-	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
-	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
-	resp.atomic_cap = attr.atomic_cap;
-	resp.max_ee = attr.max_ee;
-	resp.max_rdd = attr.max_rdd;
-	resp.max_mw = attr.max_mw;
-	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
-	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
-	resp.max_mcast_grp = attr.max_mcast_grp;
-	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
-	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-	resp.max_ah = attr.max_ah;
-	resp.max_fmr = attr.max_fmr;
-	resp.max_map_per_fmr = attr.max_map_per_fmr;
-	resp.max_srq = attr.max_srq;
-	resp.max_srq_wr = attr.max_srq_wr;
-	resp.max_srq_sge = attr.max_srq_sge;
-	resp.max_pkeys = attr.max_pkeys;
-	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
-	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+	copy_query_dev_fields(file, &resp, &attr);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	if (qp->real_qp == qp) {
 		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
 		if (ret)
-			goto out;
+			goto release_qp;
 		ret = qp->device->modify_qp(qp, attr,
 			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
 	} else {
 		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
 	}
 
-	put_qp_read(qp);
-
 	if (ret)
-		goto out;
+		goto release_qp;
 
 	ret = in_len;
 
+release_qp:
+	put_qp_read(qp);
+
 out:
 	kfree(attr);
 
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
 	return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+			      struct ib_udata *ucore,
+			      struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_query_device_resp resp;
+	struct ib_uverbs_ex_query_device cmd;
+	struct ib_device_attr attr;
+	struct ib_device *device;
+	int err;
+
+	device = file->device->ib_dev;
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
+	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+	if (err)
+		return err;
+
+	if (cmd.comp_mask)
+		return -EINVAL;
+
+	if (cmd.reserved)
+		return -EINVAL;
+
+	resp.response_length = offsetof(typeof(resp), odp_caps);
+
+	if (ucore->outlen < resp.response_length)
+		return -ENOSPC;
+
+	err = device->query_device(device, &attr);
+	if (err)
+		return err;
+
+	copy_query_dev_fields(file, &resp.base, &attr);
+	resp.comp_mask = 0;
+
+	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+		goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+	resp.odp_caps.per_transport_caps.rc_odp_caps =
+		attr.odp_caps.per_transport_caps.rc_odp_caps;
+	resp.odp_caps.per_transport_caps.uc_odp_caps =
+		attr.odp_caps.per_transport_caps.uc_odp_caps;
+	resp.odp_caps.per_transport_caps.ud_odp_caps =
+		attr.odp_caps.per_transport_caps.ud_odp_caps;
+	resp.odp_caps.reserved = 0;
+#else
+	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+	resp.response_length += sizeof(resp.odp_caps);
+
+end:
+	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+	if (err)
+		return err;
+
+	return 0;
+}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5db1a8cc388d..259dcc7779f5 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 				    struct ib_udata *uhw) = {
 	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
 	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
+	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c9df0549f51d..4498a89f4ced 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 	struct c4iw_cq *chp;
 	unsigned long flag;
 
+	spin_lock_irqsave(&dev->lock, flag);
 	chp = get_chp(dev, qid);
 	if (chp) {
+		atomic_inc(&chp->refcnt);
+		spin_unlock_irqrestore(&dev->lock, flag);
 		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-	} else
+		if (atomic_dec_and_test(&chp->refcnt))
+			wake_up(&chp->wait);
+	} else {
 		PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+		spin_unlock_irqrestore(&dev->lock, flag);
+	}
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index b5678ac97393..d87e1650f643 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 	return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
 	struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 				 u32 hwtid, u32 qpid,
 				 const char *func)
 {
-	unsigned to = C4IW_WR_TO;
 	int ret;
 
-	do {
-		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-		if (!ret) {
-			printk(KERN_ERR MOD "%s - Device %s not responding - "
-			       "tid %u qpid %u\n", func,
-			       pci_name(rdev->lldi.pdev), hwtid, qpid);
-			if (c4iw_fatal_error(rdev)) {
-				wr_waitp->ret = -EIO;
-				break;
-			}
-			to = to << 2;
-		}
-	} while (!ret);
+	if (c4iw_fatal_error(rdev)) {
+		wr_waitp->ret = -EIO;
+		goto out;
+	}
+
+	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+	if (!ret) {
+		PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+		     func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+		rdev->flags |= T4_FATAL_ERROR;
+		wr_waitp->ret = -EIO;
+	}
+out:
 	if (wr_waitp->ret)
 		PDBG("%s: FW reply %d tid %u qpid %u\n",
 		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6559af60bffd..e08db7020cd4 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
 			  unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 1d7bd82a1fb1..1a7e20a75149 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
 {
 	return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-	return 1;
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 3428acb0868c..4ad0b932df1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
 		dd->ipath_wc_cookie = 0; /* even on failure */
 	}
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-	return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 56a593e0ae5d..39a488889fc7 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
 	*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
 	if (*slave < 0) {
 		mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-			     gid.global.interface_id);
+			     be64_to_cpu(gid.global.interface_id));
 		return -ENOENT;
 	}
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035..cb63ecd2276f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -367,8 +367,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int err;
 
 	mutex_lock(&cq->resize_mutex);
-
-	if (entries < 1) {
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -379,7 +378,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		goto out;
 	}
 
-	if (entries > dev->dev->caps.max_cqes) {
+	if (entries > dev->dev->caps.max_cqes + 1) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -392,7 +391,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	/* Can't be smaller than the number of outstanding CQEs */
 	outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
 	if (entries < outst_cqe + 1) {
-		err = 0;
+		err = -EINVAL;
 		goto out;
 	}
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9117b7a2d5f8..0b280b1c98df 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1222,8 +1222,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1236,8 +1235,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
 				    prot, &reg_id);
-	if (err)
+	if (err) {
+		pr_err("multicast attach op failed, err %d\n", err);
 		goto err_malloc;
+	}
 
 	err = add_gid_entry(ibqp, gid);
 	if (err)
@@ -1285,8 +1286,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
 	u64 reg_id = 0;
-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f..c880329b4d64 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1674,8 +1674,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
 		    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
 			err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-			if (err)
-				return -EINVAL;
+			if (err) {
+				err = -EINVAL;
+				goto out;
+			}
 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
 				dev->qp1_proxy[qp->port - 1] = qp;
 		}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1eda4a602a8..cc4ac1e583b2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+	dev->ib_dev.uverbs_ex_cmd_mask =
+		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32a28bd50b20..cd9822eeacae 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 		goto err_2;
 	}
 	mr->umem = umem;
+	mr->dev = dev;
 	mr->live = 1;
 	kvfree(in);
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b43456ae124b..c9780d919769 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -55,12 +55,19 @@
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
 
 struct ocrdma_dev_attr {
 	u8 fw_ver[32];
 	u32 vendor_id;
 	u32 device_id;
 	u16 max_pd;
+	u16 max_dpp_pds;
 	u16 max_cq;
 	u16 max_cqe;
 	u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
 	bool created;
 };
 
+struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
+	u32 prev_eqd;
+	u64 eq_intr_cnt;
+	u64 prev_eq_intr_cnt;
+};
+
 struct ocrdma_eq {
 	struct ocrdma_queue_info q;
 	u32 vector;
 	int cq_cnt;
 	struct ocrdma_dev *dev;
 	char irq_name[32];
+	struct ocrdma_aic_obj aic_obj;
 };
 
 struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
 	struct ocrdma_dev *dev;
 };
 
+struct ocrdma_pd_resource_mgr {
+	u32 pd_norm_start;
+	u16 pd_norm_count;
+	u16 pd_norm_thrsh;
+	u16 max_normal_pd;
+	u32 pd_dpp_start;
+	u16 pd_dpp_count;
+	u16 pd_dpp_thrsh;
+	u16 max_dpp_pd;
+	u16 dpp_page_index;
+	unsigned long *pd_norm_bitmap;
+	unsigned long *pd_dpp_bitmap;
+	bool pd_prealloc_valid;
+};
+
 struct stats_mem {
 	struct ocrdma_mqe mqe;
 	void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
 
 	struct ocrdma_eq *eq_tbl;
 	int eq_cnt;
+	struct delayed_work eqd_work;
 	u16 base_eqid;
 	u16 max_eq;
 
@@ -255,7 +285,12 @@ struct ocrdma_dev {
 	struct ocrdma_stats rx_qp_err_stats;
 	struct ocrdma_stats tx_dbg_stats;
 	struct ocrdma_stats rx_dbg_stats;
+	struct ocrdma_stats driver_stats;
+	struct ocrdma_stats reset_stats;
 	struct dentry *dir;
+	atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+	atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+	struct ocrdma_pd_resource_mgr *pd_mgr;
 };
 
 struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
 
 struct ocrdma_qp {
 	struct ib_qp ibqp;
-	struct ocrdma_dev *dev;
 
 	u8 __iomem *sq_db;
 	struct ocrdma_qp_hwq_info sq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index f3cc8c9e65ae..d812904f3984 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,22 @@
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 
 #define OCRDMA_VID_PCP_SHIFT	0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-			struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
+			struct ib_ah_attr *attr, union ib_gid *sgid,
+			int pdid, bool *isvlan)
 {
 	int status = 0;
-	u16 vlan_tag; bool vlan_enabled = false;
+	u16 vlan_tag;
 	struct ocrdma_eth_vlan eth;
 	struct ocrdma_grh grh;
 	int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
 		eth.vlan_tag = cpu_to_be16(vlan_tag);
 		eth_sz = sizeof(struct ocrdma_eth_vlan);
-		vlan_enabled = true;
+		*isvlan = true;
 	} else {
 		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
 		eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	/* Eth HDR */
 	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
 	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
-	if (vlan_enabled)
+	if (*isvlan)
 		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
 	ah->av->valid = cpu_to_le32(ah->av->valid);
 	return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 {
 	u32 *ahid_addr;
+	bool isvlan = false;
 	int status;
 	struct ocrdma_ah *ah;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 		}
 	}
 
-	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
+	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
 	if (status)
 		goto av_conf_err;
 
 	/* if pd is for the user process, pass the ah_id to user space */
 	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
 		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
-		*ahid_addr = ah->id;
+		*ahid_addr = 0;
+		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+		if (isvlan)
+			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+				       OCRDMA_AH_VLAN_VALID_SHIFT);
 	}
+
 	return &ah->ibah;
 
 av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
 		       struct ib_grh *in_grh,
 		       struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	return IB_MAD_RESULT_SUCCESS;
+	int status;
+	struct ocrdma_dev *dev;
+
+	switch (in_mad->mad_hdr.mgmt_class) {
+	case IB_MGMT_CLASS_PERF_MGMT:
+		dev = get_ocrdma_dev(ibdev);
+		if (!ocrdma_pma_counters(dev, out_mad))
+			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+		else
+			status = IB_MAD_RESULT_SUCCESS;
+		break;
+	default:
+		status = IB_MAD_RESULT_SUCCESS;
+		break;
+	}
+	return status;
 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 8ac49e7f96d1..726a87cf22dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -28,6 +28,12 @@
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
 
+enum {
+	OCRDMA_AH_ID_MASK		= 0x3FF,
+	OCRDMA_AH_VLAN_VALID_MASK	= 0x01,
+	OCRDMA_AH_VLAN_VALID_SHIFT	= 0x1F
+};
+
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 638bff1ffc6c..0c9e95909a64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
 		break;
 	}
 
+	if (type < OCRDMA_MAX_ASYNC_ERRORS)
+		atomic_inc(&dev->async_err_stats[type]);
+
 	if (qp_event) {
 		if (qp->ibqp.event_handler)
 			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
 	return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-				       struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				struct ocrdma_cq *cq, bool sq)
 {
-	unsigned long flags;
 	struct ocrdma_qp *qp;
-	bool buddy_cq_found = false;
-	/* Go through list of QPs in error state which are using this CQ
-	 * and invoke its callback handler to trigger CQE processing for
-	 * error/flushed CQE. It is rare to find more than few entries in
-	 * this list as most consumers stops after getting error CQE.
-	 * List is traversed only once when a matching buddy cq found for a QP.
-	 */
-	spin_lock_irqsave(&dev->flush_q_lock, flags);
-	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+	struct list_head *cur;
+	struct ocrdma_cq *bcq = NULL;
+	struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+	list_for_each(cur, head) {
+		if (sq)
+			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+		else
+			qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
 		if (qp->srq)
 			continue;
 		/* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
 		 * if completion came on rq, sq's cq is buddy cq.
 		 */
 		if (qp->sq_cq == cq)
-			cq = qp->rq_cq;
+			bcq = qp->rq_cq;
 		else
-			cq = qp->sq_cq;
-		buddy_cq_found = true;
-		break;
+			bcq = qp->sq_cq;
+		return bcq;
 	}
+	return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				       struct ocrdma_cq *cq)
+{
+	unsigned long flags;
+	struct ocrdma_cq *bcq = NULL;
+
+	/* Go through list of QPs in error state which are using this CQ
+	 * and invoke its callback handler to trigger CQE processing for
+	 * error/flushed CQE. It is rare to find more than few entries in
+	 * this list as most consumers stops after getting error CQE.
+	 * List is traversed only once when a matching buddy cq found for a QP.
+	 */
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	/* Check if buddy CQ is present.
+	 * true - Check for SQ CQ
+	 * false - Check for RQ CQ
+	 */
+	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+	if (bcq == NULL)
+		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-	if (buddy_cq_found == false)
-		return;
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
-		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+	/* if there is valid buddy cq, look for its completion handler */
+	if (bcq && bcq->ibcq.comp_handler) {
+		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
 	}
 }
 
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
 	} while (budget);
 
+	eq->aic_obj.eq_intr_cnt++;
 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
 	return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
 	attr->max_pd =
 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+	attr->max_dpp_pds =
+	   (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
 	attr->max_qp =
 	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 	return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	size_t pd_bitmap_size;
+	struct ocrdma_alloc_pd_range *cmd;
+	struct ocrdma_alloc_pd_range_rsp *rsp;
+
+	/* Pre allocate the DPP PDs */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->pd_count = dev->attr.max_dpp_pds;
+	cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+	if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+		dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+		dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+						     GFP_KERNEL);
+	}
+	kfree(cmd);
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+	if (rsp->pd_count) {
+		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_normal_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+						      GFP_KERNEL);
+	}
+
+	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+		/* Enable PD resource manager */
+		dev->pd_mgr->pd_prealloc_valid = true;
+	} else {
+		return -ENOMEM;
+	}
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+	struct ocrdma_dealloc_pd_range *cmd;
+
+	/* return normal PDs to firmware */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		goto mbx_err;
+
+	if (dev->pd_mgr->max_normal_pd) {
+		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+		cmd->pd_count = dev->pd_mgr->max_normal_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+
+	if (dev->pd_mgr->max_dpp_pd) {
+		kfree(cmd);
+		/* return DPP PDs to firmware */
+		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+					  sizeof(*cmd));
+		if (!cmd)
+			goto mbx_err;
+
+		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+mbx_err:
+	kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+	int status;
+
+	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+			      GFP_KERNEL);
+	if (!dev->pd_mgr) {
+		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+		return;
+	}
+	status = ocrdma_mbx_alloc_pd_range(dev);
+	if (status) {
+		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+		       __func__, dev->id);
+	}
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+	ocrdma_mbx_dealloc_pd_range(dev);
+	kfree(dev->pd_mgr->pd_norm_bitmap);
+	kfree(dev->pd_mgr->pd_dpp_bitmap);
+	kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
 			       int *num_pages, int *page_size)
 {
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
 	bool found;
 	unsigned long flags;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
 	if (!found)
 		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 		if (!found)
 			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
 	}
-	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_wqe_allocated;
 	u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa = 0;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 					struct ocrdma_qp *qp)
 {
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	dma_addr_t pa = 0;
 	int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
 	int status = -ENOMEM;
 	u32 flags = 0;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	struct ocrdma_cq *cq;
 	struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	union ib_gid sgid, zgid;
 	u32 vlan_id;
 	u8 mac_addr[6];
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
 		return -EINVAL;
-	if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-		ocrdma_init_service_level(qp->dev);
+	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+		ocrdma_init_service_level(dev);
 	cmd->params.tclass_sq_psn |=
 	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
 	cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
 	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
 	       sizeof(cmd->params.dgid));
-	status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+	status = ocrdma_query_gid(&dev->ibdev, 1,
 			ah_attr->grh.sgid_index, &sgid);
 	if (status)
 		return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
 	qp->sgid_idx = ah_attr->grh.sgid_index;
 	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-	ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+	if (status)
+		return status;
 	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
 				(mac_addr[2] << 16) | (mac_addr[3] << 24);
 	/* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 				vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
 		cmd->params.rnt_rc_sl_fl |=
-			(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
 	}
 	return 0;
 }
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 				struct ib_qp_attr *attrs, int attr_mask)
 {
 	int status = 0;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 			return status;
 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
 		/* set the default mac address for UD, GSI QPs */
-		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-			(qp->dev->nic_info.mac_addr[1] << 8) |
-			(qp->dev->nic_info.mac_addr[2] << 16) |
-			(qp->dev->nic_info.mac_addr[3] << 24);
-		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-					(qp->dev->nic_info.mac_addr[5] << 8);
+		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+			(dev->nic_info.mac_addr[1] << 8) |
+			(dev->nic_info.mac_addr[2] << 16) |
+			(dev->nic_info.mac_addr[3] << 24);
+		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+					(dev->nic_info.mac_addr[5] << 8);
 	}
 	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
 	    attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2870,6 +3023,82 @@ done:
 	return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+				 int num)
+{
+	int i, status = -ENOMEM;
+	struct ocrdma_modify_eqd_req *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->cmd.num_eq = num;
+	for (i = 0; i < num; i++) {
+		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+		cmd->cmd.set_eqd[i].phase = 0;
+		cmd->cmd.set_eqd[i].delay_multiplier =
+				(eq[i].aic_obj.prev_eqd * 65)/100;
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+			     int num)
+{
+	int num_eqs, i = 0;
+	if (num > 8) {
+		while (num) {
+			num_eqs = min(num, 8);
+			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+			i += num_eqs;
+			num -= num_eqs;
+		}
+	} else {
+		ocrdma_mbx_modify_eqd(dev, eq, num);
+	}
+	return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+	struct ocrdma_dev *dev =
+		container_of(work, struct ocrdma_dev, eqd_work.work);
+	struct ocrdma_eq *eq = 0;
+	int i, num = 0, status = -EINVAL;
+	u64 eq_intr;
+
+	for (i = 0; i < dev->eq_cnt; i++) {
+		eq = &dev->eq_tbl[i];
+		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+			eq_intr = eq->aic_obj.eq_intr_cnt -
+				  eq->aic_obj.prev_eq_intr_cnt;
+			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+				num++;
+			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+				num++;
+			}
+		}
+		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+	}
+
+	if (num)
+		status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
 	int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+	ocrdma_free_pd_pool(dev);
 	ocrdma_mbx_delete_ah_tbl(dev);
 
 	/* cleanup the eqs */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 6eed8f191322..e905972fceb7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
 char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
 
 #endif				/* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b0b2257b8e04..7a2b59aca004 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
239 239
240 dev->ibdev.node_type = RDMA_NODE_IB_CA; 240 dev->ibdev.node_type = RDMA_NODE_IB_CA;
241 dev->ibdev.phys_port_cnt = 1; 241 dev->ibdev.phys_port_cnt = 1;
242 dev->ibdev.num_comp_vectors = 1; 242 dev->ibdev.num_comp_vectors = dev->eq_cnt;
243 243
244 /* mandatory verbs. */ 244 /* mandatory verbs. */
245 dev->ibdev.query_device = ocrdma_query_device; 245 dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
329 if (dev->stag_arr == NULL) 329 if (dev->stag_arr == NULL)
330 goto alloc_err; 330 goto alloc_err;
331 331
332 ocrdma_alloc_pd_pool(dev);
333
332 spin_lock_init(&dev->av_tbl.lock); 334 spin_lock_init(&dev->av_tbl.lock);
333 spin_lock_init(&dev->flush_q_lock); 335 spin_lock_init(&dev->flush_q_lock);
334 return 0; 336 return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
491 spin_unlock(&ocrdma_devlist_lock); 493 spin_unlock(&ocrdma_devlist_lock);
492 /* Init stats */ 494 /* Init stats */
493 ocrdma_add_port_stats(dev); 495 ocrdma_add_port_stats(dev);
496 /* Interrupt Moderation */
497 INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
498 schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
494 499
495 pr_info("%s %s: %s \"%s\" port %d\n", 500 pr_info("%s %s: %s \"%s\" port %d\n",
496 dev_name(&dev->nic_info.pdev->dev), hca_name(dev), 501 dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
528 /* first unregister with stack to stop all the active traffic 533 /* first unregister with stack to stop all the active traffic
529 * of the registered clients. 534 * of the registered clients.
530 */ 535 */
531 ocrdma_rem_port_stats(dev); 536 cancel_delayed_work_sync(&dev->eqd_work);
532 ocrdma_remove_sysfiles(dev); 537 ocrdma_remove_sysfiles(dev);
533
534 ib_unregister_device(&dev->ibdev); 538 ib_unregister_device(&dev->ibdev);
535 539
540 ocrdma_rem_port_stats(dev);
541
536 spin_lock(&ocrdma_devlist_lock); 542 spin_lock(&ocrdma_devlist_lock);
537 list_del_rcu(&dev->entry); 543 list_del_rcu(&dev->entry);
538 spin_unlock(&ocrdma_devlist_lock); 544 spin_unlock(&ocrdma_devlist_lock);
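The ordering in the remove path matters: the delayed work re-arms itself every second, so it is cancelled with cancel_delayed_work_sync() before anything is torn down, and the port stats are now removed only after ib_unregister_device() has stopped upper-layer traffic. A compile-and-run sketch of that teardown ordering, with stub functions standing in for the kernel and driver calls:

#include <stdio.h>

/* stubs standing in for the real kernel/driver APIs */
static void cancel_delayed_work_sync_stub(void) { puts("stop eqd worker"); }
static void remove_sysfiles_stub(void)          { puts("remove sysfs files"); }
static void ib_unregister_device_stub(void)     { puts("unregister ib device"); }
static void rem_port_stats_stub(void)           { puts("remove debugfs stats"); }

static void ocrdma_remove_model(void)
{
	/* 1. make sure no worker can fire while we tear down */
	cancel_delayed_work_sync_stub();
	remove_sysfiles_stub();

	/* 2. stop upper-layer traffic */
	ib_unregister_device_stub();

	/* 3. only now is it safe to free the stats infrastructure */
	rem_port_stats_stub();
}

int main(void)
{
	ocrdma_remove_model();
	return 0;
}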
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 4e036480c1a8..243c87c8bd65 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -75,6 +75,8 @@ enum {
75 OCRDMA_CMD_DESTROY_RBQ = 26, 75 OCRDMA_CMD_DESTROY_RBQ = 26,
76 76
77 OCRDMA_CMD_GET_RDMA_STATS = 27, 77 OCRDMA_CMD_GET_RDMA_STATS = 27,
78 OCRDMA_CMD_ALLOC_PD_RANGE = 28,
79 OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
78 80
79 OCRDMA_CMD_MAX 81 OCRDMA_CMD_MAX
80}; 82};
@@ -87,6 +89,7 @@ enum {
87 OCRDMA_CMD_CREATE_MQ = 21, 89 OCRDMA_CMD_CREATE_MQ = 21,
88 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, 90 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
89 OCRDMA_CMD_GET_FW_VER = 35, 91 OCRDMA_CMD_GET_FW_VER = 35,
92 OCRDMA_CMD_MODIFY_EQ_DELAY = 41,
90 OCRDMA_CMD_DELETE_MQ = 53, 93 OCRDMA_CMD_DELETE_MQ = 53,
91 OCRDMA_CMD_DELETE_CQ = 54, 94 OCRDMA_CMD_DELETE_CQ = 54,
92 OCRDMA_CMD_DELETE_EQ = 55, 95 OCRDMA_CMD_DELETE_EQ = 55,
@@ -101,7 +104,7 @@ enum {
101 QTYPE_MCCQ = 3 104 QTYPE_MCCQ = 3
102}; 105};
103 106
104#define OCRDMA_MAX_SGID 8 107#define OCRDMA_MAX_SGID 16
105 108
106#define OCRDMA_MAX_QP 2048 109#define OCRDMA_MAX_QP 2048
107#define OCRDMA_MAX_CQ 2048 110#define OCRDMA_MAX_CQ 2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
314 317
315#define OCRDMA_EQ_MINOR_OTHER 0x1 318#define OCRDMA_EQ_MINOR_OTHER 0x1
316 319
320struct ocrmda_set_eqd {
321 u32 eq_id;
322 u32 phase;
323 u32 delay_multiplier;
324};
325
326struct ocrdma_modify_eqd_cmd {
327 struct ocrdma_mbx_hdr req;
328 u32 num_eq;
329 struct ocrmda_set_eqd set_eqd[8];
330} __packed;
331
332struct ocrdma_modify_eqd_req {
333 struct ocrdma_mqe_hdr hdr;
334 struct ocrdma_modify_eqd_cmd cmd;
335};
336
337
338struct ocrdma_modify_eq_delay_rsp {
339 struct ocrdma_mbx_rsp hdr;
340 u32 rsvd0;
341} __packed;
342
317enum { 343enum {
318 OCRDMA_MCQE_STATUS_SHIFT = 0, 344 OCRDMA_MCQE_STATUS_SHIFT = 0,
319 OCRDMA_MCQE_STATUS_MASK = 0xFFFF, 345 OCRDMA_MCQE_STATUS_MASK = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
441 OCRDMA_DEVICE_FATAL_EVENT = 0x08, 467 OCRDMA_DEVICE_FATAL_EVENT = 0x08,
442 OCRDMA_SRQCAT_ERROR = 0x0E, 468 OCRDMA_SRQCAT_ERROR = 0x0E,
443 OCRDMA_SRQ_LIMIT_EVENT = 0x0F, 469 OCRDMA_SRQ_LIMIT_EVENT = 0x0F,
444 OCRDMA_QP_LAST_WQE_EVENT = 0x10 470 OCRDMA_QP_LAST_WQE_EVENT = 0x10,
471
472 OCRDMA_MAX_ASYNC_ERRORS
445}; 473};
446 474
447/* mailbox command request and responses */ 475/* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
1297 struct ocrdma_mbx_rsp rsp; 1325 struct ocrdma_mbx_rsp rsp;
1298}; 1326};
1299 1327
1328struct ocrdma_alloc_pd_range {
1329 struct ocrdma_mqe_hdr hdr;
1330 struct ocrdma_mbx_hdr req;
1331 u32 enable_dpp_rsvd;
1332 u32 pd_count;
1333};
1334
1335struct ocrdma_alloc_pd_range_rsp {
1336 struct ocrdma_mqe_hdr hdr;
1337 struct ocrdma_mbx_rsp rsp;
1338 u32 dpp_page_pdid;
1339 u32 pd_count;
1340};
1341
1342enum {
1343 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
1344};
1345
1346struct ocrdma_dealloc_pd_range {
1347 struct ocrdma_mqe_hdr hdr;
1348 struct ocrdma_mbx_hdr req;
1349 u32 start_pd_id;
1350 u32 pd_count;
1351};
1352
1353struct ocrdma_dealloc_pd_range_rsp {
1354 struct ocrdma_mqe_hdr hdr;
1355 struct ocrdma_mbx_hdr req;
1356 u32 rsvd;
1357};
1358
1300enum { 1359enum {
1301 OCRDMA_ADDR_CHECK_ENABLE = 1, 1360 OCRDMA_ADDR_CHECK_ENABLE = 1,
1302 OCRDMA_ADDR_CHECK_DISABLE = 0 1361 OCRDMA_ADDR_CHECK_DISABLE = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
1597 OCRDMA_CQE_INV_EEC_STATE_ERR, 1656 OCRDMA_CQE_INV_EEC_STATE_ERR,
1598 OCRDMA_CQE_FATAL_ERR, 1657 OCRDMA_CQE_FATAL_ERR,
1599 OCRDMA_CQE_RESP_TIMEOUT_ERR, 1658 OCRDMA_CQE_RESP_TIMEOUT_ERR,
1600 OCRDMA_CQE_GENERAL_ERR 1659 OCRDMA_CQE_GENERAL_ERR,
1660
1661 OCRDMA_MAX_CQE_ERR
1601}; 1662};
1602 1663
1603enum { 1664enum {
@@ -1673,6 +1734,7 @@ enum {
1673 OCRDMA_FLAG_FENCE_R = 0x8, 1734 OCRDMA_FLAG_FENCE_R = 0x8,
1674 OCRDMA_FLAG_SOLICIT = 0x10, 1735 OCRDMA_FLAG_SOLICIT = 0x10,
1675 OCRDMA_FLAG_IMM = 0x20, 1736 OCRDMA_FLAG_IMM = 0x20,
1737 OCRDMA_FLAG_AH_VLAN_PR = 0x40,
1676 1738
1677 /* Stag flags */ 1739 /* Stag flags */
1678 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, 1740 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 41a9aec9998d..48d7ef51aa0c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -26,6 +26,7 @@
26 *******************************************************************/ 26 *******************************************************************/
27 27
28#include <rdma/ib_addr.h> 28#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h>
29#include "ocrdma_stats.h" 30#include "ocrdma_stats.h"
30 31
31static struct dentry *ocrdma_dbgfs_dir; 32static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
249 return stats; 250 return stats;
250} 251}
251 252
253static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
254{
255 struct ocrdma_rdma_stats_resp *rdma_stats =
256 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
257 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
258
259 return convert_to_64bit(rx_stats->roce_frames_lo,
260 rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
261 + (u64)rx_stats->roce_frame_payload_len_drops;
262}
263
264static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
265{
266 struct ocrdma_rdma_stats_resp *rdma_stats =
267 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
268 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
269
270 return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
271 rx_stats->roce_frame_bytes_hi))/4;
272}
273
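port_rcv_data (and port_xmit_data further down) are PMA counters, which count in units of 32-bit words, hence the divide by four after combining the lo/hi halves. A standalone illustration of the 64-bit combine and the byte-to-dword conversion, assuming convert_to_64bit is the usual (hi << 32) | lo helper:

#include <stdio.h>
#include <stdint.h>

/* assumed equivalent of the driver's convert_to_64bit() helper */
static uint64_t combine64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t bytes_lo = 0x80000000u, bytes_hi = 0x2;	/* example values */
	uint64_t bytes  = combine64(bytes_lo, bytes_hi);
	uint64_t dwords = bytes / 4;	/* PMA data counters are in 32-bit words */

	printf("bytes=%llu dwords=%llu\n",
	       (unsigned long long)bytes, (unsigned long long)dwords);
	return 0;
}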
252static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 274static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
253{ 275{
254 char *stats = dev->stats_mem.debugfs_mem, *pcur; 276 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
292 return stats; 314 return stats;
293} 315}
294 316
317static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
318{
319 struct ocrdma_rdma_stats_resp *rdma_stats =
320 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
321 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
322
323 return (convert_to_64bit(tx_stats->send_pkts_lo,
324 tx_stats->send_pkts_hi) +
325 convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
326 convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
327 convert_to_64bit(tx_stats->read_rsp_pkts_lo,
328 tx_stats->read_rsp_pkts_hi) +
329 convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
330}
331
332static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
333{
334 struct ocrdma_rdma_stats_resp *rdma_stats =
335 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
336 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
337
338 return (convert_to_64bit(tx_stats->send_bytes_lo,
339 tx_stats->send_bytes_hi) +
340 convert_to_64bit(tx_stats->write_bytes_lo,
341 tx_stats->write_bytes_hi) +
342 convert_to_64bit(tx_stats->read_req_bytes_lo,
343 tx_stats->read_req_bytes_hi) +
344 convert_to_64bit(tx_stats->read_rsp_bytes_lo,
345 tx_stats->read_rsp_bytes_hi))/4;
346}
347
295static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) 348static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
296{ 349{
297 char *stats = dev->stats_mem.debugfs_mem, *pcur; 350 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
432 return dev->stats_mem.debugfs_mem; 485 return dev->stats_mem.debugfs_mem;
433} 486}
434 487
488static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
489{
490 char *stats = dev->stats_mem.debugfs_mem, *pcur;
491
492
493 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
494
495 pcur = stats;
496 pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
497 (u64)(dev->async_err_stats
498 [OCRDMA_CQ_ERROR].counter));
499 pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
500 (u64)dev->async_err_stats
501 [OCRDMA_CQ_OVERRUN_ERROR].counter);
502 pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
503 (u64)dev->async_err_stats
504 [OCRDMA_CQ_QPCAT_ERROR].counter);
505 pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
506 (u64)dev->async_err_stats
507 [OCRDMA_QP_ACCESS_ERROR].counter);
508 pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
509 (u64)dev->async_err_stats
510 [OCRDMA_QP_COMM_EST_EVENT].counter);
511 pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
512 (u64)dev->async_err_stats
513 [OCRDMA_SQ_DRAINED_EVENT].counter);
514 pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
515 (u64)dev->async_err_stats
516 [OCRDMA_DEVICE_FATAL_EVENT].counter);
517 pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
518 (u64)dev->async_err_stats
519 [OCRDMA_SRQCAT_ERROR].counter);
520 pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
521 (u64)dev->async_err_stats
522 [OCRDMA_SRQ_LIMIT_EVENT].counter);
523 pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
524 (u64)dev->async_err_stats
525 [OCRDMA_QP_LAST_WQE_EVENT].counter);
526
527 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
528 (u64)dev->cqe_err_stats
529 [OCRDMA_CQE_LOC_LEN_ERR].counter);
530 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
531 (u64)dev->cqe_err_stats
532 [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
533 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
534 (u64)dev->cqe_err_stats
535 [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
536 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
537 (u64)dev->cqe_err_stats
538 [OCRDMA_CQE_LOC_PROT_ERR].counter);
539 pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
540 (u64)dev->cqe_err_stats
541 [OCRDMA_CQE_WR_FLUSH_ERR].counter);
542 pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
543 (u64)dev->cqe_err_stats
544 [OCRDMA_CQE_MW_BIND_ERR].counter);
545 pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
546 (u64)dev->cqe_err_stats
547 [OCRDMA_CQE_BAD_RESP_ERR].counter);
548 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
549 (u64)dev->cqe_err_stats
550 [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
551 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
552 (u64)dev->cqe_err_stats
553 [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
554 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
555 (u64)dev->cqe_err_stats
556 [OCRDMA_CQE_REM_ACCESS_ERR].counter);
557 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
558 (u64)dev->cqe_err_stats
559 [OCRDMA_CQE_REM_OP_ERR].counter);
560 pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
561 (u64)dev->cqe_err_stats
562 [OCRDMA_CQE_RETRY_EXC_ERR].counter);
563 pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
564 (u64)dev->cqe_err_stats
565 [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
566 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
567 (u64)dev->cqe_err_stats
568 [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
569 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
570 (u64)dev->cqe_err_stats
571 [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
572 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
573 (u64)dev->cqe_err_stats
574 [OCRDMA_CQE_REM_ABORT_ERR].counter);
575 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
576 (u64)dev->cqe_err_stats
577 [OCRDMA_CQE_INV_EECN_ERR].counter);
578 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
579 (u64)dev->cqe_err_stats
580 [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
581 pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
582 (u64)dev->cqe_err_stats
583 [OCRDMA_CQE_FATAL_ERR].counter);
584 pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
585 (u64)dev->cqe_err_stats
586 [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
587 pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
588 (u64)dev->cqe_err_stats
589 [OCRDMA_CQE_GENERAL_ERR].counter);
590 return stats;
591}
592
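The function above is a long run of near-identical ocrdma_add_stat calls; the same output could be produced from a table of {name, index} pairs. A standalone sketch of that alternative shape, with shortened names and faked counters (not the driver's code):

#include <stdio.h>
#include <string.h>

struct stat_desc {
	const char *name;
	int idx;		/* index into the counter array */
};

static const struct stat_desc cqe_stats[] = {
	{ "cqe_loc_len_err",   0 },
	{ "cqe_loc_qp_op_err", 1 },
	{ "cqe_loc_prot_err",  3 },
	/* ... one entry per status code ... */
};

int main(void)
{
	unsigned long counters[32] = { [0] = 4, [1] = 1, [3] = 7 };
	char buf[256] = "";
	size_t i;

	for (i = 0; i < sizeof(cqe_stats) / sizeof(cqe_stats[0]); i++) {
		char line[64];

		snprintf(line, sizeof(line), "%s = %lu\n",
			 cqe_stats[i].name, counters[cqe_stats[i].idx]);
		strncat(buf, line, sizeof(buf) - strlen(buf) - 1);
	}
	fputs(buf, stdout);
	return 0;
}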
435static void ocrdma_update_stats(struct ocrdma_dev *dev) 593static void ocrdma_update_stats(struct ocrdma_dev *dev)
436{ 594{
437 ulong now = jiffies, secs; 595 ulong now = jiffies, secs;
438 int status = 0; 596 int status = 0;
597 struct ocrdma_rdma_stats_resp *rdma_stats =
598 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
599 struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
439 600
440 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 601 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
441 if (secs) { 602 if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
444 if (status) 605 if (status)
445 pr_err("%s: stats mbox failed with status = %d\n", 606 pr_err("%s: stats mbox failed with status = %d\n",
446 __func__, status); 607 __func__, status);
608 /* Update PD counters from PD resource manager */
609 if (dev->pd_mgr->pd_prealloc_valid) {
610 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
611 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
 612 /* Threshold stats */
613 rsrc_stats = &rdma_stats->th_rsrc_stats;
614 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
615 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
616 }
447 dev->last_stats_time = jiffies; 617 dev->last_stats_time = jiffies;
448 } 618 }
449} 619}
450 620
621static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
622 const char __user *buffer,
623 size_t count, loff_t *ppos)
624{
625 char tmp_str[32];
626 long reset;
627 int status = 0;
628 struct ocrdma_stats *pstats = filp->private_data;
629 struct ocrdma_dev *dev = pstats->dev;
630
631 if (count > 32)
632 goto err;
633
634 if (copy_from_user(tmp_str, buffer, count))
635 goto err;
636
637 tmp_str[count-1] = '\0';
638 if (kstrtol(tmp_str, 10, &reset))
639 goto err;
640
641 switch (pstats->type) {
642 case OCRDMA_RESET_STATS:
643 if (reset) {
644 status = ocrdma_mbx_rdma_stats(dev, true);
645 if (status) {
646 pr_err("Failed to reset stats = %d", status);
647 goto err;
648 }
649 }
650 break;
651 default:
652 goto err;
653 }
654
655 return count;
656err:
657 return -EFAULT;
658}
659
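The write handler above copies at most 32 bytes from user space, NUL-terminates the buffer, and treats any nonzero integer as a request to reset the hardware stats. A standalone model of the same parse-and-validate step using strtol (kstrtol is kernel-only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* returns 0 on success, -1 on malformed input */
static int parse_reset(const char *user_buf, size_t count, long *reset)
{
	char tmp[32];
	char *end;

	if (count == 0 || count > sizeof(tmp))
		return -1;
	memcpy(tmp, user_buf, count);
	tmp[count - 1] = '\0';		/* same truncation as the driver */

	errno = 0;
	*reset = strtol(tmp, &end, 10);
	if (errno || end == tmp)
		return -1;
	return 0;
}

int main(void)
{
	long reset;

	if (parse_reset("1\n", 2, &reset) == 0 && reset)
		puts("would issue stats-reset mailbox command");
	return 0;
}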
660int ocrdma_pma_counters(struct ocrdma_dev *dev,
661 struct ib_mad *out_mad)
662{
663 struct ib_pma_portcounters *pma_cnt;
664
665 memset(out_mad->data, 0, sizeof out_mad->data);
666 pma_cnt = (void *)(out_mad->data + 40);
667 ocrdma_update_stats(dev);
668
669 pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
670 pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
671 pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
672 pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
673 return 0;
674}
675
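The PMA PortCounters attribute starts 40 bytes into the MAD data area and holds big-endian 32-bit fields, which is why the counters gathered above are stored with cpu_to_be32. A standalone sketch of packing counters big-endian into a buffer at that offset; the field offsets inside the attribute are illustrative, not taken from ib_pma.h:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl */

#define MAD_DATA_SIZE   232
#define PMA_ATTR_OFFSET 40	/* PortCounters payload starts here in the MAD data */

/* store a counter big-endian at a given offset inside the attribute */
static void put_be32(uint8_t *attr, size_t off, uint32_t val)
{
	uint32_t be = htonl(val);

	memcpy(attr + off, &be, sizeof(be));
}

int main(void)
{
	uint8_t data[MAD_DATA_SIZE] = { 0 };
	uint8_t *pma = data + PMA_ATTR_OFFSET;

	/* offsets below are illustrative only */
	put_be32(pma, 0,  123456);	/* xmit data (dwords) */
	put_be32(pma, 4,  654321);	/* rcv data (dwords) */
	put_be32(pma, 8,  1000);	/* xmit packets */
	put_be32(pma, 12, 2000);	/* rcv packets */

	printf("first attr byte: 0x%02x\n", pma[0]);
	return 0;
}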
451static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, 676static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
452 size_t usr_buf_len, loff_t *ppos) 677 size_t usr_buf_len, loff_t *ppos)
453{ 678{
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
492 case OCRDMA_RX_DBG_STATS: 717 case OCRDMA_RX_DBG_STATS:
493 data = ocrdma_rx_dbg_stats(dev); 718 data = ocrdma_rx_dbg_stats(dev);
494 break; 719 break;
720 case OCRDMA_DRV_STATS:
721 data = ocrdma_driver_dbg_stats(dev);
722 break;
495 723
496 default: 724 default:
497 status = -EFAULT; 725 status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
514 .owner = THIS_MODULE, 742 .owner = THIS_MODULE,
515 .open = simple_open, 743 .open = simple_open,
516 .read = ocrdma_dbgfs_ops_read, 744 .read = ocrdma_dbgfs_ops_read,
745 .write = ocrdma_dbgfs_ops_write,
517}; 746};
518 747
519void ocrdma_add_port_stats(struct ocrdma_dev *dev) 748void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
582 &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 811 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
583 goto err; 812 goto err;
584 813
814 dev->driver_stats.type = OCRDMA_DRV_STATS;
815 dev->driver_stats.dev = dev;
816 if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
817 &dev->driver_stats, &ocrdma_dbg_ops))
818 goto err;
819
820 dev->reset_stats.type = OCRDMA_RESET_STATS;
821 dev->reset_stats.dev = dev;
822 if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
823 &dev->reset_stats, &ocrdma_dbg_ops))
824 goto err;
825
585 /* Now create dma_mem for stats mbx command */ 826 /* Now create dma_mem for stats mbx command */
586 if (!ocrdma_alloc_stats_mem(dev)) 827 if (!ocrdma_alloc_stats_mem(dev))
587 goto err; 828 goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 5f5e20c46d7c..091edd68a8a3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
43 OCRDMA_RXQP_ERRSTATS, 43 OCRDMA_RXQP_ERRSTATS,
44 OCRDMA_TXQP_ERRSTATS, 44 OCRDMA_TXQP_ERRSTATS,
45 OCRDMA_TX_DBG_STATS, 45 OCRDMA_TX_DBG_STATS,
46 OCRDMA_RX_DBG_STATS 46 OCRDMA_RX_DBG_STATS,
47 OCRDMA_DRV_STATS,
48 OCRDMA_RESET_STATS
47}; 49};
48 50
49void ocrdma_rem_debugfs(void); 51void ocrdma_rem_debugfs(void);
50void ocrdma_init_debugfs(void); 52void ocrdma_init_debugfs(void);
51void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 53void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
52void ocrdma_add_port_stats(struct ocrdma_dev *dev); 54void ocrdma_add_port_stats(struct ocrdma_dev *dev);
55int ocrdma_pma_counters(struct ocrdma_dev *dev,
56 struct ib_mad *out_mad);
53 57
54#endif /* __OCRDMA_STATS_H__ */ 58#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index fb8d8c4dfbb9..877175563634 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
53 53
54 dev = get_ocrdma_dev(ibdev); 54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid)); 55 memset(sgid, 0, sizeof(*sgid));
56 if (index > OCRDMA_MAX_SGID) 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL; 57 return -EINVAL;
58 58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
253 return found; 253 return found;
254} 254}
255 255
256
257static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
258{
259 u16 pd_bitmap_idx = 0;
260 const unsigned long *pd_bitmap;
261
262 if (dpp_pool) {
263 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
264 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
265 dev->pd_mgr->max_dpp_pd);
266 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
267 dev->pd_mgr->pd_dpp_count++;
268 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
269 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
270 } else {
271 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
272 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
273 dev->pd_mgr->max_normal_pd);
274 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
275 dev->pd_mgr->pd_norm_count++;
276 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
277 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
278 }
279 return pd_bitmap_idx;
280}
281
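PD pre-allocation hands out IDs from a fixed range with a bitmap: find the first clear bit, set it, and track a high-water mark for the threshold stats. A standalone model of that allocator over a small range (the real driver uses the kernel bitmap helpers and keeps separate DPP and normal pools):

#include <stdio.h>
#include <stdint.h>

#define POOL_SIZE 64		/* small pool for illustration */

struct pd_pool {
	uint64_t bitmap;	/* one bit per PD in the range */
	unsigned int start;	/* first hardware PD id of the range */
	unsigned int count;	/* currently allocated */
	unsigned int high;	/* high-water mark */
};

/* returns the allocated PD id, or -1 if the pool is exhausted */
static int pd_alloc(struct pd_pool *p)
{
	unsigned int bit;

	for (bit = 0; bit < POOL_SIZE; bit++) {
		if (!(p->bitmap & (1ULL << bit))) {
			p->bitmap |= 1ULL << bit;
			p->count++;
			if (p->count > p->high)
				p->high = p->count;
			return p->start + bit;
		}
	}
	return -1;
}

static int pd_free(struct pd_pool *p, int pd_id)
{
	unsigned int bit = pd_id - p->start;

	if (bit >= POOL_SIZE || !(p->bitmap & (1ULL << bit)))
		return -1;	/* out of range or not allocated */
	p->bitmap &= ~(1ULL << bit);
	p->count--;
	return 0;
}

int main(void)
{
	struct pd_pool pool = { .start = 0x100 };
	int id = pd_alloc(&pool);

	printf("allocated pd %#x, freed: %d\n", (unsigned)id, pd_free(&pool, id));
	return 0;
}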
282static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
283 bool dpp_pool)
284{
285 u16 pd_count;
286 u16 pd_bit_index;
287
288 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
289 dev->pd_mgr->pd_norm_count;
290 if (pd_count == 0)
291 return -EINVAL;
292
293 if (dpp_pool) {
294 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
295 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
296 return -EINVAL;
297 } else {
298 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
299 dev->pd_mgr->pd_dpp_count--;
300 }
301 } else {
302 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
303 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
304 return -EINVAL;
305 } else {
306 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
307 dev->pd_mgr->pd_norm_count--;
308 }
309 }
310
311 return 0;
312}
313
314static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 bool dpp_pool)
316{
317 int status;
318
319 mutex_lock(&dev->dev_lock);
320 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
321 mutex_unlock(&dev->dev_lock);
322 return status;
323}
324
325static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326{
327 u16 pd_idx = 0;
328 int status = 0;
329
330 mutex_lock(&dev->dev_lock);
331 if (pd->dpp_enabled) {
332 /* try allocating DPP PD, if not available then normal PD */
333 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
334 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
335 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
336 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
337 } else if (dev->pd_mgr->pd_norm_count <
338 dev->pd_mgr->max_normal_pd) {
339 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
340 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
341 pd->dpp_enabled = false;
342 } else {
343 status = -EINVAL;
344 }
345 } else {
346 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
347 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
348 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 } else {
350 status = -EINVAL;
351 }
352 }
353 mutex_unlock(&dev->dev_lock);
354 return status;
355}
356
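For DPP-capable consumers the allocator tries the DPP range first and silently falls back to a normal PD when that range is exhausted, clearing dpp_enabled so the caller knows which kind it got. A short self-contained sketch of only that fallback decision, with stubbed pool allocators:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the two pools; return -1 when a pool is exhausted */
static int alloc_from_dpp_pool(void)    { return -1; }	/* pretend DPP is full */
static int alloc_from_normal_pool(void) { return 0x42; }

struct pd { int id; bool dpp_enabled; };

static int get_pd(struct pd *pd, bool want_dpp)
{
	int id;

	if (want_dpp) {
		id = alloc_from_dpp_pool();
		if (id < 0) {
			id = alloc_from_normal_pool();	/* fall back */
			pd->dpp_enabled = false;
		} else {
			pd->dpp_enabled = true;
		}
	} else {
		id = alloc_from_normal_pool();
	}
	if (id < 0)
		return -1;
	pd->id = id;
	return 0;
}

int main(void)
{
	struct pd pd = { 0 };

	if (!get_pd(&pd, true))
		printf("pd id=%#x dpp=%d\n", (unsigned)pd.id, pd.dpp_enabled);
	return 0;
}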
256static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, 357static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
257 struct ocrdma_ucontext *uctx, 358 struct ocrdma_ucontext *uctx,
258 struct ib_udata *udata) 359 struct ib_udata *udata)
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
272 dev->attr.wqe_size) : 0; 373 dev->attr.wqe_size) : 0;
273 } 374 }
274 375
376 if (dev->pd_mgr->pd_prealloc_valid) {
377 status = ocrdma_get_pd_num(dev, pd);
378 return (status == 0) ? pd : ERR_PTR(status);
379 }
380
275retry: 381retry:
276 status = ocrdma_mbx_alloc_pd(dev, pd); 382 status = ocrdma_mbx_alloc_pd(dev, pd);
277 if (status) { 383 if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
299{ 405{
300 int status = 0; 406 int status = 0;
301 407
302 status = ocrdma_mbx_dealloc_pd(dev, pd); 408 if (dev->pd_mgr->pd_prealloc_valid)
409 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
410 else
411 status = ocrdma_mbx_dealloc_pd(dev, pd);
412
303 kfree(pd); 413 kfree(pd);
304 return status; 414 return status;
305} 415}
@@ -325,7 +435,6 @@ err:
325 435
326static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) 436static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
327{ 437{
328 int status = 0;
329 struct ocrdma_pd *pd = uctx->cntxt_pd; 438 struct ocrdma_pd *pd = uctx->cntxt_pd;
330 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
331 440
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
334 __func__, dev->id, pd->id); 443 __func__, dev->id, pd->id);
335 } 444 }
336 uctx->cntxt_pd = NULL; 445 uctx->cntxt_pd = NULL;
337 status = _ocrdma_dealloc_pd(dev, pd); 446 (void)_ocrdma_dealloc_pd(dev, pd);
338 return status; 447 return 0;
339} 448}
340 449
341static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) 450static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
569 if (is_uctx_pd) { 678 if (is_uctx_pd) {
570 ocrdma_release_ucontext_pd(uctx); 679 ocrdma_release_ucontext_pd(uctx);
571 } else { 680 } else {
572 status = ocrdma_mbx_dealloc_pd(dev, pd); 681 status = _ocrdma_dealloc_pd(dev, pd);
573 kfree(pd); 682 kfree(pd);
574 } 683 }
575exit: 684exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
837{ 946{
838 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 947 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
839 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); 948 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
840 int status;
841 949
842 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 950 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
843 951
844 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 952 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
845 953
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
850 958
851 /* Don't stop cleanup, in case FW is unresponsive */ 959 /* Don't stop cleanup, in case FW is unresponsive */
852 if (dev->mqe_ctx.fw_error_state) { 960 if (dev->mqe_ctx.fw_error_state) {
853 status = 0;
854 pr_err("%s(%d) fw not responding.\n", 961 pr_err("%s(%d) fw not responding.\n",
855 __func__, dev->id); 962 __func__, dev->id);
856 } 963 }
857 return status; 964 return 0;
858} 965}
859 966
860static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, 967static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
986 1093
987int ocrdma_destroy_cq(struct ib_cq *ibcq) 1094int ocrdma_destroy_cq(struct ib_cq *ibcq)
988{ 1095{
989 int status;
990 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1096 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
991 struct ocrdma_eq *eq = NULL; 1097 struct ocrdma_eq *eq = NULL;
992 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 1098 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1003 synchronize_irq(irq); 1109 synchronize_irq(irq);
1004 ocrdma_flush_cq(cq); 1110 ocrdma_flush_cq(cq);
1005 1111
1006 status = ocrdma_mbx_destroy_cq(dev, cq); 1112 (void)ocrdma_mbx_destroy_cq(dev, cq);
1007 if (cq->ucontext) { 1113 if (cq->ucontext) {
1008 pdid = cq->ucontext->cntxt_pd->id; 1114 pdid = cq->ucontext->cntxt_pd->id;
1009 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, 1115 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1014 } 1120 }
1015 1121
1016 kfree(cq); 1122 kfree(cq);
1017 return status; 1123 return 0;
1018} 1124}
1019 1125
1020static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 1126static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1113 int status = 0; 1219 int status = 0;
1114 u64 usr_db; 1220 u64 usr_db;
1115 struct ocrdma_create_qp_uresp uresp; 1221 struct ocrdma_create_qp_uresp uresp;
1116 struct ocrdma_dev *dev = qp->dev;
1117 struct ocrdma_pd *pd = qp->pd; 1222 struct ocrdma_pd *pd = qp->pd;
1223 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1118 1224
1119 memset(&uresp, 0, sizeof(uresp)); 1225 memset(&uresp, 0, sizeof(uresp));
1120 usr_db = dev->nic_info.unmapped_db + 1226 usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1253 status = -ENOMEM; 1359 status = -ENOMEM;
1254 goto gen_err; 1360 goto gen_err;
1255 } 1361 }
1256 qp->dev = dev;
1257 ocrdma_set_qp_init_params(qp, pd, attrs); 1362 ocrdma_set_qp_init_params(qp, pd, attrs);
1258 if (udata == NULL) 1363 if (udata == NULL)
1259 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | 1364 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1312 enum ib_qp_state old_qps; 1417 enum ib_qp_state old_qps;
1313 1418
1314 qp = get_ocrdma_qp(ibqp); 1419 qp = get_ocrdma_qp(ibqp);
1315 dev = qp->dev; 1420 dev = get_ocrdma_dev(ibqp->device);
1316 if (attr_mask & IB_QP_STATE) 1421 if (attr_mask & IB_QP_STATE)
1317 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); 1422 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1318 /* if new and previous states are same hw doesn't need to 1423 /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1335 enum ib_qp_state old_qps, new_qps; 1440 enum ib_qp_state old_qps, new_qps;
1336 1441
1337 qp = get_ocrdma_qp(ibqp); 1442 qp = get_ocrdma_qp(ibqp);
1338 dev = qp->dev; 1443 dev = get_ocrdma_dev(ibqp->device);
1339 1444
1340 /* syncronize with multiple context trying to change, retrive qps */ 1445 /* syncronize with multiple context trying to change, retrive qps */
1341 mutex_lock(&dev->dev_lock); 1446 mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1402 u32 qp_state; 1507 u32 qp_state;
1403 struct ocrdma_qp_params params; 1508 struct ocrdma_qp_params params;
1404 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1509 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1405 struct ocrdma_dev *dev = qp->dev; 1510 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1406 1511
1407 memset(&params, 0, sizeof(params)); 1512 memset(&params, 0, sizeof(params));
1408 mutex_lock(&dev->dev_lock); 1513 mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1412 goto mbx_err; 1517 goto mbx_err;
1413 if (qp->qp_type == IB_QPT_UD) 1518 if (qp->qp_type == IB_QPT_UD)
1414 qp_attr->qkey = params.qkey; 1519 qp_attr->qkey = params.qkey;
1415 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1416 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1417 qp_attr->path_mtu = 1520 qp_attr->path_mtu =
1418 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & 1521 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1419 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> 1522 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1468 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 1571 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1469 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> 1572 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1470 OCRDMA_QP_PARAMS_STATE_SHIFT; 1573 OCRDMA_QP_PARAMS_STATE_SHIFT;
1574 qp_attr->qp_state = get_ibqp_state(qp_state);
1575 qp_attr->cur_qp_state = qp_attr->qp_state;
1471 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; 1576 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1472 qp_attr->max_dest_rd_atomic = 1577 qp_attr->max_dest_rd_atomic =
1473 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; 1578 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1475 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; 1580 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1476 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & 1581 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1477 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; 1582 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1583 /* Sync driver QP state with FW */
1584 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1478mbx_err: 1585mbx_err:
1479 return status; 1586 return status;
1480} 1587}
1481 1588
1482static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) 1589static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1483{ 1590{
1484 int i = idx / 32; 1591 unsigned int i = idx / 32;
1485 unsigned int mask = (1 << (idx % 32)); 1592 u32 mask = (1U << (idx % 32));
1486 1593
1487 if (srq->idx_bit_fields[i] & mask) 1594 srq->idx_bit_fields[i] ^= mask;
1488 srq->idx_bit_fields[i] &= ~mask;
1489 else
1490 srq->idx_bit_fields[i] |= mask;
1491} 1595}
1492 1596
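The if/else that flipped one bit in the SRQ index bitmap is replaced above by a single XOR; the two forms are equivalent. A tiny standalone check:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static void toggle_if_else(uint32_t *word, unsigned int idx)
{
	uint32_t mask = 1U << (idx % 32);

	if (*word & mask)
		*word &= ~mask;
	else
		*word |= mask;
}

static void toggle_xor(uint32_t *word, unsigned int idx)
{
	*word ^= 1U << (idx % 32);
}

int main(void)
{
	uint32_t a = 0xdeadbeef, b = 0xdeadbeef;
	unsigned int idx;

	for (idx = 0; idx < 64; idx++) {
		toggle_if_else(&a, idx);
		toggle_xor(&b, idx);
		assert(a == b);
	}
	puts("equivalent");
	return 0;
}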
1493static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) 1597static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1596{ 1700{
1597 int found = false; 1701 int found = false;
1598 unsigned long flags; 1702 unsigned long flags;
1599 struct ocrdma_dev *dev = qp->dev; 1703 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1600 /* sync with any active CQ poll */ 1704 /* sync with any active CQ poll */
1601 1705
1602 spin_lock_irqsave(&dev->flush_q_lock, flags); 1706 spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1613 1717
1614int ocrdma_destroy_qp(struct ib_qp *ibqp) 1718int ocrdma_destroy_qp(struct ib_qp *ibqp)
1615{ 1719{
1616 int status;
1617 struct ocrdma_pd *pd; 1720 struct ocrdma_pd *pd;
1618 struct ocrdma_qp *qp; 1721 struct ocrdma_qp *qp;
1619 struct ocrdma_dev *dev; 1722 struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1622 unsigned long flags; 1725 unsigned long flags;
1623 1726
1624 qp = get_ocrdma_qp(ibqp); 1727 qp = get_ocrdma_qp(ibqp);
1625 dev = qp->dev; 1728 dev = get_ocrdma_dev(ibqp->device);
1626 1729
1627 attrs.qp_state = IB_QPS_ERR; 1730 attrs.qp_state = IB_QPS_ERR;
1628 pd = qp->pd; 1731 pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1635 * discarded until the old CQEs are discarded. 1738 * discarded until the old CQEs are discarded.
1636 */ 1739 */
1637 mutex_lock(&dev->dev_lock); 1740 mutex_lock(&dev->dev_lock);
1638 status = ocrdma_mbx_destroy_qp(dev, qp); 1741 (void) ocrdma_mbx_destroy_qp(dev, qp);
1639 1742
1640 /* 1743 /*
1641 * acquire CQ lock while destroy is in progress, in order to 1744 * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1670 kfree(qp->wqe_wr_id_tbl); 1773 kfree(qp->wqe_wr_id_tbl);
1671 kfree(qp->rqe_wr_id_tbl); 1774 kfree(qp->rqe_wr_id_tbl);
1672 kfree(qp); 1775 kfree(qp);
1673 return status; 1776 return 0;
1674} 1777}
1675 1778
1676static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, 1779static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1831 else 1934 else
1832 ud_hdr->qkey = wr->wr.ud.remote_qkey; 1935 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1833 ud_hdr->rsvd_ahid = ah->id; 1936 ud_hdr->rsvd_ahid = ah->id;
1937 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1938 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1834} 1939}
1835 1940
1836static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, 1941static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2007 u64 fbo; 2112 u64 fbo;
2008 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2113 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2009 struct ocrdma_mr *mr; 2114 struct ocrdma_mr *mr;
2115 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2010 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); 2116 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2011 2117
2012 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2118 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2013 2119
2014 if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) 2120 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2015 return -EINVAL; 2121 return -EINVAL;
2016 2122
2017 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); 2123 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2039 fast_reg->size_sge = 2145 fast_reg->size_sge =
2040 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2146 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2041 mr = (struct ocrdma_mr *) (unsigned long) 2147 mr = (struct ocrdma_mr *) (unsigned long)
2042 qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2148 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2043 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2149 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2044 return 0; 2150 return 0;
2045} 2151}
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2112 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); 2218 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2113 status = ocrdma_build_write(qp, hdr, wr); 2219 status = ocrdma_build_write(qp, hdr, wr);
2114 break; 2220 break;
2115 case IB_WR_RDMA_READ_WITH_INV:
2116 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2117 case IB_WR_RDMA_READ: 2221 case IB_WR_RDMA_READ:
2118 ocrdma_build_read(qp, hdr, wr); 2222 ocrdma_build_read(qp, hdr, wr);
2119 break; 2223 break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2484 bool *polled, bool *stop) 2588 bool *polled, bool *stop)
2485{ 2589{
2486 bool expand; 2590 bool expand;
2591 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2487 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2592 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2488 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2593 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2594 if (status < OCRDMA_MAX_CQE_ERR)
2595 atomic_inc(&dev->cqe_err_stats[status]);
2489 2596
2490 /* when hw sq is empty, but rq is not empty, so we continue 2597 /* when hw sq is empty, but rq is not empty, so we continue
2491 * to keep the cqe in order to get the cq event again. 2598 * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2604 int status) 2711 int status)
2605{ 2712{
2606 bool expand; 2713 bool expand;
2714 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2715
2716 if (status < OCRDMA_MAX_CQE_ERR)
2717 atomic_inc(&dev->cqe_err_stats[status]);
2607 2718
2608 /* when hw_rq is empty, but wq is not empty, so continue 2719 /* when hw_rq is empty, but wq is not empty, so continue
2609 * to keep the cqe to get the cq event again. 2720 * to keep the cqe to get the cq event again.
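The CQE completion-status code indexes straight into cqe_err_stats, so the range check against OCRDMA_MAX_CQE_ERR (added to the enum earlier in this series) keeps a malformed status from incrementing memory past the array. A standalone model of the guarded increment:

#include <stdio.h>

enum { ERR_LOC_LEN, ERR_LOC_QP_OP, ERR_GENERAL, MAX_CQE_ERR };

static unsigned long cqe_err_stats[MAX_CQE_ERR];

static void count_cqe_error(int status)
{
	/* only statuses inside the known range are counted */
	if (status >= 0 && status < MAX_CQE_ERR)
		cqe_err_stats[status]++;
}

int main(void)
{
	count_cqe_error(ERR_GENERAL);
	count_cqe_error(42);		/* out of range: ignored */
	printf("general errors: %lu\n", cqe_err_stats[ERR_GENERAL]);
	return 0;
}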
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c00ae093b6f8..ffd48bfc4923 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1082,12 +1082,6 @@ struct qib_devdata {
1082 /* control high-level access to EEPROM */ 1082 /* control high-level access to EEPROM */
1083 struct mutex eep_lock; 1083 struct mutex eep_lock;
1084 uint64_t traffic_wds; 1084 uint64_t traffic_wds;
1085 /* active time is kept in seconds, but logged in hours */
1086 atomic_t active_time;
1087 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
1088 uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
1089 uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
1090 uint16_t eep_hrs;
1091 /* 1085 /*
1092 * masks for which bits of errs, hwerrs that cause 1086 * masks for which bits of errs, hwerrs that cause
1093 * each of the counters to increment. 1087 * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1309int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, 1303int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1310 const void *buffer, int len); 1304 const void *buffer, int len);
1311void qib_get_eeprom_info(struct qib_devdata *); 1305void qib_get_eeprom_info(struct qib_devdata *);
1312int qib_update_eeprom_log(struct qib_devdata *dd); 1306#define qib_inc_eeprom_err(dd, eidx, incr)
1313void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1314void qib_dump_lookup_output_queue(struct qib_devdata *); 1307void qib_dump_lookup_output_queue(struct qib_devdata *);
1315void qib_force_pio_avail_update(struct qib_devdata *); 1308void qib_force_pio_avail_update(struct qib_devdata *);
1316void qib_clear_symerror_on_linkup(unsigned long opaque); 1309void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
1467 * Flush write combining store buffers (if present) and perform a write 1460 * Flush write combining store buffers (if present) and perform a write
1468 * barrier. 1461 * barrier.
1469 */ 1462 */
1463static inline void qib_flush_wc(void)
1464{
1470#if defined(CONFIG_X86_64) 1465#if defined(CONFIG_X86_64)
1471#define qib_flush_wc() asm volatile("sfence" : : : "memory") 1466 asm volatile("sfence" : : : "memory");
1472#else 1467#else
1473#define qib_flush_wc() wmb() /* no reorder around wc flush */ 1468 wmb(); /* no reorder around wc flush */
1474#endif 1469#endif
1470}
1475 1471
1476/* global module parameter variables */ 1472/* global module parameter variables */
1477extern unsigned qib_ibmtu; 1473extern unsigned qib_ibmtu;
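Converting qib_flush_wc() from a macro to a static inline keeps the sfence/wmb behaviour while giving it a real function type. A user-space analogue of the same shape, using a C11 fence on non-x86 builds (the kernel version uses sfence on x86_64 and wmb() elsewhere):

#include <stdio.h>
#include <stdatomic.h>

/* user-space stand-in for the driver's write-combining flush */
static inline void flush_wc(void)
{
#if defined(__x86_64__)
	__asm__ __volatile__("sfence" ::: "memory");
#else
	atomic_thread_fence(memory_order_release);
#endif
}

int main(void)
{
	volatile int mmio_like = 42;	/* pretend this is a WC mapping */

	(void)mmio_like;
	flush_wc();			/* order prior stores before continuing */
	puts("flushed");
	return 0;
}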
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 5670ace27c63..4fb78abd8ba1 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -257,7 +257,7 @@ struct qib_base_info {
257 257
258 /* shared memory page for send buffer disarm status */ 258 /* shared memory page for send buffer disarm status */
259 __u64 spi_sendbuf_status; 259 __u64 spi_sendbuf_status;
260} __attribute__ ((aligned(8))); 260} __aligned(8);
261 261
262/* 262/*
263 * This version number is given to the driver by the user code during 263 * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
361 */ 361 */
362 __u64 spu_base_info; 362 __u64 spu_base_info;
363 363
364} __attribute__ ((aligned(8))); 364} __aligned(8);
365 365
366/* User commands. */ 366/* User commands. */
367 367
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 6abd3ed3cd51..5e75b43c596b 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
255 DEBUGFS_FILE_CREATE(opcode_stats); 255 DEBUGFS_FILE_CREATE(opcode_stats);
256 DEBUGFS_FILE_CREATE(ctx_stats); 256 DEBUGFS_FILE_CREATE(ctx_stats);
257 DEBUGFS_FILE_CREATE(qp_stats); 257 DEBUGFS_FILE_CREATE(qp_stats);
258 return;
259} 258}
260 259
261void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) 260void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 5dfda4c5cc9c..8c34b23e5bf6 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
85 client_pool = dc->next; 85 client_pool = dc->next;
86 else 86 else
87 /* None in pool, alloc and init */ 87 /* None in pool, alloc and init */
88 dc = kmalloc(sizeof *dc, GFP_KERNEL); 88 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
89 89
90 if (dc) { 90 if (dc) {
91 dc->next = NULL; 91 dc->next = NULL;
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
257 if (dd->userbase) { 257 if (dd->userbase) {
258 /* If user regs mapped, they are after send, so set limit. */ 258 /* If user regs mapped, they are after send, so set limit. */
259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; 259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
260
260 if (!dd->piovl15base) 261 if (!dd->piovl15base)
261 snd_lim = dd->uregbase; 262 snd_lim = dd->uregbase;
262 krb32 = (u32 __iomem *)dd->userbase; 263 krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
280 snd_bottom = dd->pio2k_bufbase; 281 snd_bottom = dd->pio2k_bufbase;
281 if (snd_lim == 0) { 282 if (snd_lim == 0) {
282 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); 283 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
284
283 snd_lim = snd_bottom + tot2k; 285 snd_lim = snd_bottom + tot2k;
284 } 286 }
285 /* If 4k buffers exist, account for them by bumping 287 /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
398 /* not very efficient, but it works for now */ 400 /* not very efficient, but it works for now */
399 while (reg_addr < reg_end) { 401 while (reg_addr < reg_end) {
400 u64 data; 402 u64 data;
403
401 if (copy_from_user(&data, uaddr, sizeof(data))) { 404 if (copy_from_user(&data, uaddr, sizeof(data))) {
402 ret = -EFAULT; 405 ret = -EFAULT;
403 goto bail; 406 goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
698 701
699 if (!dd || !op) 702 if (!dd || !op)
700 return -EINVAL; 703 return -EINVAL;
701 olp = vmalloc(sizeof *olp); 704 olp = vmalloc(sizeof(*olp));
702 if (!olp) { 705 if (!olp) {
703 pr_err("vmalloc for observer failed\n"); 706 pr_err("vmalloc for observer failed\n");
704 return -ENOMEM; 707 return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
796 op = diag_get_observer(dd, *off); 799 op = diag_get_observer(dd, *off);
797 if (op) { 800 if (op) {
798 u32 offset = *off; 801 u32 offset = *off;
802
799 ret = op->hook(dd, op, offset, &data64, 0, use_32); 803 ret = op->hook(dd, op, offset, &data64, 0, use_32);
800 } 804 }
801 /* 805 /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
873 if (count == 4 || count == 8) { 877 if (count == 4 || count == 8) {
874 u64 data64; 878 u64 data64;
875 u32 offset = *off; 879 u32 offset = *off;
880
876 ret = copy_from_user(&data64, data, count); 881 ret = copy_from_user(&data64, data, count);
877 if (ret) { 882 if (ret) {
878 ret = -EFAULT; 883 ret = -EFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5bee08f16d74..f58fdc3d25a2 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
86{ 86{
87 static char iname[16]; 87 static char iname[16];
88 88
89 snprintf(iname, sizeof iname, "infinipath%u", unit); 89 snprintf(iname, sizeof(iname), "infinipath%u", unit);
90 return iname; 90 return iname;
91} 91}
92 92
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
350 if (qp_num != QIB_MULTICAST_QPN) { 350 if (qp_num != QIB_MULTICAST_QPN) {
351 int ruc_res; 351 int ruc_res;
352
352 qp = qib_lookup_qpn(ibp, qp_num); 353 qp = qib_lookup_qpn(ibp, qp_num);
353 if (!qp) 354 if (!qp)
354 goto drop; 355 goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
461 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; 462 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
462 if (dd->flags & QIB_NODMA_RTAIL) { 463 if (dd->flags & QIB_NODMA_RTAIL) {
463 u32 seq = qib_hdrget_seq(rhf_addr); 464 u32 seq = qib_hdrget_seq(rhf_addr);
465
464 if (seq != rcd->seq_cnt) 466 if (seq != rcd->seq_cnt)
465 goto bail; 467 goto bail;
466 hdrqtail = 0; 468 hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
651int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) 653int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
652{ 654{
653 struct qib_devdata *dd = ppd->dd; 655 struct qib_devdata *dd = ppd->dd;
656
654 ppd->lid = lid; 657 ppd->lid = lid;
655 ppd->lmc = lmc; 658 ppd->lmc = lmc;
656 659
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71aaa2b4..311ee6c3dd5e 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
153 153
154 if (t && dd0->nguid > 1 && t <= dd0->nguid) { 154 if (t && dd0->nguid > 1 && t <= dd0->nguid) {
155 u8 oguid; 155 u8 oguid;
156
156 dd->base_guid = dd0->base_guid; 157 dd->base_guid = dd0->base_guid;
157 bguid = (u8 *) &dd->base_guid; 158 bguid = (u8 *) &dd->base_guid;
158 159
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
251 * This board has a Serial-prefix, which is stored 252 * This board has a Serial-prefix, which is stored
252 * elsewhere for backward-compatibility. 253 * elsewhere for backward-compatibility.
253 */ 254 */
254 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); 255 memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
255 snp[sizeof ifp->if_sprefix] = '\0'; 256 snp[sizeof(ifp->if_sprefix)] = '\0';
256 len = strlen(snp); 257 len = strlen(snp);
257 snp += len; 258 snp += len;
258 len = (sizeof dd->serial) - len; 259 len = sizeof(dd->serial) - len;
259 if (len > sizeof ifp->if_serial) 260 if (len > sizeof(ifp->if_serial))
260 len = sizeof ifp->if_serial; 261 len = sizeof(ifp->if_serial);
261 memcpy(snp, ifp->if_serial, len); 262 memcpy(snp, ifp->if_serial, len);
262 } else 263 } else {
263 memcpy(dd->serial, ifp->if_serial, 264 memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
264 sizeof ifp->if_serial); 265 }
265 if (!strstr(ifp->if_comment, "Tested successfully")) 266 if (!strstr(ifp->if_comment, "Tested successfully"))
266 qib_dev_err(dd, 267 qib_dev_err(dd,
267 "Board SN %s did not pass functional test: %s\n", 268 "Board SN %s did not pass functional test: %s\n",
268 dd->serial, ifp->if_comment); 269 dd->serial, ifp->if_comment);
269 270
270 memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
271 /*
272 * Power-on (actually "active") hours are kept as little-endian value
273 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
274 * atomic_t while running.
275 */
276 atomic_set(&dd->active_time, 0);
277 dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
278
279done: 271done:
280 vfree(buf); 272 vfree(buf);
281 273
282bail:; 274bail:;
283} 275}
284 276
285/**
286 * qib_update_eeprom_log - copy active-time and error counters to eeprom
287 * @dd: the qlogic_ib device
288 *
289 * Although the time is kept as seconds in the qib_devdata struct, it is
290 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
291 * First-cut code reads whole (expected) struct qib_flash, modifies,
292 * re-writes. Future direction: read/write only what we need, assuming
293 * that the EEPROM had to have been "good enough" for driver init, and
294 * if not, we aren't making it worse.
295 *
296 */
297int qib_update_eeprom_log(struct qib_devdata *dd)
298{
299 void *buf;
300 struct qib_flash *ifp;
301 int len, hi_water;
302 uint32_t new_time, new_hrs;
303 u8 csum;
304 int ret, idx;
305 unsigned long flags;
306
307 /* first, check if we actually need to do anything. */
308 ret = 0;
309 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
310 if (dd->eep_st_new_errs[idx]) {
311 ret = 1;
312 break;
313 }
314 }
315 new_time = atomic_read(&dd->active_time);
316
317 if (ret == 0 && new_time < 3600)
318 goto bail;
319
320 /*
321 * The quick-check above determined that there is something worthy
322 * of logging, so get current contents and do a more detailed idea.
323 * read full flash, not just currently used part, since it may have
324 * been written with a newer definition
325 */
326 len = sizeof(struct qib_flash);
327 buf = vmalloc(len);
328 ret = 1;
329 if (!buf) {
330 qib_dev_err(dd,
331 "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
332 len);
333 goto bail;
334 }
335
336 /* Grab semaphore and read current EEPROM. If we get an
337 * error, let go, but if not, keep it until we finish write.
338 */
339 ret = mutex_lock_interruptible(&dd->eep_lock);
340 if (ret) {
341 qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
342 goto free_bail;
343 }
344 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
345 if (ret) {
346 mutex_unlock(&dd->eep_lock);
347 qib_dev_err(dd, "Unable read EEPROM for logging\n");
348 goto free_bail;
349 }
350 ifp = (struct qib_flash *)buf;
351
352 csum = flash_csum(ifp, 0);
353 if (csum != ifp->if_csum) {
354 mutex_unlock(&dd->eep_lock);
355 qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
356 csum, ifp->if_csum);
357 ret = 1;
358 goto free_bail;
359 }
360 hi_water = 0;
361 spin_lock_irqsave(&dd->eep_st_lock, flags);
362 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
363 int new_val = dd->eep_st_new_errs[idx];
364 if (new_val) {
365 /*
366 * If we have seen any errors, add to EEPROM values
367 * We need to saturate at 0xFF (255) and we also
368 * would need to adjust the checksum if we were
369 * trying to minimize EEPROM traffic
370 * Note that we add to actual current count in EEPROM,
371 * in case it was altered while we were running.
372 */
373 new_val += ifp->if_errcntp[idx];
374 if (new_val > 0xFF)
375 new_val = 0xFF;
376 if (ifp->if_errcntp[idx] != new_val) {
377 ifp->if_errcntp[idx] = new_val;
378 hi_water = offsetof(struct qib_flash,
379 if_errcntp) + idx;
380 }
381 /*
382 * update our shadow (used to minimize EEPROM
383 * traffic), to match what we are about to write.
384 */
385 dd->eep_st_errs[idx] = new_val;
386 dd->eep_st_new_errs[idx] = 0;
387 }
388 }
389 /*
390 * Now update active-time. We would like to round to the nearest hour
391 * but unless atomic_t are sure to be proper signed ints we cannot,
392 * because we need to account for what we "transfer" to EEPROM and
393 * if we log an hour at 31 minutes, then we would need to set
394 * active_time to -29 to accurately count the _next_ hour.
395 */
396 if (new_time >= 3600) {
397 new_hrs = new_time / 3600;
398 atomic_sub((new_hrs * 3600), &dd->active_time);
399 new_hrs += dd->eep_hrs;
400 if (new_hrs > 0xFFFF)
401 new_hrs = 0xFFFF;
402 dd->eep_hrs = new_hrs;
403 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
404 ifp->if_powerhour[0] = new_hrs & 0xFF;
405 hi_water = offsetof(struct qib_flash, if_powerhour);
406 }
407 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
408 ifp->if_powerhour[1] = new_hrs >> 8;
409 hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
410 }
411 }
412 /*
413 * There is a tiny possibility that we could somehow fail to write
414 * the EEPROM after updating our shadows, but problems from holding
415 * the spinlock too long are a much bigger issue.
416 */
417 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
418 if (hi_water) {
419		/* we made some change to the data, update cksum and write */
420 csum = flash_csum(ifp, 1);
421 ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
422 }
423 mutex_unlock(&dd->eep_lock);
424 if (ret)
425 qib_dev_err(dd, "Failed updating EEPROM\n");
426
427free_bail:
428 vfree(buf);
429bail:
430 return ret;
431}
432
433/**
434 * qib_inc_eeprom_err - increment one of the four error counters
435 * that are logged to EEPROM.
436 * @dd: the qlogic_ib device
437 * @eidx: 0..3, the counter to increment
438 * @incr: how much to add
439 *
440 * Each counter is 8-bits, and saturates at 255 (0xFF). They
441 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
442 * is called, but it can only be called in a context that allows sleep.
443 * This function can be called even at interrupt level.
444 */
445void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
446{
447 uint new_val;
448 unsigned long flags;
449
450 spin_lock_irqsave(&dd->eep_st_lock, flags);
451 new_val = dd->eep_st_new_errs[eidx] + incr;
452 if (new_val > 255)
453 new_val = 255;
454 dd->eep_st_new_errs[eidx] = new_val;
455 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
456}
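
The hour accounting above only transfers whole hours to the EEPROM shadow and leaves the remainder in active_time, which is why it subtracts new_hrs * 3600 instead of zeroing the counter. A minimal stand-alone sketch of that carry logic, with illustrative values rather than anything read from a device:

/* Sketch of the whole-hour carry used by qib_update_eeprom_log();
 * the driver keeps active_time in an atomic_t, counted in seconds.
 */
#include <stdio.h>

int main(void)
{
	unsigned int active_time = 5400;	/* 1.5 hours of traffic, in seconds */
	unsigned int eep_hrs = 10;		/* hours already recorded in EEPROM */

	if (active_time >= 3600) {
		unsigned int new_hrs = active_time / 3600;	/* whole hours only */

		active_time -= new_hrs * 3600;	/* keep the 1800 s remainder */
		new_hrs += eep_hrs;
		if (new_hrs > 0xFFFF)		/* 16-bit power-hour field saturates */
			new_hrs = 0xFFFF;
		eep_hrs = new_hrs;
	}
	printf("eep_hrs=%u remainder=%u\n", eep_hrs, active_time);
	return 0;
}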
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34eeef68..41937c6f888a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
351 * unless perhaps the user has mpin'ed the pages 351 * unless perhaps the user has mpin'ed the pages
352 * themselves. 352 * themselves.
353 */ 353 */
354 qib_devinfo(dd->pcidev, 354 qib_devinfo(
355 "Failed to lock addr %p, %u pages: " 355 dd->pcidev,
356 "errno %d\n", (void *) vaddr, cnt, -ret); 356 "Failed to lock addr %p, %u pages: errno %d\n",
357 (void *) vaddr, cnt, -ret);
357 goto done; 358 goto done;
358 } 359 }
359 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 360 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
437 goto cleanup; 438 goto cleanup;
438 } 439 }
439 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 440 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
440 tidmap, sizeof tidmap)) { 441 tidmap, sizeof(tidmap))) {
441 ret = -EFAULT; 442 ret = -EFAULT;
442 goto cleanup; 443 goto cleanup;
443 } 444 }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
484 } 485 }
485 486
486 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 487 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
487 sizeof tidmap)) { 488 sizeof(tidmap))) {
488 ret = -EFAULT; 489 ret = -EFAULT;
489 goto done; 490 goto done;
490 } 491 }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
951 /* rcvegrbufs are read-only on the slave */ 952 /* rcvegrbufs are read-only on the slave */
952 if (vma->vm_flags & VM_WRITE) { 953 if (vma->vm_flags & VM_WRITE) {
953 qib_devinfo(dd->pcidev, 954 qib_devinfo(dd->pcidev,
954 "Can't map eager buffers as " 955 "Can't map eager buffers as writable (flags=%lx)\n",
955 "writable (flags=%lx)\n", vma->vm_flags); 956 vma->vm_flags);
956 ret = -EPERM; 957 ret = -EPERM;
957 goto bail; 958 goto bail;
958 } 959 }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1185 */ 1186 */
1186 if (weight >= qib_cpulist_count) { 1187 if (weight >= qib_cpulist_count) {
1187 int cpu; 1188 int cpu;
1189
1188 cpu = find_first_zero_bit(qib_cpulist, 1190 cpu = find_first_zero_bit(qib_cpulist,
1189 qib_cpulist_count); 1191 qib_cpulist_count);
1190 if (cpu == qib_cpulist_count) 1192 if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
1247 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, 1249 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1248 uinfo->spu_userversion & 0xffff)) { 1250 uinfo->spu_userversion & 0xffff)) {
1249 qib_devinfo(dd->pcidev, 1251 qib_devinfo(dd->pcidev,
1250 "Mismatched user version (%d.%d) and driver " 1252 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1251 "version (%d.%d) while context sharing. Ensure "
1252 "that driver and library are from the same "
1253 "release.\n",
1254 (int) (uinfo->spu_userversion >> 16), 1253 (int) (uinfo->spu_userversion >> 16),
1255 (int) (uinfo->spu_userversion & 0xffff), 1254 (int) (uinfo->spu_userversion & 0xffff),
1256 QIB_USER_SWMAJOR, QIB_USER_SWMINOR); 1255 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1391 } 1390 }
1392 if (!ppd) { 1391 if (!ppd) {
1393 u32 pidx = ctxt % dd->num_pports; 1392 u32 pidx = ctxt % dd->num_pports;
1393
1394 if (usable(dd->pport + pidx)) 1394 if (usable(dd->pport + pidx))
1395 ppd = dd->pport + pidx; 1395 ppd = dd->pport + pidx;
1396 else { 1396 else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1438 1438
1439 if (alg == QIB_PORT_ALG_ACROSS) { 1439 if (alg == QIB_PORT_ALG_ACROSS) {
1440 unsigned inuse = ~0U; 1440 unsigned inuse = ~0U;
1441
1441 /* find device (with ACTIVE ports) with fewest ctxts in use */ 1442 /* find device (with ACTIVE ports) with fewest ctxts in use */
1442 for (ndev = 0; ndev < devmax; ndev++) { 1443 for (ndev = 0; ndev < devmax; ndev++) {
1443 struct qib_devdata *dd = qib_lookup(ndev); 1444 struct qib_devdata *dd = qib_lookup(ndev);
1444 unsigned cused = 0, cfree = 0, pusable = 0; 1445 unsigned cused = 0, cfree = 0, pusable = 0;
1446
1445 if (!dd) 1447 if (!dd)
1446 continue; 1448 continue;
1447 if (port && port <= dd->num_pports && 1449 if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1471 } else { 1473 } else {
1472 for (ndev = 0; ndev < devmax; ndev++) { 1474 for (ndev = 0; ndev < devmax; ndev++) {
1473 struct qib_devdata *dd = qib_lookup(ndev); 1475 struct qib_devdata *dd = qib_lookup(ndev);
1476
1474 if (dd) { 1477 if (dd) {
1475 ret = choose_port_ctxt(fp, dd, port, uinfo); 1478 ret = choose_port_ctxt(fp, dd, port, uinfo);
1476 if (!ret) 1479 if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
1556 } 1559 }
1557 for (ndev = 0; ndev < devmax; ndev++) { 1560 for (ndev = 0; ndev < devmax; ndev++) {
1558 struct qib_devdata *dd = qib_lookup(ndev); 1561 struct qib_devdata *dd = qib_lookup(ndev);
1562
1559 if (dd) { 1563 if (dd) {
1560 if (pcibus_to_node(dd->pcidev->bus) < 0) { 1564 if (pcibus_to_node(dd->pcidev->bus) < 0) {
1561 ret = -EINVAL; 1565 ret = -EINVAL;
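
The sizeof changes in this file are checkpatch-style fixes that parenthesize the operand; they behave identically only because tidmap is a real array in the enclosing scope, so sizeof yields the whole array size rather than a pointer size. A stand-alone sketch of that distinction, using made-up names rather than the driver's:

/* sizeof(array) gives the total byte count only while the array type is
 * visible; once it decays to a pointer, sizeof sees the pointer instead.
 */
#include <stdio.h>

static void show(const unsigned long *p)
{
	/* p is a pointer here: typically 8 bytes on 64-bit ABIs */
	printf("as pointer: %zu bytes\n", sizeof(p));
}

int main(void)
{
	unsigned long tidmap[8] = { 0 };

	printf("as array:   %zu bytes\n", sizeof(tidmap));	/* 8 * sizeof(long) */
	show(tidmap);
	return 0;
}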
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 81854586c081..55f240a363fe 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
106{ 106{
107 qib_stats.sps_ints = qib_sps_ints(); 107 qib_stats.sps_ints = qib_sps_ints();
108 return simple_read_from_buffer(buf, count, ppos, &qib_stats, 108 return simple_read_from_buffer(buf, count, ppos, &qib_stats,
109 sizeof qib_stats); 109 sizeof(qib_stats));
110} 110}
111 111
112/* 112/*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
133 size_t count, loff_t *ppos) 133 size_t count, loff_t *ppos)
134{ 134{
135 return simple_read_from_buffer(buf, count, ppos, qib_statnames, 135 return simple_read_from_buffer(buf, count, ppos, qib_statnames,
136 sizeof qib_statnames - 1); /* no null */ 136 sizeof(qib_statnames) - 1); /* no null */
137} 137}
138 138
139static const struct file_operations driver_ops[] = { 139static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
379 int ret, i; 379 int ret, i;
380 380
381 /* create the per-unit directory */ 381 /* create the per-unit directory */
382 snprintf(unit, sizeof unit, "%u", dd->unit); 382 snprintf(unit, sizeof(unit), "%u", dd->unit);
383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, 383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
384 &simple_dir_operations, dd); 384 &simple_dir_operations, dd);
385 if (ret) { 385 if (ret) {
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
482 482
483 root = dget(sb->s_root); 483 root = dget(sb->s_root);
484 mutex_lock(&root->d_inode->i_mutex); 484 mutex_lock(&root->d_inode->i_mutex);
485 snprintf(unit, sizeof unit, "%u", dd->unit); 485 snprintf(unit, sizeof(unit), "%u", dd->unit);
486 dir = lookup_one_len(unit, root, strlen(unit)); 486 dir = lookup_one_len(unit, root, strlen(unit));
487 487
488 if (IS_ERR(dir)) { 488 if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
560 const char *dev_name, void *data) 560 const char *dev_name, void *data)
561{ 561{
562 struct dentry *ret; 562 struct dentry *ret;
563
563 ret = mount_single(fs_type, flags, data, qibfs_fill_super); 564 ret = mount_single(fs_type, flags, data, qibfs_fill_super);
564 if (!IS_ERR(ret)) 565 if (!IS_ERR(ret))
565 qib_super = ret->d_sb; 566 qib_super = ret->d_sb;
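
The reads above hand a kernel buffer straight to simple_read_from_buffer(), which does the offset and short-read bookkeeping, and the sizeof() fixes keep the existing semantics (including the "- 1" that drops the terminating NUL of the names string). A minimal sketch of the helper with an illustrative stats structure, not the driver's qib_stats:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static struct {
	u64 ints;
	u64 errs;
} demo_stats;			/* illustrative only */

static ssize_t demo_stats_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	/* Copies at most count bytes starting at *ppos and advances it,
	 * so successive reads walk through the structure.
	 */
	return simple_read_from_buffer(buf, count, ppos, &demo_stats,
				       sizeof(demo_stats));
}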
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d68266ac7619..0d2ba59af30a 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
333 enum qib_ureg regno, u64 value, int ctxt) 333 enum qib_ureg regno, u64 value, int ctxt)
334{ 334{
335 u64 __iomem *ubase; 335 u64 __iomem *ubase;
336
336 if (dd->userbase) 337 if (dd->userbase)
337 ubase = (u64 __iomem *) 338 ubase = (u64 __iomem *)
338 ((char __iomem *) dd->userbase + 339 ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
834 bits = (u32) ((hwerrs >> 835 bits = (u32) ((hwerrs >>
835 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 837 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
837 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 838 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
838 "[PCIe Mem Parity Errs %x] ", bits); 839 "[PCIe Mem Parity Errs %x] ", bits);
839 strlcat(msg, bitsmsg, msgl); 840 strlcat(msg, bitsmsg, msgl);
840 } 841 }
841 842
842 if (hwerrs & _QIB_PLL_FAIL) { 843 if (hwerrs & _QIB_PLL_FAIL) {
843 isfatal = 1; 844 isfatal = 1;
844 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 845 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
845 "[PLL failed (%llx), InfiniPath hardware unusable]", 846 "[PLL failed (%llx), InfiniPath hardware unusable]",
846 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 847 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
847 strlcat(msg, bitsmsg, msgl); 848 strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1014 1015
1015 /* do these first, they are most important */ 1016 /* do these first, they are most important */
1016 if (errs & ERR_MASK(HardwareErr)) 1017 if (errs & ERR_MASK(HardwareErr))
1017 qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1018 qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1018 else 1019 else
1019 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1020 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1020 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1021 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1062 */ 1063 */
1063 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | 1064 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1064 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); 1065 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1065 qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1066 qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1066 1067
1067 if (errs & E_SUM_PKTERRS) 1068 if (errs & E_SUM_PKTERRS)
1068 qib_stats.sps_rcverrs++; 1069 qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
1670 } 1671 }
1671 if (crcs) { 1672 if (crcs) {
1672 u32 cntr = dd->cspec->lli_counter; 1673 u32 cntr = dd->cspec->lli_counter;
1674
1673 cntr += crcs; 1675 cntr += crcs;
1674 if (cntr) { 1676 if (cntr) {
1675 if (cntr > dd->cspec->lli_thresh) { 1677 if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1722 "irq is 0, BIOS error? Interrupts won't work\n"); 1724 "irq is 0, BIOS error? Interrupts won't work\n");
1723 else { 1725 else {
1724 int ret; 1726 int ret;
1727
1725 ret = request_irq(dd->cspec->irq, qib_6120intr, 0, 1728 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1726 QIB_DRV_NAME, dd); 1729 QIB_DRV_NAME, dd);
1727 if (ret) 1730 if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
2681 spin_lock_irqsave(&dd->eep_st_lock, flags); 2684 spin_lock_irqsave(&dd->eep_st_lock, flags);
2682 traffic_wds -= dd->traffic_wds; 2685 traffic_wds -= dd->traffic_wds;
2683 dd->traffic_wds += traffic_wds; 2686 dd->traffic_wds += traffic_wds;
2684 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
2685 atomic_add(5, &dd->active_time); /* S/B #define */
2686 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 2687 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2687 2688
2688 qib_chk_6120_errormask(dd); 2689 qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
2929static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) 2930static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2930{ 2931{
2931 int ret = 0; 2932 int ret = 0;
2933
2932 if (!strncmp(what, "ibc", 3)) { 2934 if (!strncmp(what, "ibc", 3)) {
2933 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); 2935 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2934 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", 2936 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
3170static void set_6120_baseaddrs(struct qib_devdata *dd) 3172static void set_6120_baseaddrs(struct qib_devdata *dd)
3171{ 3173{
3172 u32 cregbase; 3174 u32 cregbase;
3175
3173 cregbase = qib_read_kreg32(dd, kr_counterregbase); 3176 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3174 dd->cspec->cregbase = (u64 __iomem *) 3177 dd->cspec->cregbase = (u64 __iomem *)
3175 ((char __iomem *) dd->kregbase + cregbase); 3178 ((char __iomem *) dd->kregbase + cregbase);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 7dec89fdc124..22affda8af88 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
902 errs &= QLOGIC_IB_E_SDMAERRS; 902 errs &= QLOGIC_IB_E_SDMAERRS;
903 903
904 msg = dd->cspec->sdmamsgbuf; 904 msg = dd->cspec->sdmamsgbuf;
905 qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); 905 qib_decode_7220_sdma_errs(ppd, errs, msg,
906 sizeof(dd->cspec->sdmamsgbuf));
906 spin_lock_irqsave(&ppd->sdma_lock, flags); 907 spin_lock_irqsave(&ppd->sdma_lock, flags);
907 908
908 if (errs & ERR_MASK(SendBufMisuseErr)) { 909 if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
1043static void reenable_7220_chase(unsigned long opaque) 1044static void reenable_7220_chase(unsigned long opaque)
1044{ 1045{
1045 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; 1046 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1047
1046 ppd->cpspec->chase_timer.expires = 0; 1048 ppd->cpspec->chase_timer.expires = 0;
1047 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, 1049 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1048 QLOGIC_IB_IBCC_LINKINITCMD_POLL); 1050 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1101 1103
1102 /* do these first, they are most important */ 1104 /* do these first, they are most important */
1103 if (errs & ERR_MASK(HardwareErr)) 1105 if (errs & ERR_MASK(HardwareErr))
1104 qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1106 qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1105 else 1107 else
1106 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1108 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1107 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1109 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1155 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | 1157 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
1156 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); 1158 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
1157 1159
1158 qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1160 qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1159 1161
1160 if (errs & E_SUM_PKTERRS) 1162 if (errs & E_SUM_PKTERRS)
1161 qib_stats.sps_rcverrs++; 1163 qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1380 bits = (u32) ((hwerrs >> 1382 bits = (u32) ((hwerrs >>
1381 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 1383 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1382 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 1384 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1383 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1385 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1384 "[PCIe Mem Parity Errs %x] ", bits); 1386 "[PCIe Mem Parity Errs %x] ", bits);
1385 strlcat(msg, bitsmsg, msgl); 1387 strlcat(msg, bitsmsg, msgl);
1386 } 1388 }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1390 1392
1391 if (hwerrs & _QIB_PLL_FAIL) { 1393 if (hwerrs & _QIB_PLL_FAIL) {
1392 isfatal = 1; 1394 isfatal = 1;
1393 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1395 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1394 "[PLL failed (%llx), InfiniPath hardware unusable]", 1396 "[PLL failed (%llx), InfiniPath hardware unusable]",
1395 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 1397 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1396 strlcat(msg, bitsmsg, msgl); 1398 strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
3297 spin_lock_irqsave(&dd->eep_st_lock, flags); 3299 spin_lock_irqsave(&dd->eep_st_lock, flags);
3298 traffic_wds -= dd->traffic_wds; 3300 traffic_wds -= dd->traffic_wds;
3299 dd->traffic_wds += traffic_wds; 3301 dd->traffic_wds += traffic_wds;
3300 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3301 atomic_add(5, &dd->active_time); /* S/B #define */
3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3303done: 3303done:
3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); 3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a7eb32517a04..ef97b71c8f7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
117 117
118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ 118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); 119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120MODULE_PARM_DESC(long_attenuation, \ 120MODULE_PARM_DESC(long_attenuation,
121 "attenuation cutoff (dB) for long copper cable setup"); 121 "attenuation cutoff (dB) for long copper cable setup");
122 122
123static ushort qib_singleport; 123static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
153static int setup_txselect(const char *, struct kernel_param *); 153static int setup_txselect(const char *, struct kernel_param *);
154module_param_call(txselect, setup_txselect, param_get_string, 154module_param_call(txselect, setup_txselect, param_get_string,
155 &kp_txselect, S_IWUSR | S_IRUGO); 155 &kp_txselect, S_IWUSR | S_IRUGO);
156MODULE_PARM_DESC(txselect, \ 156MODULE_PARM_DESC(txselect,
157 "Tx serdes indices (for no QSFP or invalid QSFP data)"); 157 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 158
159#define BOARD_QME7342 5 159#define BOARD_QME7342 5
160#define BOARD_QMH7342 6 160#define BOARD_QMH7342 6
161#define BOARD_QMH7360 9
161#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 162#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 BOARD_QMH7342) 163 BOARD_QMH7342)
163#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 164#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
817 enum qib_ureg regno, u64 value, int ctxt) 818 enum qib_ureg regno, u64 value, int ctxt)
818{ 819{
819 u64 __iomem *ubase; 820 u64 __iomem *ubase;
821
820 if (dd->userbase) 822 if (dd->userbase)
821 ubase = (u64 __iomem *) 823 ubase = (u64 __iomem *)
822 ((char __iomem *) dd->userbase + 824 ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1677 /* do these first, they are most important */ 1679 /* do these first, they are most important */
1678 if (errs & QIB_E_HARDWARE) { 1680 if (errs & QIB_E_HARDWARE) {
1679 *msg = '\0'; 1681 *msg = '\0';
1680 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1682 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1681 } else 1683 } else
1682 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1684 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1683 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1685 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1702 mask = QIB_E_HARDWARE; 1704 mask = QIB_E_HARDWARE;
1703 *msg = '\0'; 1705 *msg = '\0';
1704 1706
1705 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, 1707 err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1706 qib_7322error_msgs); 1708 qib_7322error_msgs);
1707 1709
1708 /* 1710 /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1889 *msg = '\0'; 1891 *msg = '\0';
1890 1892
1891 if (errs & ~QIB_E_P_BITSEXTANT) { 1893 if (errs & ~QIB_E_P_BITSEXTANT) {
1892 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1894 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1893 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); 1895 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1894 if (!*msg) 1896 if (!*msg)
1895 snprintf(msg, sizeof ppd->cpspec->epmsgbuf, 1897 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1896 "no others"); 1898 "no others");
1897 qib_dev_porterr(dd, ppd->port, 1899 qib_dev_porterr(dd, ppd->port,
1898 "error interrupt with unknown errors 0x%016Lx set (and %s)\n", 1900 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1906 /* determine cause, then write to clear */ 1908 /* determine cause, then write to clear */
1907 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); 1909 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1908 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); 1910 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1909 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, 1911 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1910 hdrchk_msgs); 1912 hdrchk_msgs);
1911 *msg = '\0'; 1913 *msg = '\0';
1912 /* senderrbuf cleared in SPKTERRS below */ 1914 /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1922 * isn't valid. We don't want to confuse people, so 1924 * isn't valid. We don't want to confuse people, so
1923 * we just don't print them, except at debug 1925 * we just don't print them, except at debug
1924 */ 1926 */
1925 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1927 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1926 (errs & QIB_E_P_LINK_PKTERRS), 1928 (errs & QIB_E_P_LINK_PKTERRS),
1927 qib_7322p_error_msgs); 1929 qib_7322p_error_msgs);
1928 *msg = '\0'; 1930 *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1938 * valid. We don't want to confuse people, so we just 1940 * valid. We don't want to confuse people, so we just
1939 * don't print them, except at debug 1941 * don't print them, except at debug
1940 */ 1942 */
1941 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, 1943 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1942 qib_7322p_error_msgs); 1944 qib_7322p_error_msgs);
1943 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; 1945 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1944 *msg = '\0'; 1946 *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2031 if (dd->cspec->num_msix_entries) { 2033 if (dd->cspec->num_msix_entries) {
2032 /* and same for MSIx */ 2034 /* and same for MSIx */
2033 u64 val = qib_read_kreg64(dd, kr_intgranted); 2035 u64 val = qib_read_kreg64(dd, kr_intgranted);
2036
2034 if (val) 2037 if (val)
2035 qib_write_kreg(dd, kr_intgranted, val); 2038 qib_write_kreg(dd, kr_intgranted, val);
2036 } 2039 }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2176 int err; 2179 int err;
2177 unsigned long flags; 2180 unsigned long flags;
2178 struct qib_pportdata *ppd = dd->pport; 2181 struct qib_pportdata *ppd = dd->pport;
2182
2179 for (; pidx < dd->num_pports; ++pidx, ppd++) { 2183 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180 err = 0; 2184 err = 0;
2181 if (pidx == 0 && (hwerrs & 2185 if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2801 2805
2802 if (n->rcv) { 2806 if (n->rcv) {
2803 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2807 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2808
2804 qib_update_rhdrq_dca(rcd, cpu); 2809 qib_update_rhdrq_dca(rcd, cpu);
2805 } else { 2810 } else {
2806 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2811 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2812
2807 qib_update_sdma_dca(ppd, cpu); 2813 qib_update_sdma_dca(ppd, cpu);
2808 } 2814 }
2809} 2815}
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
2816 2822
2817 if (n->rcv) { 2823 if (n->rcv) {
2818 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2824 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2825
2819 dd = rcd->dd; 2826 dd = rcd->dd;
2820 } else { 2827 } else {
2821 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2828 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2829
2822 dd = ppd->dd; 2830 dd = ppd->dd;
2823 } 2831 }
2824 qib_devinfo(dd->pcidev, 2832 qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2994 struct qib_pportdata *ppd; 3002 struct qib_pportdata *ppd;
2995 struct qib_qsfp_data *qd; 3003 struct qib_qsfp_data *qd;
2996 u32 mask; 3004 u32 mask;
3005
2997 if (!dd->pport[pidx].link_speed_supported) 3006 if (!dd->pport[pidx].link_speed_supported)
2998 continue; 3007 continue;
2999 mask = QSFP_GPIO_MOD_PRS_N; 3008 mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
3001 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); 3010 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
3002 if (gpiostatus & dd->cspec->gpio_mask & mask) { 3011 if (gpiostatus & dd->cspec->gpio_mask & mask) {
3003 u64 pins; 3012 u64 pins;
3013
3004 qd = &ppd->cpspec->qsfp_data; 3014 qd = &ppd->cpspec->qsfp_data;
3005 gpiostatus &= ~mask; 3015 gpiostatus &= ~mask;
3006 pins = qib_read_kreg64(dd, kr_extstatus); 3016 pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
3442 } 3452 }
3443 3453
3444 /* Try to get MSIx interrupts */ 3454 /* Try to get MSIx interrupts */
3445 memset(redirect, 0, sizeof redirect); 3455 memset(redirect, 0, sizeof(redirect));
3446 mask = ~0ULL; 3456 mask = ~0ULL;
3447 msixnum = 0; 3457 msixnum = 0;
3448 local_mask = cpumask_of_pcibus(dd->pcidev->bus); 3458 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3617 n = "InfiniPath_QME7362"; 3627 n = "InfiniPath_QME7362";
3618 dd->flags |= QIB_HAS_QSFP; 3628 dd->flags |= QIB_HAS_QSFP;
3619 break; 3629 break;
3630 case BOARD_QMH7360:
3631 n = "Intel IB QDR 1P FLR-QSFP Adptr";
3632 dd->flags |= QIB_HAS_QSFP;
3633 break;
3620 case 15: 3634 case 15:
3621 n = "InfiniPath_QLE7342_TEST"; 3635 n = "InfiniPath_QLE7342_TEST";
3622 dd->flags |= QIB_HAS_QSFP; 3636 dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
3694 */ 3708 */
3695 for (i = 0; i < msix_entries; i++) { 3709 for (i = 0; i < msix_entries; i++) {
3696 u64 vecaddr, vecdata; 3710 u64 vecaddr, vecdata;
3711
3697 vecaddr = qib_read_kreg64(dd, 2 * i + 3712 vecaddr = qib_read_kreg64(dd, 2 * i +
3698 (QIB_7322_MsixTable_OFFS / sizeof(u64))); 3713 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3699 vecdata = qib_read_kreg64(dd, 1 + 2 * i + 3714 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
5178 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); 5193 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5179 traffic_wds -= ppd->dd->traffic_wds; 5194 traffic_wds -= ppd->dd->traffic_wds;
5180 ppd->dd->traffic_wds += traffic_wds; 5195 ppd->dd->traffic_wds += traffic_wds;
5181 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5182 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5183 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); 5196 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5184 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & 5197 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5185 QIB_IB_QDR) && 5198 QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5357static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) 5370static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5358{ 5371{
5359 u64 newctrlb; 5372 u64 newctrlb;
5373
5360 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | 5374 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5361 IBA7322_IBC_IBTA_1_2_MASK | 5375 IBA7322_IBC_IBTA_1_2_MASK |
5362 IBA7322_IBC_MAX_SPEED_MASK); 5376 IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
5843static void qib_7322_set_baseaddrs(struct qib_devdata *dd) 5857static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5844{ 5858{
5845 u32 cregbase; 5859 u32 cregbase;
5860
5846 cregbase = qib_read_kreg32(dd, kr_counterregbase); 5861 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5847 5862
5848 dd->cspec->cregbase = (u64 __iomem *)(cregbase + 5863 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
6183 struct qib_devdata *dd; 6198 struct qib_devdata *dd;
6184 unsigned long val; 6199 unsigned long val;
6185 char *n; 6200 char *n;
6201
6186 if (strlen(str) >= MAX_ATTEN_LEN) { 6202 if (strlen(str) >= MAX_ATTEN_LEN) {
6187 pr_info("txselect_values string too long\n"); 6203 pr_info("txselect_values string too long\n");
6188 return -ENOSPC; 6204 return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
6393 val = TIDFLOW_ERRBITS; /* these are W1C */ 6409 val = TIDFLOW_ERRBITS; /* these are W1C */
6394 for (i = 0; i < dd->cfgctxts; i++) { 6410 for (i = 0; i < dd->cfgctxts; i++) {
6395 int flow; 6411 int flow;
6412
6396 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) 6413 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6397 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); 6414 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6398 } 6415 }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6503 6520
6504 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { 6521 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6505 struct qib_chippport_specific *cp = ppd->cpspec; 6522 struct qib_chippport_specific *cp = ppd->cpspec;
6523
6506 ppd->link_speed_supported = features & PORT_SPD_CAP; 6524 ppd->link_speed_supported = features & PORT_SPD_CAP;
6507 features >>= PORT_SPD_CAP_SHIFT; 6525 features >>= PORT_SPD_CAP_SHIFT;
6508 if (!ppd->link_speed_supported) { 6526 if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6581 ppd->vls_supported = IB_VL_VL0_7; 6599 ppd->vls_supported = IB_VL_VL0_7;
6582 else { 6600 else {
6583 qib_devinfo(dd->pcidev, 6601 qib_devinfo(dd->pcidev,
6584 "Invalid num_vls %u for MTU %d " 6602 "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6585 ", using 4 VLs\n",
6586 qib_num_cfg_vls, mtu); 6603 qib_num_cfg_vls, mtu);
6587 ppd->vls_supported = IB_VL_VL0_3; 6604 ppd->vls_supported = IB_VL_VL0_3;
6588 qib_num_cfg_vls = 4; 6605 qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7890static int serdes_7322_init(struct qib_pportdata *ppd) 7907static int serdes_7322_init(struct qib_pportdata *ppd)
7891{ 7908{
7892 int ret = 0; 7909 int ret = 0;
7910
7893 if (ppd->dd->cspec->r1) 7911 if (ppd->dd->cspec->r1)
7894 ret = serdes_7322_init_old(ppd); 7912 ret = serdes_7322_init_old(ppd);
7895 else 7913 else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
8305 8323
8306static int qib_r_grab(struct qib_devdata *dd) 8324static int qib_r_grab(struct qib_devdata *dd)
8307{ 8325{
8308 u64 val; 8326 u64 val = SJA_EN;
8309 val = SJA_EN; 8327
8310 qib_write_kreg(dd, kr_r_access, val); 8328 qib_write_kreg(dd, kr_r_access, val);
8311 qib_read_kreg32(dd, kr_scratch); 8329 qib_read_kreg32(dd, kr_scratch);
8312 return 0; 8330 return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8319{ 8337{
8320 u64 val; 8338 u64 val;
8321 int timeout; 8339 int timeout;
8340
8322 for (timeout = 0; timeout < 100 ; ++timeout) { 8341 for (timeout = 0; timeout < 100 ; ++timeout) {
8323 val = qib_read_kreg32(dd, kr_r_access); 8342 val = qib_read_kreg32(dd, kr_r_access);
8324 if (val & R_RDY) 8343 if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
8346 } 8365 }
8347 if (inp) { 8366 if (inp) {
8348 int tdi = inp[pos >> 3] >> (pos & 7); 8367 int tdi = inp[pos >> 3] >> (pos & 7);
8368
8349 val |= ((tdi & 1) << R_TDI_LSB); 8369 val |= ((tdi & 1) << R_TDI_LSB);
8350 } 8370 }
8351 qib_write_kreg(dd, kr_r_access, val); 8371 qib_write_kreg(dd, kr_r_access, val);
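
The MODULE_PARM_DESC hunks near the top of this file only drop a stray line-continuation backslash; the macro takes an ordinary string argument, so a plain newline in the argument list is enough. A minimal sketch of the pattern with a made-up parameter name, not one of qib's:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/types.h>

static ushort example_atten = 10;	/* illustrative default */
module_param_named(example_attenuation, example_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(example_attenuation,
		 "attenuation cutoff (dB) for long copper cable setup");

MODULE_LICENSE("GPL");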
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 729da39c49ed..2ee36953e234 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
140 * Allocate full ctxtcnt array, rather than just cfgctxts, because 140 * Allocate full ctxtcnt array, rather than just cfgctxts, because
141 * cleanup iterates across all possible ctxts. 141 * cleanup iterates across all possible ctxts.
142 */ 142 */
143 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); 143 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
144 if (!dd->rcd) { 144 if (!dd->rcd) {
145 qib_dev_err(dd, 145 qib_dev_err(dd,
146 "Unable to allocate ctxtdata array, failing\n"); 146 "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
234 u8 hw_pidx, u8 port) 234 u8 hw_pidx, u8 port)
235{ 235{
236 int size; 236 int size;
237
237 ppd->dd = dd; 238 ppd->dd = dd;
238 ppd->hw_pidx = hw_pidx; 239 ppd->hw_pidx = hw_pidx;
239 ppd->port = port; /* IB port number, not index */ 240 ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
613 ppd = dd->pport + pidx; 614 ppd = dd->pport + pidx;
614 if (!ppd->qib_wq) { 615 if (!ppd->qib_wq) {
615 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ 616 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
617
616 snprintf(wq_name, sizeof(wq_name), "qib%d_%d", 618 snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
617 dd->unit, pidx); 619 dd->unit, pidx);
618 ppd->qib_wq = 620 ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
714 716
715 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 717 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
716 int mtu; 718 int mtu;
719
717 if (lastfail) 720 if (lastfail)
718 ret = lastfail; 721 ret = lastfail;
719 ppd = dd->pport + pidx; 722 ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
931 qib_free_pportdata(ppd); 934 qib_free_pportdata(ppd);
932 } 935 }
933 936
934 qib_update_eeprom_log(dd);
935} 937}
936 938
937/** 939/**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
1026 addr = vmalloc(cnt); 1028 addr = vmalloc(cnt);
1027 if (!addr) { 1029 if (!addr) {
1028 qib_devinfo(dd->pcidev, 1030 qib_devinfo(dd->pcidev,
1029 "Couldn't get memory for checking PIO perf," 1031 "Couldn't get memory for checking PIO perf, skipping\n");
1030 " skipping\n");
1031 goto done; 1032 goto done;
1032 } 1033 }
1033 1034
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1163 1164
1164 if (!qib_cpulist_count) { 1165 if (!qib_cpulist_count) {
1165 u32 count = num_online_cpus(); 1166 u32 count = num_online_cpus();
1167
1166 qib_cpulist = kzalloc(BITS_TO_LONGS(count) * 1168 qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
1167 sizeof(long), GFP_KERNEL); 1169 sizeof(long), GFP_KERNEL);
1168 if (qib_cpulist) 1170 if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
1179 if (!list_empty(&dd->list)) 1181 if (!list_empty(&dd->list))
1180 list_del_init(&dd->list); 1182 list_del_init(&dd->list);
1181 ib_dealloc_device(&dd->verbs_dev.ibdev); 1183 ib_dealloc_device(&dd->verbs_dev.ibdev);
1182 return ERR_PTR(ret);; 1184 return ERR_PTR(ret);
1183} 1185}
1184 1186
1185/* 1187/*
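
The ctxtdata allocation change above swaps an open-coded kzalloc(size * count) for kcalloc(count, size); both zero the memory, but kcalloc also fails cleanly if the multiplication would overflow. A hedged sketch of the pattern with an illustrative struct, not the driver's qib_ctxtdata:

#include <linux/slab.h>

struct item {
	int id;			/* illustrative payload */
};

static struct item **alloc_item_table(unsigned int count)
{
	/* kcalloc() returns NULL if count * sizeof(...) would overflow,
	 * where kzalloc(count * sizeof(...)) would silently wrap.
	 */
	return kcalloc(count, sizeof(struct item *), GFP_KERNEL);
}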
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index f4918f2165ec..086616d071b9 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -168,7 +168,6 @@ skip_ibchange:
168 ppd->lastibcstat = ibcs; 168 ppd->lastibcstat = ibcs;
169 if (ev) 169 if (ev)
170 signal_ib_event(ppd, ev); 170 signal_ib_event(ppd, ev);
171 return;
172} 171}
173 172
174void qib_clear_symerror_on_linkup(unsigned long opaque) 173void qib_clear_symerror_on_linkup(unsigned long opaque)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 3b9afccaaade..ad843c786e72 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
122 if (!mr->lkey_published) 122 if (!mr->lkey_published)
123 goto out; 123 goto out;
124 if (lkey == 0) 124 if (lkey == 0)
125 rcu_assign_pointer(dev->dma_mr, NULL); 125 RCU_INIT_POINTER(dev->dma_mr, NULL);
126 else { 126 else {
127 r = lkey >> (32 - ib_qib_lkey_table_size); 127 r = lkey >> (32 - ib_qib_lkey_table_size);
128 rcu_assign_pointer(rkt->table[r], NULL); 128 RCU_INIT_POINTER(rkt->table[r], NULL);
129 } 129 }
130 qib_put_mr(mr); 130 qib_put_mr(mr);
131 mr->lkey_published = 0; 131 mr->lkey_published = 0;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 636be117b578..395f4046dba2 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
152 data.trap_num = trap_num; 152 data.trap_num = trap_num;
153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
154 data.toggle_count = 0; 154 data.toggle_count = 0;
155 memset(&data.details, 0, sizeof data.details); 155 memset(&data.details, 0, sizeof(data.details));
156 data.details.ntc_257_258.lid1 = lid1; 156 data.details.ntc_257_258.lid1 = lid1;
157 data.details.ntc_257_258.lid2 = lid2; 157 data.details.ntc_257_258.lid2 = lid2;
158 data.details.ntc_257_258.key = cpu_to_be32(key); 158 data.details.ntc_257_258.key = cpu_to_be32(key);
159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); 159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); 160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
161 161
162 qib_send_trap(ibp, &data, sizeof data); 162 qib_send_trap(ibp, &data, sizeof(data));
163} 163}
164 164
165/* 165/*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; 176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
178 data.toggle_count = 0; 178 data.toggle_count = 0;
179 memset(&data.details, 0, sizeof data.details); 179 memset(&data.details, 0, sizeof(data.details));
180 data.details.ntc_256.lid = data.issuer_lid; 180 data.details.ntc_256.lid = data.issuer_lid;
181 data.details.ntc_256.method = smp->method; 181 data.details.ntc_256.method = smp->method;
182 data.details.ntc_256.attr_id = smp->attr_id; 182 data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
198 hop_cnt); 198 hop_cnt);
199 } 199 }
200 200
201 qib_send_trap(ibp, &data, sizeof data); 201 qib_send_trap(ibp, &data, sizeof(data));
202} 202}
203 203
204/* 204/*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
216 data.toggle_count = 0; 216 data.toggle_count = 0;
217 memset(&data.details, 0, sizeof data.details); 217 memset(&data.details, 0, sizeof(data.details));
218 data.details.ntc_144.lid = data.issuer_lid; 218 data.details.ntc_144.lid = data.issuer_lid;
219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); 219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
220 220
221 qib_send_trap(ibp, &data, sizeof data); 221 qib_send_trap(ibp, &data, sizeof(data));
222} 222}
223 223
224/* 224/*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; 234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
236 data.toggle_count = 0; 236 data.toggle_count = 0;
237 memset(&data.details, 0, sizeof data.details); 237 memset(&data.details, 0, sizeof(data.details));
238 data.details.ntc_145.lid = data.issuer_lid; 238 data.details.ntc_145.lid = data.issuer_lid;
239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; 239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
240 240
241 qib_send_trap(ibp, &data, sizeof data); 241 qib_send_trap(ibp, &data, sizeof(data));
242} 242}
243 243
244/* 244/*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
256 data.toggle_count = 0; 256 data.toggle_count = 0;
257 memset(&data.details, 0, sizeof data.details); 257 memset(&data.details, 0, sizeof(data.details));
258 data.details.ntc_144.lid = data.issuer_lid; 258 data.details.ntc_144.lid = data.issuer_lid;
259 data.details.ntc_144.local_changes = 1; 259 data.details.ntc_144.local_changes = 1;
260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; 260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
261 261
262 qib_send_trap(ibp, &data, sizeof data); 262 qib_send_trap(ibp, &data, sizeof(data));
263} 263}
264 264
265static int subn_get_nodedescription(struct ib_smp *smp, 265static int subn_get_nodedescription(struct ib_smp *smp,
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
index 8b73a11d571c..146cf29a2e1d 100644
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
134 void *obj) { 134 void *obj) {
135 struct qib_mmap_info *ip; 135 struct qib_mmap_info *ip;
136 136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL); 137 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
138 if (!ip) 138 if (!ip)
139 goto bail; 139 goto bail;
140 140
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index a77fb4fb14e4..c4473db46699 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
55 55
56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
57 for (; i < m; i++) { 57 for (; i < m; i++) {
58 mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); 58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
59 if (!mr->map[i]) 59 if (!mr->map[i])
60 goto bail; 60 goto bail;
61 } 61 }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
104 goto bail; 104 goto bail;
105 } 105 }
106 106
107 mr = kzalloc(sizeof *mr, GFP_KERNEL); 107 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
108 if (!mr) { 108 if (!mr) {
109 ret = ERR_PTR(-ENOMEM); 109 ret = ERR_PTR(-ENOMEM);
110 goto bail; 110 goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
143 143
144 /* Allocate struct plus pointers to first level page tables. */ 144 /* Allocate struct plus pointers to first level page tables. */
145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
146 mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); 146 mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
147 if (!mr) 147 if (!mr)
148 goto bail; 148 goto bail;
149 149
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
347 if (size > PAGE_SIZE) 347 if (size > PAGE_SIZE)
348 return ERR_PTR(-EINVAL); 348 return ERR_PTR(-EINVAL);
349 349
350 pl = kzalloc(sizeof *pl, GFP_KERNEL); 350 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
351 if (!pl) 351 if (!pl)
352 return ERR_PTR(-ENOMEM); 352 return ERR_PTR(-ENOMEM);
353 353
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
386 386
387 /* Allocate struct plus pointers to first level page tables. */ 387 /* Allocate struct plus pointers to first level page tables. */
388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; 388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
389 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); 389 fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
390 if (!fmr) 390 if (!fmr)
391 goto bail; 391 goto bail;
392 392
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 61a0046efb76..4758a3801ae8 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
210 /* We can't pass qib_msix_entry array to qib_msix_setup 210 /* We can't pass qib_msix_entry array to qib_msix_setup
211 * so use a dummy msix_entry array and copy the allocated 211 * so use a dummy msix_entry array and copy the allocated
212 * irq back to the qib_msix_entry array. */ 212 * irq back to the qib_msix_entry array. */
213 msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); 213 msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
214 if (!msix_entry) 214 if (!msix_entry)
215 goto do_intx; 215 goto do_intx;
216 216
@@ -234,8 +234,10 @@ free_msix_entry:
234 kfree(msix_entry); 234 kfree(msix_entry);
235 235
236do_intx: 236do_intx:
237 qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " 237 qib_dev_err(
238 "falling back to INTx\n", nvec, ret); 238 dd,
239 "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
240 nvec, ret);
239 *msixcnt = 0; 241 *msixcnt = 0;
240 qib_enable_intx(dd->pcidev); 242 qib_enable_intx(dd->pcidev);
241} 243}
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
459void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) 461void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
460{ 462{
461 int r; 463 int r;
464
462 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, 465 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
463 dd->pcibar0); 466 dd->pcibar0);
464 if (r) 467 if (r)
@@ -696,6 +699,7 @@ static void
696qib_pci_resume(struct pci_dev *pdev) 699qib_pci_resume(struct pci_dev *pdev)
697{ 700{
698 struct qib_devdata *dd = pci_get_drvdata(pdev); 701 struct qib_devdata *dd = pci_get_drvdata(pdev);
702
699 qib_devinfo(pdev, "QIB resume function called\n"); 703 qib_devinfo(pdev, "QIB resume function called\n");
700 pci_cleanup_aer_uncorrect_error_status(pdev); 704 pci_cleanup_aer_uncorrect_error_status(pdev);
701 /* 705 /*
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6ddc0264aad2..4fa88ba2963e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
255 255
256 if (rcu_dereference_protected(ibp->qp0, 256 if (rcu_dereference_protected(ibp->qp0,
257 lockdep_is_held(&dev->qpt_lock)) == qp) { 257 lockdep_is_held(&dev->qpt_lock)) == qp) {
258 rcu_assign_pointer(ibp->qp0, NULL); 258 RCU_INIT_POINTER(ibp->qp0, NULL);
259 } else if (rcu_dereference_protected(ibp->qp1, 259 } else if (rcu_dereference_protected(ibp->qp1,
260 lockdep_is_held(&dev->qpt_lock)) == qp) { 260 lockdep_is_held(&dev->qpt_lock)) == qp) {
261 rcu_assign_pointer(ibp->qp1, NULL); 261 RCU_INIT_POINTER(ibp->qp1, NULL);
262 } else { 262 } else {
263 struct qib_qp *q; 263 struct qib_qp *q;
264 struct qib_qp __rcu **qpp; 264 struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
269 lockdep_is_held(&dev->qpt_lock))) != NULL; 269 lockdep_is_held(&dev->qpt_lock))) != NULL;
270 qpp = &q->next) 270 qpp = &q->next)
271 if (q == qp) { 271 if (q == qp) {
272 rcu_assign_pointer(*qpp, 272 RCU_INIT_POINTER(*qpp,
273 rcu_dereference_protected(qp->next, 273 rcu_dereference_protected(qp->next,
274 lockdep_is_held(&dev->qpt_lock))); 274 lockdep_is_held(&dev->qpt_lock)));
275 removed = 1; 275 removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
315 for (n = 0; n < dev->qp_table_size; n++) { 315 for (n = 0; n < dev->qp_table_size; n++) {
316 qp = rcu_dereference_protected(dev->qp_table[n], 316 qp = rcu_dereference_protected(dev->qp_table[n],
317 lockdep_is_held(&dev->qpt_lock)); 317 lockdep_is_held(&dev->qpt_lock));
318 rcu_assign_pointer(dev->qp_table[n], NULL); 318 RCU_INIT_POINTER(dev->qp_table[n], NULL);
319 319
320 for (; qp; qp = rcu_dereference_protected(qp->next, 320 for (; qp; qp = rcu_dereference_protected(qp->next,
321 lockdep_is_held(&dev->qpt_lock))) 321 lockdep_is_held(&dev->qpt_lock)))
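
Switching the NULL stores above to RCU_INIT_POINTER() is the documented pattern: publishing NULL needs no memory barrier because readers cannot dereference it, so the cheaper initializer form can replace rcu_assign_pointer(). A minimal sketch with illustrative names standing in for ibp->qp0 and the qp_table slots:

#include <linux/rcupdate.h>

struct thing {
	int val;
};

static struct thing __rcu *slot;	/* stands in for a qp table entry */

static void publish(struct thing *t)
{
	/* Real data: the barrier in rcu_assign_pointer() ensures readers
	 * see an initialized object.
	 */
	rcu_assign_pointer(slot, t);
}

static void unpublish(void)
{
	/* NULL: nothing to order against, so the initializer form is fine. */
	RCU_INIT_POINTER(slot, NULL);
}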
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index fa71b1e666c5..5e27f76805e2 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
81 * Module could take up to 2 Msec to respond to MOD_SEL, and there 81 * Module could take up to 2 Msec to respond to MOD_SEL, and there
82 * is no way to tell if it is ready, so we must wait. 82 * is no way to tell if it is ready, so we must wait.
83 */ 83 */
84 msleep(2); 84 msleep(20);
85 85
86 /* Make sure TWSI bus is in sane state. */ 86 /* Make sure TWSI bus is in sane state. */
87 ret = qib_twsi_reset(dd); 87 ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
99 while (cnt < len) { 99 while (cnt < len) {
100 unsigned in_page; 100 unsigned in_page;
101 int wlen = len - cnt; 101 int wlen = len - cnt;
102
102 in_page = addr % QSFP_PAGESIZE; 103 in_page = addr % QSFP_PAGESIZE;
103 if ((in_page + wlen) > QSFP_PAGESIZE) 104 if ((in_page + wlen) > QSFP_PAGESIZE)
104 wlen = QSFP_PAGESIZE - in_page; 105 wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
139 else if (pass) 140 else if (pass)
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); 141 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
141 142
142 msleep(2); 143 msleep(20);
143 144
144bail: 145bail:
145 mutex_unlock(&dd->eep_lock); 146 mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
189 * Module could take up to 2 Msec to respond to MOD_SEL, 190 * Module could take up to 2 Msec to respond to MOD_SEL,
190 * and there is no way to tell if it is ready, so we must wait. 191 * and there is no way to tell if it is ready, so we must wait.
191 */ 192 */
192 msleep(2); 193 msleep(20);
193 194
194 /* Make sure TWSI bus is in sane state. */ 195 /* Make sure TWSI bus is in sane state. */
195 ret = qib_twsi_reset(dd); 196 ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
206 while (cnt < len) { 207 while (cnt < len) {
207 unsigned in_page; 208 unsigned in_page;
208 int wlen = len - cnt; 209 int wlen = len - cnt;
210
209 in_page = addr % QSFP_PAGESIZE; 211 in_page = addr % QSFP_PAGESIZE;
210 if ((in_page + wlen) > QSFP_PAGESIZE) 212 if ((in_page + wlen) > QSFP_PAGESIZE)
211 wlen = QSFP_PAGESIZE - in_page; 213 wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
234 * going away, and there is no way to tell if it is ready. 236 * going away, and there is no way to tell if it is ready.
235 * so we must wait. 237 * so we must wait.
236 */ 238 */
237 msleep(2); 239 msleep(20);
238 240
239bail: 241bail:
240 mutex_unlock(&dd->eep_lock); 242 mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
296	 * set the page to zero, even if it already appears to be zero. 298
297 */ 299 */
298 u8 poke = 0; 300 u8 poke = 0;
301
299 ret = qib_qsfp_write(ppd, 127, &poke, 1); 302 ret = qib_qsfp_write(ppd, 127, &poke, 1);
300 udelay(50); 303 udelay(50);
301 if (ret != 1) { 304 if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 483 udelay(20); /* Generous RST dwell */
481 484
482 dd->f_gpio_mod(dd, mask, mask, mask); 485 dd->f_gpio_mod(dd, mask, mask, mask);
483 return;
484} 486}
485 487
486void qib_qsfp_deinit(struct qib_qsfp_data *qd) 488void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
540 542
541 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
542 int iidx; 544 int iidx;
545
543 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); 546 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
544 if (ret < 0) 547 if (ret < 0)
545 goto bail; 548 goto bail;
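
The msleep(2) -> msleep(20) updates above follow the kernel's timer guidance: msleep() is jiffy-based, so a request for 2 ms can already overshoot to 10-20 ms, and sub-20 ms waits that actually matter are better served by usleep_range(). A minimal sketch of the two options, assuming only linux/delay.h; the function name is illustrative, not from the driver:

	#include <linux/delay.h>
	#include <linux/types.h>

	/* Illustrative sketch, not part of this patch. */
	static void modsel_settle_sketch(bool need_tight_bound)
	{
		if (need_tight_bound)
			usleep_range(2000, 4000);	/* 2-4 ms, hrtimer-backed */
		else
			msleep(20);			/* >= 20 ms, jiffy-backed */
	}
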
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2f2501890c4e..4544d6f88ad7 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
1017 /* Post a send completion queue entry if requested. */ 1017 /* Post a send completion queue entry if requested. */
1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1020 memset(&wc, 0, sizeof wc); 1020 memset(&wc, 0, sizeof(wc));
1021 wc.wr_id = wqe->wr.wr_id; 1021 wc.wr_id = wqe->wr.wr_id;
1022 wc.status = IB_WC_SUCCESS; 1022 wc.status = IB_WC_SUCCESS;
1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1073 /* Post a send completion queue entry if requested. */ 1073 /* Post a send completion queue entry if requested. */
1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1076 memset(&wc, 0, sizeof wc); 1076 memset(&wc, 0, sizeof(wc));
1077 wc.wr_id = wqe->wr.wr_id; 1077 wc.wr_id = wqe->wr.wr_id;
1078 wc.status = IB_WC_SUCCESS; 1078 wc.status = IB_WC_SUCCESS;
1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
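
The sizeof wc -> sizeof(wc) hunks here and in the files below are pure style: both forms are valid C, but kernel coding style (and checkpatch) prefer the parenthesized spelling. A minimal sketch, assuming the usual rdma headers; the helper name is illustrative:

	#include <linux/string.h>
	#include <rdma/ib_verbs.h>

	/* Illustrative sketch, not part of this patch. */
	static void wc_zero_sketch(struct ib_wc *wc)
	{
		memset(wc, 0, sizeof *wc);	/* legal, but checkpatch complains */
		memset(wc, 0, sizeof(*wc));	/* preferred kernel style */
	}
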
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4c07a8b34ffe..f42bd0f47577 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 247 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
248 248
249 return ppd->guid; 249 return ppd->guid;
250 } else 250 }
251 return ibp->guids[index - 1]; 251 return ibp->guids[index - 1];
252} 252}
253 253
254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) 254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
420 goto serr; 420 goto serr;
421 } 421 }
422 422
423 memset(&wc, 0, sizeof wc); 423 memset(&wc, 0, sizeof(wc));
424 send_status = IB_WC_SUCCESS; 424 send_status = IB_WC_SUCCESS;
425 425
426 release = 1; 426 release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
792 status != IB_WC_SUCCESS) { 792 status != IB_WC_SUCCESS) {
793 struct ib_wc wc; 793 struct ib_wc wc;
794 794
795 memset(&wc, 0, sizeof wc); 795 memset(&wc, 0, sizeof(wc));
796 wc.wr_id = wqe->wr.wr_id; 796 wc.wr_id = wqe->wr.wr_id;
797 wc.status = status; 797 wc.status = status;
798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 911205d3d5a0..c72775f27212 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
259 * it again during startup. 259 * it again during startup.
260 */ 260 */
261 u64 val; 261 u64 val;
262
262 rst_val &= ~(1ULL); 263 rst_val &= ~(1ULL);
263 qib_write_kreg(dd, kr_hwerrmask, 264 qib_write_kreg(dd, kr_hwerrmask,
264 dd->cspec->hwerrmask & 265 dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
590 * Both should be clear 591 * Both should be clear
591 */ 592 */
592 u64 newval = 0; 593 u64 newval = 0;
594
593 qib_write_kreg(dd, acc, newval); 595 qib_write_kreg(dd, acc, newval);
594 /* First read after write is not trustworthy */ 596 /* First read after write is not trustworthy */
595 pollval = qib_read_kreg32(dd, acc); 597 pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
601 /* Need to claim */ 603 /* Need to claim */
602 u64 pollval; 604 u64 pollval;
603 u64 newval = EPB_ACC_REQ | oct_sel; 605 u64 newval = EPB_ACC_REQ | oct_sel;
606
604 qib_write_kreg(dd, acc, newval); 607 qib_write_kreg(dd, acc, newval);
605 /* First read after write is not trustworthy */ 608 /* First read after write is not trustworthy */
606 pollval = qib_read_kreg32(dd, acc); 609 pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
812 if (!sofar) { 815 if (!sofar) {
813 /* Only set address at start of chunk */ 816 /* Only set address at start of chunk */
814 int addrbyte = (addr + sofar) >> 8; 817 int addrbyte = (addr + sofar) >> 8;
818
815 transval = csbit | EPB_MADDRH | addrbyte; 819 transval = csbit | EPB_MADDRH | addrbyte;
816 tries = epb_trans(dd, trans, transval, 820 tries = epb_trans(dd, trans, transval,
817 &transval); 821 &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
922 * IRQ not set up at this point in init, so we poll. 926 * IRQ not set up at this point in init, so we poll.
923 */ 927 */
924#define IB_SERDES_TRIM_DONE (1ULL << 11) 928#define IB_SERDES_TRIM_DONE (1ULL << 11)
925#define TRIM_TMO (30) 929#define TRIM_TMO (15)
926 930
927static int qib_sd_trimdone_poll(struct qib_devdata *dd) 931static int qib_sd_trimdone_poll(struct qib_devdata *dd)
928{ 932{
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
940 ret = 1; 944 ret = 1;
941 break; 945 break;
942 } 946 }
943 msleep(10); 947 msleep(20);
944 } 948 }
945 if (trim_tmo >= TRIM_TMO) { 949 if (trim_tmo >= TRIM_TMO) {
946 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); 950 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
1071 dds_reg_map >>= 4; 1075 dds_reg_map >>= 4;
1072 for (midx = 0; midx < DDS_ROWS; ++midx) { 1076 for (midx = 0; midx < DDS_ROWS; ++midx) {
1073 u64 __iomem *daddr = taddr + ((midx << 4) + idx); 1077 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1078
1074 data = dds_init_vals[midx].reg_vals[idx]; 1079 data = dds_init_vals[midx].reg_vals[idx];
1075 writeq(data, daddr); 1080 writeq(data, daddr);
1076 mmiowb(); 1081 mmiowb();
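
The TRIM_TMO change (30 -> 15) travels with the msleep(10) -> msleep(20) change in the polling loop below it, so the overall TRIMDONE budget is unchanged: 30 x 10 ms = 15 x 20 ms = 300 ms. A hedged sketch of that loop shape, with the status read abstracted behind a function pointer rather than the driver's register accessors:

	#include <linux/delay.h>
	#include <linux/types.h>

	#define IB_SERDES_TRIM_DONE	(1ULL << 11)
	#define TRIM_TMO		15	/* retries */

	/* Illustrative sketch, not part of this patch. */
	static int trimdone_poll_sketch(u64 (*read_ibcstatus)(void))
	{
		int tries;

		for (tries = 0; tries < TRIM_TMO; tries++) {
			if (read_ibcstatus() & IB_SERDES_TRIM_DONE)
				return 1;		/* done after ~tries * 20 ms */
			msleep(20);
		}
		return 0;				/* ~300 ms budget exhausted */
	}
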
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3c8e4e3caca6..81f56cdff2bc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
586 container_of(device, struct qib_ibdev, ibdev.dev); 586 container_of(device, struct qib_ibdev, ibdev.dev);
587 struct qib_devdata *dd = dd_from_dev(dev); 587 struct qib_devdata *dd = dd_from_dev(dev);
588 588
589 buf[sizeof dd->serial] = '\0'; 589 buf[sizeof(dd->serial)] = '\0';
590 memcpy(buf, dd->serial, sizeof dd->serial); 590 memcpy(buf, dd->serial, sizeof(dd->serial));
591 strcat(buf, "\n"); 591 strcat(buf, "\n");
592 return strlen(buf); 592 return strlen(buf);
593} 593}
@@ -611,28 +611,6 @@ bail:
611 return ret < 0 ? ret : count; 611 return ret < 0 ? ret : count;
612} 612}
613 613
614static ssize_t show_logged_errs(struct device *device,
615 struct device_attribute *attr, char *buf)
616{
617 struct qib_ibdev *dev =
618 container_of(device, struct qib_ibdev, ibdev.dev);
619 struct qib_devdata *dd = dd_from_dev(dev);
620 int idx, count;
621
622 /* force consistency with actual EEPROM */
623 if (qib_update_eeprom_log(dd) != 0)
624 return -ENXIO;
625
626 count = 0;
627 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
628 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
629 dd->eep_st_errs[idx],
630 idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
631 }
632
633 return count;
634}
635
636/* 614/*
637 * Dump tempsense regs. in decimal, to ease shell-scripts. 615 * Dump tempsense regs. in decimal, to ease shell-scripts.
638 */ 616 */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
679static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); 657static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
680static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); 658static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
681static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 659static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
682static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
683static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 660static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
684static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); 661static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
685static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 662static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
693 &dev_attr_nfreectxts, 670 &dev_attr_nfreectxts,
694 &dev_attr_serial, 671 &dev_attr_serial,
695 &dev_attr_boardversion, 672 &dev_attr_boardversion,
696 &dev_attr_logged_errors,
697 &dev_attr_tempsense, 673 &dev_attr_tempsense,
698 &dev_attr_localbus_info, 674 &dev_attr_localbus_info,
699 &dev_attr_chip_reset, 675 &dev_attr_chip_reset,
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index 647f7beb1b0a..f5698664419b 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
105 udelay(2); 105 udelay(2);
106 else { 106 else {
107 int rise_usec; 107 int rise_usec;
108
108 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { 109 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
109 if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) 110 if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
110 break; 111 break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
326static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) 327static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
327{ 328{
328 int ret = 1; 329 int ret = 1;
330
329 if (flags & QIB_TWSI_START) 331 if (flags & QIB_TWSI_START)
330 start_seq(dd); 332 start_seq(dd);
331 333
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
435 int sub_len; 437 int sub_len;
436 const u8 *bp = buffer; 438 const u8 *bp = buffer;
437 int max_wait_time, i; 439 int max_wait_time, i;
438 int ret; 440 int ret = 1;
439 ret = 1;
440 441
441 while (len > 0) { 442 while (len > 0) {
442 if (dev == QIB_TWSI_NO_DEV) { 443 if (dev == QIB_TWSI_NO_DEV) {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 31d3561400a4..eface3b3dacf 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
180 180
181 for (i = 0; i < cnt; i++) { 181 for (i = 0; i < cnt; i++) {
182 int which; 182 int which;
183
183 if (!test_bit(i, mask)) 184 if (!test_bit(i, mask))
184 continue; 185 continue;
185 /* 186 /*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index aaf7039f8ed2..26243b722b5e 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
127 * present on the wire. 127 * present on the wire.
128 */ 128 */
129 length = swqe->length; 129 length = swqe->length;
130 memset(&wc, 0, sizeof wc); 130 memset(&wc, 0, sizeof(wc));
131 wc.byte_len = length + sizeof(struct ib_grh); 131 wc.byte_len = length + sizeof(struct ib_grh);
132 132
133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d2806cae234c..3e0677c51276 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -50,7 +50,7 @@
50/* expected size of headers (for dma_pool) */ 50/* expected size of headers (for dma_pool) */
51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52/* attempt to drain the queue for 5 secs */ 52/* attempt to drain the queue for 5 secs */
53#define QIB_USER_SDMA_DRAIN_TIMEOUT 500 53#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
54 54
55/* 55/*
56 * track how many times a process opens this driver. 56 * track how many times a process opens this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
226 sdma_rb_node->refcount++; 226 sdma_rb_node->refcount++;
227 } else { 227 } else {
228 int ret; 228 int ret;
229
229 sdma_rb_node = kmalloc(sizeof( 230 sdma_rb_node = kmalloc(sizeof(
230 struct qib_user_sdma_rb_node), GFP_KERNEL); 231 struct qib_user_sdma_rb_node), GFP_KERNEL);
231 if (!sdma_rb_node) 232 if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
936 937
937 if (tiddma) { 938 if (tiddma) {
938 char *tidsm = (char *)pkt + pktsize; 939 char *tidsm = (char *)pkt + pktsize;
940
939 cfur = copy_from_user(tidsm, 941 cfur = copy_from_user(tidsm,
940 iov[idx].iov_base, tidsmsize); 942 iov[idx].iov_base, tidsmsize);
941 if (cfur) { 943 if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1142 qib_user_sdma_hwqueue_clean(ppd); 1144 qib_user_sdma_hwqueue_clean(ppd);
1143 qib_user_sdma_queue_clean(ppd, pq); 1145 qib_user_sdma_queue_clean(ppd, pq);
1144 mutex_unlock(&pq->lock); 1146 mutex_unlock(&pq->lock);
1145 msleep(10); 1147 msleep(20);
1146 } 1148 }
1147 1149
1148 if (pq->num_pending || pq->num_sending) { 1150 if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
1316 1318
1317 if (nfree && !list_empty(pktlist)) 1319 if (nfree && !list_empty(pktlist))
1318 goto retry; 1320 goto retry;
1319
1320 return;
1321} 1321}
1322 1322
1323/* pq->lock must be held, get packets on the wire... */ 1323/* pq->lock must be held, get packets on the wire... */
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9bcfbd842980..4a3599890ea5 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1342done: 1342done:
1343 if (dd->flags & QIB_USE_SPCL_TRIG) { 1343 if (dd->flags & QIB_USE_SPCL_TRIG) {
1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1345
1345 qib_flush_wc(); 1346 qib_flush_wc();
1346 __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1347 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1347 } 1348 }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1744 * we allow allocations of more than we report for this value. 1745 * we allow allocations of more than we report for this value.
1745 */ 1746 */
1746 1747
1747 pd = kmalloc(sizeof *pd, GFP_KERNEL); 1748 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1748 if (!pd) { 1749 if (!pd) {
1749 ret = ERR_PTR(-ENOMEM); 1750 ret = ERR_PTR(-ENOMEM);
1750 goto bail; 1751 goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1829 goto bail; 1830 goto bail;
1830 } 1831 }
1831 1832
1832 ah = kmalloc(sizeof *ah, GFP_ATOMIC); 1833 ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
1833 if (!ah) { 1834 if (!ah) {
1834 ret = ERR_PTR(-ENOMEM); 1835 ret = ERR_PTR(-ENOMEM);
1835 goto bail; 1836 goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1862 struct ib_ah *ah = ERR_PTR(-EINVAL); 1863 struct ib_ah *ah = ERR_PTR(-EINVAL);
1863 struct qib_qp *qp0; 1864 struct qib_qp *qp0;
1864 1865
1865 memset(&attr, 0, sizeof attr); 1866 memset(&attr, 0, sizeof(attr));
1866 attr.dlid = dlid; 1867 attr.dlid = dlid;
1867 attr.port_num = ppd_from_ibp(ibp)->port; 1868 attr.port_num = ppd_from_ibp(ibp)->port;
1868 rcu_read_lock(); 1869 rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1977 struct qib_ucontext *context; 1978 struct qib_ucontext *context;
1978 struct ib_ucontext *ret; 1979 struct ib_ucontext *ret;
1979 1980
1980 context = kmalloc(sizeof *context, GFP_KERNEL); 1981 context = kmalloc(sizeof(*context), GFP_KERNEL);
1981 if (!context) { 1982 if (!context) {
1982 ret = ERR_PTR(-ENOMEM); 1983 ret = ERR_PTR(-ENOMEM);
1983 goto bail; 1984 goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
2054 2055
2055 dev->qp_table_size = ib_qib_qp_table_size; 2056 dev->qp_table_size = ib_qib_qp_table_size;
2056 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2057 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
2057 dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, 2058 dev->qp_table = kmalloc_array(
2059 dev->qp_table_size,
2060 sizeof(*dev->qp_table),
2058 GFP_KERNEL); 2061 GFP_KERNEL);
2059 if (!dev->qp_table) { 2062 if (!dev->qp_table) {
2060 ret = -ENOMEM; 2063 ret = -ENOMEM;
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
2122 for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2125 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2123 struct qib_verbs_txreq *tx; 2126 struct qib_verbs_txreq *tx;
2124 2127
2125 tx = kzalloc(sizeof *tx, GFP_KERNEL); 2128 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
2126 if (!tx) { 2129 if (!tx) {
2127 ret = -ENOMEM; 2130 ret = -ENOMEM;
2128 goto err_tx; 2131 goto err_tx;
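
The qp_table allocation above moves from an open-coded size multiply inside kmalloc() to kmalloc_array(), which returns NULL instead of silently wrapping if the multiplication overflows. A minimal sketch of the pattern, with placeholder struct and function names:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct table_entry_sketch {
		u32 key;
		void *payload;
	};

	/* Illustrative sketch, not part of this patch. */
	static struct table_entry_sketch *table_alloc_sketch(size_t n)
	{
		/* overflow-checked equivalent of kmalloc(n * sizeof(*tbl), ...) */
		return kmalloc_array(n, sizeof(struct table_entry_sketch), GFP_KERNEL);
	}
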
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index dabb697b1c2a..f8ea069a3eaf 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
43{ 43{
44 struct qib_mcast_qp *mqp; 44 struct qib_mcast_qp *mqp;
45 45
46 mqp = kmalloc(sizeof *mqp, GFP_KERNEL); 46 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
47 if (!mqp) 47 if (!mqp)
48 goto bail; 48 goto bail;
49 49
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
75{ 75{
76 struct qib_mcast *mcast; 76 struct qib_mcast *mcast;
77 77
78 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 78 mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
79 if (!mcast) 79 if (!mcast)
80 goto bail; 80 goto bail;
81 81
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 1d7281c5a02e..81b225f2300a 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
72 if (dd->piobcnt2k && dd->piobcnt4k) { 72 if (dd->piobcnt2k && dd->piobcnt4k) {
73 /* 2 sizes for chip */ 73 /* 2 sizes for chip */
74 unsigned long pio2kbase, pio4kbase; 74 unsigned long pio2kbase, pio4kbase;
75
75 pio2kbase = dd->piobufbase & 0xffffffffUL; 76 pio2kbase = dd->piobufbase & 0xffffffffUL;
76 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; 77 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
77 if (pio2kbase < pio4kbase) { 78 if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
91 } 92 }
92 93
93 for (bits = 0; !(piolen & (1ULL << bits)); bits++) 94 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
94 /* do nothing */ ; 95 ; /* do nothing */
95 96
96 if (piolen != (1ULL << bits)) { 97 if (piolen != (1ULL << bits)) {
97 piolen >>= bits; 98 piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
100 piolen = 1ULL << (bits + 1); 101 piolen = 1ULL << (bits + 1);
101 } 102 }
102 if (pioaddr & (piolen - 1)) { 103 if (pioaddr & (piolen - 1)) {
103 u64 atmp; 104 u64 atmp = pioaddr & ~(piolen - 1);
104 atmp = pioaddr & ~(piolen - 1); 105
105 if (atmp < addr || (atmp + piolen) > (addr + len)) { 106 if (atmp < addr || (atmp + piolen) > (addr + len)) {
106 qib_dev_err(dd, 107 qib_dev_err(dd,
107 "No way to align address/size (%llx/%llx), no WC mtrr\n", 108 "No way to align address/size (%llx/%llx), no WC mtrr\n",
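
The hunk above is the usual write-combining MTRR constraint on x86_64: the covered region must be a power of two in size and its base aligned to that size, otherwise the driver gives up on WC. A hedged sketch of the same rounding idea using the generic log2 helpers rather than the driver's open-coded bit loop:

	#include <linux/log2.h>
	#include <linux/types.h>

	/* Illustrative sketch, not part of this patch (x86_64 assumptions apply). */
	static bool wc_region_sketch(u64 addr, u64 len, u64 *base, u64 *size)
	{
		u64 sz = roundup_pow_of_two(len);	/* MTRR size must be 2^n */
		u64 ba = addr & ~(sz - 1);		/* base aligned to size */

		/* only usable if the aligned block stays within the mapped range */
		if (ba < addr || ba + sz > addr + len)
			return false;
		*base = ba;
		*size = sz;
		return true;
	}
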
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5ce26817e7e1..b47aea1094b2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
654 enum dma_data_direction dma_dir); 654 enum dma_data_direction dma_dir);
655 655
656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
657 struct iser_data_buf *data); 657 struct iser_data_buf *data,
658 enum dma_data_direction dir);
659
658int iser_initialize_task_headers(struct iscsi_task *task, 660int iser_initialize_task_headers(struct iscsi_task *task,
659 struct iser_tx_desc *tx_desc); 661 struct iser_tx_desc *tx_desc);
660int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, 662int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3821633f1065..20e859a6f1a6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
320 struct ib_conn *ib_conn = &iser_conn->ib_conn; 320 struct ib_conn *ib_conn = &iser_conn->ib_conn;
321 struct iser_device *device = ib_conn->device; 321 struct iser_device *device = ib_conn->device;
322 322
323 if (!iser_conn->rx_descs)
324 goto free_login_buf;
325
326 if (device->iser_free_rdma_reg_res) 323 if (device->iser_free_rdma_reg_res)
327 device->iser_free_rdma_reg_res(ib_conn); 324 device->iser_free_rdma_reg_res(ib_conn);
328 325
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
334 /* make sure we never redo any unmapping */ 331 /* make sure we never redo any unmapping */
335 iser_conn->rx_descs = NULL; 332 iser_conn->rx_descs = NULL;
336 333
337free_login_buf:
338 iser_free_login_buf(iser_conn); 334 iser_free_login_buf(iser_conn);
339} 335}
340 336
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
714 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); 710 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
715 if (is_rdma_data_aligned) 711 if (is_rdma_data_aligned)
716 iser_dma_unmap_task_data(iser_task, 712 iser_dma_unmap_task_data(iser_task,
717 &iser_task->data[ISER_DIR_IN]); 713 &iser_task->data[ISER_DIR_IN],
714 DMA_FROM_DEVICE);
718 if (prot_count && is_rdma_prot_aligned) 715 if (prot_count && is_rdma_prot_aligned)
719 iser_dma_unmap_task_data(iser_task, 716 iser_dma_unmap_task_data(iser_task,
720 &iser_task->prot[ISER_DIR_IN]); 717 &iser_task->prot[ISER_DIR_IN],
718 DMA_FROM_DEVICE);
721 } 719 }
722 720
723 if (iser_task->dir[ISER_DIR_OUT]) { 721 if (iser_task->dir[ISER_DIR_OUT]) {
724 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); 722 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
725 if (is_rdma_data_aligned) 723 if (is_rdma_data_aligned)
726 iser_dma_unmap_task_data(iser_task, 724 iser_dma_unmap_task_data(iser_task,
727 &iser_task->data[ISER_DIR_OUT]); 725 &iser_task->data[ISER_DIR_OUT],
726 DMA_TO_DEVICE);
728 if (prot_count && is_rdma_prot_aligned) 727 if (prot_count && is_rdma_prot_aligned)
729 iser_dma_unmap_task_data(iser_task, 728 iser_dma_unmap_task_data(iser_task,
730 &iser_task->prot[ISER_DIR_OUT]); 729 &iser_task->prot[ISER_DIR_OUT],
730 DMA_TO_DEVICE);
731 } 731 }
732} 732}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index abce9339333f..341040bf0984 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
332} 332}
333 333
334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
335 struct iser_data_buf *data) 335 struct iser_data_buf *data,
336 enum dma_data_direction dir)
336{ 337{
337 struct ib_device *dev; 338 struct ib_device *dev;
338 339
339 dev = iser_task->iser_conn->ib_conn.device->ib_device; 340 dev = iser_task->iser_conn->ib_conn.device->ib_device;
340 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 341 ib_dma_unmap_sg(dev, data->buf, data->size, dir);
341} 342}
342 343
343static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, 344static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
357 iser_data_buf_dump(mem, ibdev); 358 iser_data_buf_dump(mem, ibdev);
358 359
359 /* unmap the command data before accessing it */ 360 /* unmap the command data before accessing it */
360 iser_dma_unmap_task_data(iser_task, mem); 361 iser_dma_unmap_task_data(iser_task, mem,
362 (cmd_dir == ISER_DIR_OUT) ?
363 DMA_TO_DEVICE : DMA_FROM_DEVICE);
361 364
362 /* allocate copy buf, if we are writing, copy the */ 365 /* allocate copy buf, if we are writing, copy the */
363 /* unaligned scatterlist, dma map the copy */ 366 /* unaligned scatterlist, dma map the copy */
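
iser_dma_unmap_task_data() used to hard-code DMA_FROM_DEVICE, which is wrong for buffers mapped for writes; the extra parameter lets each caller pass the direction the buffer was actually mapped with, as the fall_to_bounce_buf() hunk above shows. A minimal caller-side sketch, assuming the post-patch prototype and the driver's iscsi_iser.h types:

	#include <linux/dma-direction.h>
	#include "iscsi_iser.h"

	/* Illustrative sketch, not part of this patch: direction must mirror the map. */
	static void unmap_task_sketch(struct iscsi_iser_task *iser_task,
				      struct iser_data_buf *data,
				      enum iser_data_dir cmd_dir)
	{
		iser_dma_unmap_task_data(iser_task, data,
					 (cmd_dir == ISER_DIR_OUT) ?
					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}
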
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 695a2704bd43..4065abe28829 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
600/** 600/**
601 * iser_free_ib_conn_res - release IB related resources 601 * iser_free_ib_conn_res - release IB related resources
602 * @iser_conn: iser connection struct 602 * @iser_conn: iser connection struct
603 * @destroy_device: indicator if we need to try to release 603 * @destroy: indicator if we need to try to release the
604 * the iser device (only iscsi shutdown and DEVICE_REMOVAL 604 * iser device and memory regions pool (only iscsi
605 * will use this. 605 * shutdown and DEVICE_REMOVAL will use this).
606 * 606 *
607 * This routine is called with the iser state mutex held 607 * This routine is called with the iser state mutex held
608 * so the cm_id removal is out of here. It is safe to 608 * so the cm_id removal is out of here. It is safe to
609 * be invoked multiple times. 609 * be invoked multiple times.
610 */ 610 */
611static void iser_free_ib_conn_res(struct iser_conn *iser_conn, 611static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
612 bool destroy_device) 612 bool destroy)
613{ 613{
614 struct ib_conn *ib_conn = &iser_conn->ib_conn; 614 struct ib_conn *ib_conn = &iser_conn->ib_conn;
615 struct iser_device *device = ib_conn->device; 615 struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
617 iser_info("freeing conn %p cma_id %p qp %p\n", 617 iser_info("freeing conn %p cma_id %p qp %p\n",
618 iser_conn, ib_conn->cma_id, ib_conn->qp); 618 iser_conn, ib_conn->cma_id, ib_conn->qp);
619 619
620 iser_free_rx_descriptors(iser_conn);
621
622 if (ib_conn->qp != NULL) { 620 if (ib_conn->qp != NULL) {
623 ib_conn->comp->active_qps--; 621 ib_conn->comp->active_qps--;
624 rdma_destroy_qp(ib_conn->cma_id); 622 rdma_destroy_qp(ib_conn->cma_id);
625 ib_conn->qp = NULL; 623 ib_conn->qp = NULL;
626 } 624 }
627 625
628 if (destroy_device && device != NULL) { 626 if (destroy) {
629 iser_device_try_release(device); 627 if (iser_conn->rx_descs)
630 ib_conn->device = NULL; 628 iser_free_rx_descriptors(iser_conn);
629
630 if (device != NULL) {
631 iser_device_try_release(device);
632 ib_conn->device = NULL;
633 }
631 } 634 }
632} 635}
633 636
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
643 mutex_unlock(&ig.connlist_mutex); 646 mutex_unlock(&ig.connlist_mutex);
644 647
645 mutex_lock(&iser_conn->state_mutex); 648 mutex_lock(&iser_conn->state_mutex);
649 /* In case we end up here without ep_disconnect being invoked. */
646 if (iser_conn->state != ISER_CONN_DOWN) { 650 if (iser_conn->state != ISER_CONN_DOWN) {
647 iser_warn("iser conn %p state %d, expected state down.\n", 651 iser_warn("iser conn %p state %d, expected state down.\n",
648 iser_conn, iser_conn->state); 652 iser_conn, iser_conn->state);
653 iscsi_destroy_endpoint(iser_conn->ep);
649 iser_conn->state = ISER_CONN_DOWN; 654 iser_conn->state = ISER_CONN_DOWN;
650 } 655 }
651 /* 656 /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
840} 845}
841 846
842static void iser_cleanup_handler(struct rdma_cm_id *cma_id, 847static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
843 bool destroy_device) 848 bool destroy)
844{ 849{
845 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 850 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
846 851
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
850 * and flush errors. 855 * and flush errors.
851 */ 856 */
852 iser_disconnected_handler(cma_id); 857 iser_disconnected_handler(cma_id);
853 iser_free_ib_conn_res(iser_conn, destroy_device); 858 iser_free_ib_conn_res(iser_conn, destroy);
854 complete(&iser_conn->ib_completion); 859 complete(&iser_conn->ib_completion);
855}; 860};
856 861
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 867cc5084afb..b513e662d8e4 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,6 +90,7 @@ enum {
90}; 90};
91 91
92enum { 92enum {
93 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
93 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 94 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
94 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 95 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
95}; 96};
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
201 __u8 reserved[4]; 202 __u8 reserved[4];
202}; 203};
203 204
205struct ib_uverbs_ex_query_device {
206 __u32 comp_mask;
207 __u32 reserved;
208};
209
210struct ib_uverbs_odp_caps {
211 __u64 general_caps;
212 struct {
213 __u32 rc_odp_caps;
214 __u32 uc_odp_caps;
215 __u32 ud_odp_caps;
216 } per_transport_caps;
217 __u32 reserved;
218};
219
220struct ib_uverbs_ex_query_device_resp {
221 struct ib_uverbs_query_device_resp base;
222 __u32 comp_mask;
223 __u32 response_length;
224 struct ib_uverbs_odp_caps odp_caps;
225};
226
204struct ib_uverbs_query_port { 227struct ib_uverbs_query_port {
205 __u64 response; 228 __u64 response;
206 __u8 port_num; 229 __u8 port_num;
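
The uapi additions above make query-device an extended command: IB_USER_VERBS_EX_CMD_QUERY_DEVICE reuses the legacy command id, and the extended response wraps the legacy struct as "base", advertises how much of itself was filled in response_length, and appends the new ODP capability block. A hedged userspace-side sketch of consuming it; how the response buffer was obtained, and the helper name, are assumptions rather than part of this header:

	#include <stddef.h>
	#include <stdio.h>
	#include <rdma/ib_user_verbs.h>

	/* Illustrative sketch, not part of this patch. */
	static void print_odp_caps_sketch(const struct ib_uverbs_ex_query_device_resp *resp)
	{
		size_t need = offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps) +
			      sizeof(resp->odp_caps);

		/* older kernels may stop before odp_caps; trust response_length */
		if (resp->response_length < need) {
			printf("ODP caps not reported\n");
			return;
		}
		printf("ODP general caps 0x%llx, rc 0x%x ud 0x%x\n",
		       (unsigned long long)resp->odp_caps.general_caps,
		       resp->odp_caps.per_transport_caps.rc_odp_caps,
		       resp->odp_caps.per_transport_caps.ud_odp_caps);
	}
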