author	Harish Chegondi <harish.chegondi@intel.com>	2016-01-22 15:56:02 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-03-10 20:37:21 -0500
commit	f24a6d4887668cc2925b1e908c72b96a2604a814 (patch)
tree	ae63779f41d5f2a5f7d6c8f64565f31e0365c30b
parent	d2b8d4da1ca5052b72e043d2ce68157abf3f2d24 (diff)
IB/qib: Remove ibport and use rdmavt version
Remove several ibport members from qib and use the rdmavt version. rc_acks,
rc_qacks, and rc_delayed_comp are defined as per-CPU variables in rdmavt, so
add support for these rdmavt per-CPU variables, which were not per-CPU
variables in the qib ibport structure.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/hw/qib/qib_driver.c      |   4
-rw-r--r--	drivers/infiniband/hw/qib/qib_iba6120.c     |   8
-rw-r--r--	drivers/infiniband/hw/qib/qib_iba7322.c     |   2
-rw-r--r--	drivers/infiniband/hw/qib/qib_init.c        |  10
-rw-r--r--	drivers/infiniband/hw/qib/qib_mad.c         | 240
-rw-r--r--	drivers/infiniband/hw/qib/qib_qp.c          |  22
-rw-r--r--	drivers/infiniband/hw/qib/qib_rc.c          |  30
-rw-r--r--	drivers/infiniband/hw/qib/qib_ruc.c         |  14
-rw-r--r--	drivers/infiniband/hw/qib/qib_sdma.c        |   2
-rw-r--r--	drivers/infiniband/hw/qib/qib_sysfs.c       |  65
-rw-r--r--	drivers/infiniband/hw/qib/qib_uc.c          |   2
-rw-r--r--	drivers/infiniband/hw/qib/qib_ud.c          |  14
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs.c       |  52
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs.h       |  46
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs_mcast.c |  28
15 files changed, 281 insertions(+), 258 deletions(-)
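As background for the hunks below: rc_acks, rc_qacks, and rc_delayed_comp move
to the kernel's per-CPU counter idiom, where each CPU owns a private slot, the
hot path increments that slot without locking, and readers sum all slots on
demand. A minimal sketch of that idiom follows; struct demo_port and the
demo_* helpers are illustrative stand-ins, not code from this patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical container mirroring the rvt per-CPU counters. */
struct demo_port {
	u64 __percpu *rc_acks;
};

static int demo_port_init(struct demo_port *p)
{
	/* Allocate one u64 slot per possible CPU. */
	p->rc_acks = alloc_percpu(u64);
	return p->rc_acks ? 0 : -ENOMEM;
}

static void demo_count_ack(struct demo_port *p)
{
	/* Hot path: lock-free increment of this CPU's slot. */
	this_cpu_inc(*p->rc_acks);
}

static u64 demo_total_acks(struct demo_port *p)
{
	int cpu;
	u64 total = 0;

	/* Slow path: sum every CPU's slot to get the aggregate value. */
	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(p->rc_acks, cpu);
	return total;
}

static void demo_port_destroy(struct demo_port *p)
{
	free_percpu(p->rc_acks);
	p->rc_acks = NULL;
}

This also explains the zeroing scheme in the qib_sysfs.c hunk: a writer cannot
safely clear every CPU's slot, so the driver instead records a baseline (the
rvp.z_* fields) when userspace writes zero and reports totals relative to that
baseline.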
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index eafdee958e87..e8b239c76890 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -379,7 +379,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 			/* Check for valid receive state. */
 			if (!(ib_qib_state_ops[qp->state] &
 			      QIB_PROCESS_RECV_OK)) {
-				ibp->n_pkt_drops++;
+				ibp->rvp.n_pkt_drops++;
 				goto unlock;
 			}
 
@@ -399,7 +399,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 					IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
 					diff = qib_cmp24(psn, qp->r_psn);
 					if (!qp->r_nak_state && diff >= 0) {
-						ibp->n_rc_seqnak++;
+						ibp->rvp.n_rc_seqnak++;
 						qp->r_nak_state =
 							IB_NAK_PSN_ERROR;
 						/* Use the expected PSN. */
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 4b927809d1a1..a3733f25280f 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2956,13 +2956,13 @@ static void pma_6120_timer(unsigned long data)
 	struct qib_ibport *ibp = &ppd->ibport_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
 		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
 				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
 		mod_timer(&cs->pma_timer,
-		      jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+		      jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
 	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
 		u64 ta, tb, tc, td, te;
 
@@ -2975,11 +2975,11 @@ static void pma_6120_timer(unsigned long data)
 		cs->rpkts = td - cs->rpkts;
 		cs->xmit_wait = te - cs->xmit_wait;
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 }
 
 /*
- * Note that the caller has the ibp->lock held.
+ * Note that the caller has the ibp->rvp.lock held.
  */
 static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
 				     u32 start)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 1fbe3086db5b..ca28c19d9618 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5497,7 +5497,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 	unsigned delay;
 	int ret;
 
-	agent = ibp->send_agent;
+	agent = ibp->rvp.send_agent;
 	if (!agent)
 		goto retry;
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 47190f1fff95..5087a1f808c0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -245,6 +245,13 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 		alloc_percpu(struct qib_pma_counters);
 	if (!ppd->ibport_data.pmastats)
 		return -ENOMEM;
+	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+	if (!(ppd->ibport_data.rvp.rc_acks) ||
+	    !(ppd->ibport_data.rvp.rc_qacks) ||
+	    !(ppd->ibport_data.rvp.rc_delayed_comp))
+		return -ENOMEM;
 
 	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
 		goto bail;
@@ -632,6 +639,9 @@ wq_error:
 static void qib_free_pportdata(struct qib_pportdata *ppd)
 {
 	free_percpu(ppd->ibport_data.pmastats);
+	free_percpu(ppd->ibport_data.rvp.rc_acks);
+	free_percpu(ppd->ibport_data.rvp.rc_qacks);
+	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
 	ppd->ibport_data.pmastats = NULL;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 43f8c498fef0..3e8dde2119ed 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	unsigned long flags;
 	unsigned long timeout;
 
-	agent = ibp->send_agent;
+	agent = ibp->rvp.send_agent;
 	if (!agent)
 		return;
 
@@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 		return;
 
 	/* o14-2 */
-	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+	if (ibp->rvp.trap_timeout &&
+	    time_before(jiffies, ibp->rvp.trap_timeout))
 		return;
 
 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
@@ -93,18 +94,18 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
 	smp->class_version = 1;
 	smp->method = IB_MGMT_METHOD_TRAP;
-	ibp->tid++;
-	smp->tid = cpu_to_be64(ibp->tid);
+	ibp->rvp.tid++;
+	smp->tid = cpu_to_be64(ibp->rvp.tid);
 	smp->attr_id = IB_SMP_ATTR_NOTICE;
 	/* o14-1: smp->mkey = 0; */
 	memcpy(smp->data, data, len);
 
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	if (!ibp->sm_ah) {
-		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
 			struct ib_ah *ah;
 
-			ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
+			ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
 			if (IS_ERR(ah))
 				ret = PTR_ERR(ah);
 			else {
@@ -118,17 +119,17 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 		send_buf->ah = &ibp->sm_ah->ibah;
 		ret = 0;
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 
 	if (!ret)
 		ret = ib_post_send_mad(send_buf, NULL);
 	if (!ret) {
 		/* 4.096 usec. */
-		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
-		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
+		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
 	} else {
 		ib_free_send_mad(send_buf);
-		ibp->trap_timeout = 0;
+		ibp->rvp.trap_timeout = 0;
 	}
 }
 
@@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
 	struct ib_mad_notice_attr data;
 
 	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
-		ibp->pkey_violations++;
+		ibp->rvp.pkey_violations++;
 	else
-		ibp->qkey_violations++;
-	ibp->n_pkt_drops++;
+		ibp->rvp.qkey_violations++;
+	ibp->rvp.n_pkt_drops++;
 
 	/* Send violation trap */
 	data.generic_type = IB_NOTICE_TYPE_SECURITY;
@@ -217,8 +218,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
 	data.toggle_count = 0;
 	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_144.lid = data.issuer_lid;
-	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
-
+	data.details.ntc_144.new_cap_mask =
+					cpu_to_be32(ibp->rvp.port_cap_flags);
 	qib_send_trap(ibp, &data, sizeof(data));
 }
 
@@ -409,37 +410,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
 	int ret = 0;
 
 	/* Is the mkey in the process of expiring? */
-	if (ibp->mkey_lease_timeout &&
-	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+	if (ibp->rvp.mkey_lease_timeout &&
+	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
 		/* Clear timeout and mkey protection field. */
-		ibp->mkey_lease_timeout = 0;
-		ibp->mkeyprot = 0;
+		ibp->rvp.mkey_lease_timeout = 0;
+		ibp->rvp.mkeyprot = 0;
 	}
 
-	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
-	    ibp->mkey == smp->mkey)
+	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+	    ibp->rvp.mkey == smp->mkey)
 		valid_mkey = 1;
 
 	/* Unset lease timeout on any valid Get/Set/TrapRepress */
-	if (valid_mkey && ibp->mkey_lease_timeout &&
+	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
 	    (smp->method == IB_MGMT_METHOD_GET ||
 	     smp->method == IB_MGMT_METHOD_SET ||
 	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
-		ibp->mkey_lease_timeout = 0;
+		ibp->rvp.mkey_lease_timeout = 0;
 
 	if (!valid_mkey) {
 		switch (smp->method) {
 		case IB_MGMT_METHOD_GET:
 			/* Bad mkey not a violation below level 2 */
-			if (ibp->mkeyprot < 2)
+			if (ibp->rvp.mkeyprot < 2)
 				break;
 		case IB_MGMT_METHOD_SET:
 		case IB_MGMT_METHOD_TRAP_REPRESS:
-			if (ibp->mkey_violations != 0xFFFF)
-				++ibp->mkey_violations;
-			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
-				ibp->mkey_lease_timeout = jiffies +
-					ibp->mkey_lease_period * HZ;
+			if (ibp->rvp.mkey_violations != 0xFFFF)
+				++ibp->rvp.mkey_violations;
+			if (!ibp->rvp.mkey_lease_timeout &&
+			    ibp->rvp.mkey_lease_period)
+				ibp->rvp.mkey_lease_timeout = jiffies +
+					ibp->rvp.mkey_lease_period * HZ;
 			/* Generate a trap notice. */
 			qib_bad_mkey(ibp, smp);
 			ret = 1;
@@ -489,15 +491,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 
 	/* Only return the mkey if the protection field allows it. */
 	if (!(smp->method == IB_MGMT_METHOD_GET &&
-	      ibp->mkey != smp->mkey &&
-	      ibp->mkeyprot == 1))
-		pip->mkey = ibp->mkey;
-	pip->gid_prefix = ibp->gid_prefix;
+	      ibp->rvp.mkey != smp->mkey &&
+	      ibp->rvp.mkeyprot == 1))
+		pip->mkey = ibp->rvp.mkey;
+	pip->gid_prefix = ibp->rvp.gid_prefix;
 	pip->lid = cpu_to_be16(ppd->lid);
-	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
-	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+	pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
+	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
 	/* pip->diag_code; */
-	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
 	pip->local_port_num = port;
 	pip->link_width_enabled = ppd->link_width_enabled;
 	pip->link_width_supported = ppd->link_width_supported;
@@ -508,7 +510,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	pip->portphysstate_linkdown =
 		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
 		(get_linkdowndefaultstate(ppd) ? 1 : 2);
-	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
 	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
 		ppd->link_speed_enabled;
 	switch (ppd->ibmtu) {
@@ -529,9 +531,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		mtu = IB_MTU_256;
 		break;
 	}
-	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
 	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
-	pip->vl_high_limit = ibp->vl_high_limit;
+	pip->vl_high_limit = ibp->rvp.vl_high_limit;
 	pip->vl_arb_high_cap =
 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
 	pip->vl_arb_low_cap =
@@ -542,20 +544,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	/* pip->vlstallcnt_hoqlife; */
 	pip->operationalvl_pei_peo_fpi_fpo =
 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
-	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
 	/* P_KeyViolations are counted by hardware. */
-	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
-	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
 	/* Only the hardware GUID is supported for now */
 	pip->guid_cap = QIB_GUIDS_PER_PORT;
-	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
 	/* 32.768 usec. response time (guessing) */
 	pip->resv_resptimevalue = 3;
 	pip->localphyerrors_overrunerrors =
 		(get_phyerrthreshold(ppd) << 4) |
 		get_overrunthreshold(ppd);
 	/* pip->max_credit_hint; */
-	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
 		u32 v;
 
 		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
@@ -685,9 +687,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	event.device = ibdev;
 	event.element.port_num = port;
 
-	ibp->mkey = pip->mkey;
-	ibp->gid_prefix = pip->gid_prefix;
-	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+	ibp->rvp.mkey = pip->mkey;
+	ibp->rvp.gid_prefix = pip->gid_prefix;
+	ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
 
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
@@ -708,19 +710,19 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
 		smp->status |= IB_SMP_INVALID_FIELD;
-	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
-		spin_lock_irqsave(&ibp->lock, flags);
+	else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
+		spin_lock_irqsave(&ibp->rvp.lock, flags);
 		if (ibp->sm_ah) {
-			if (smlid != ibp->sm_lid)
+			if (smlid != ibp->rvp.sm_lid)
 				ibp->sm_ah->attr.dlid = smlid;
-			if (msl != ibp->sm_sl)
+			if (msl != ibp->rvp.sm_sl)
 				ibp->sm_ah->attr.sl = msl;
 		}
-		spin_unlock_irqrestore(&ibp->lock, flags);
-		if (smlid != ibp->sm_lid)
-			ibp->sm_lid = smlid;
-		if (msl != ibp->sm_sl)
-			ibp->sm_sl = msl;
+		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+		if (smlid != ibp->rvp.sm_lid)
+			ibp->rvp.sm_lid = smlid;
+		if (msl != ibp->rvp.sm_sl)
+			ibp->rvp.sm_sl = msl;
 		event.event = IB_EVENT_SM_CHANGE;
 		ib_dispatch_event(&event);
 	}
@@ -768,10 +770,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
-	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
-	ibp->vl_high_limit = pip->vl_high_limit;
+	ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+	ibp->rvp.vl_high_limit = pip->vl_high_limit;
 	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
-				    ibp->vl_high_limit);
+				    ibp->rvp.vl_high_limit);
 
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
@@ -789,13 +791,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	}
 
 	if (pip->mkey_violations == 0)
-		ibp->mkey_violations = 0;
+		ibp->rvp.mkey_violations = 0;
 
 	if (pip->pkey_violations == 0)
-		ibp->pkey_violations = 0;
+		ibp->rvp.pkey_violations = 0;
 
 	if (pip->qkey_violations == 0)
-		ibp->qkey_violations = 0;
+		ibp->rvp.qkey_violations = 0;
 
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
@@ -804,7 +806,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
 		smp->status |= IB_SMP_INVALID_FIELD;
 
-	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+	ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
 	/*
 	 * Do the port state change now that the other link parameters
@@ -1062,7 +1064,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
 
 	memset(smp->data, 0, sizeof(smp->data));
 
-	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
 		smp->status |= IB_SMP_UNSUP_METHOD;
 	else
 		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
@@ -1078,7 +1080,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
 	u8 *p = (u8 *) smp->data;
 	unsigned i;
 
-	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
 		smp->status |= IB_SMP_UNSUP_METHOD;
 		return reply(smp);
 	}
@@ -1195,20 +1197,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
 	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
 	p->counter_width = 4;   /* 32 bit counters */
 	p->counter_mask0_9 = COUNTER_MASK0_9;
-	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
-	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
-	p->tag = cpu_to_be16(ibp->pma_tag);
-	p->counter_select[0] = ibp->pma_counter_select[0];
-	p->counter_select[1] = ibp->pma_counter_select[1];
-	p->counter_select[2] = ibp->pma_counter_select[2];
-	p->counter_select[3] = ibp->pma_counter_select[3];
-	p->counter_select[4] = ibp->pma_counter_select[4];
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
+	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
+	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
+	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
+	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
+	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
+	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 
 bail:
 	return reply((struct ib_smp *) pmp);
@@ -1233,7 +1235,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 		goto bail;
 	}
 
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 
 	/* Port Sampling code owns the PS* HW counters */
 	xmit_flags = ppd->cong_stats.flags;
@@ -1242,18 +1244,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
 	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
 	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
-		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
-		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
-		ibp->pma_tag = be16_to_cpu(p->tag);
-		ibp->pma_counter_select[0] = p->counter_select[0];
-		ibp->pma_counter_select[1] = p->counter_select[1];
-		ibp->pma_counter_select[2] = p->counter_select[2];
-		ibp->pma_counter_select[3] = p->counter_select[3];
-		ibp->pma_counter_select[4] = p->counter_select[4];
-		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
-				      ibp->pma_sample_start);
+		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
+		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
+		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
+		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
+		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
+		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
+		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
+		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
+		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
+				      ibp->rvp.pma_sample_start);
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 
 	ret = pma_get_portsamplescontrol(pmp, ibdev, port);
 
@@ -1357,8 +1359,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 	int i;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
-	spin_lock_irqsave(&ibp->lock, flags);
-	p->tag = cpu_to_be16(ibp->pma_tag);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
 	else {
@@ -1373,11 +1375,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
 		}
 	}
-	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
 		p->counter[i] = cpu_to_be32(
 			get_cache_hw_sample_counters(
-				ppd, ibp->pma_counter_select[i]));
-	spin_unlock_irqrestore(&ibp->lock, flags);
+				ppd, ibp->rvp.pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 
 	return reply((struct ib_smp *) pmp);
 }
@@ -1397,8 +1399,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 
 	/* Port Sampling code owns the PS* HW counters */
 	memset(pmp->data, 0, sizeof(pmp->data));
-	spin_lock_irqsave(&ibp->lock, flags);
-	p->tag = cpu_to_be16(ibp->pma_tag);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
 	else {
@@ -1415,11 +1417,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
 		}
 	}
-	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
 		p->counter[i] = cpu_to_be64(
 			get_cache_hw_sample_counters(
-				ppd, ibp->pma_counter_select[i]));
-	spin_unlock_irqrestore(&ibp->lock, flags);
+				ppd, ibp->rvp.pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 
 	return reply((struct ib_smp *) pmp);
 }
@@ -1453,7 +1455,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
 	cntrs.excessive_buffer_overrun_errors -=
 		ibp->z_excessive_buffer_overrun_errors;
 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
-	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -1546,9 +1548,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	qib_get_counters(ppd, &cntrs);
-	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
-	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 
 	/* Adjust counters for any resets done. */
 	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
@@ -1564,7 +1566,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 	cntrs.excessive_buffer_overrun_errors -=
 		ibp->z_excessive_buffer_overrun_errors;
 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
-	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
 	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
 	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
 	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
@@ -1743,7 +1745,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp,
 			cntrs.excessive_buffer_overrun_errors;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
-		ibp->n_vl15_dropped = 0;
+		ibp->rvp.n_vl15_dropped = 0;
 		ibp->z_vl15_dropped = cntrs.vl15_dropped;
 	}
 
@@ -1778,11 +1780,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 	ret = pma_get_portcounters_cong(pmp, ibdev, port);
 
 	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
-		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 		ppd->cong_stats.counter = 0;
 		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
 				      0x0);
-		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 	}
 	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
 		ibp->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1806,7 +1808,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 			cntrs.local_link_integrity_errors;
 		ibp->z_excessive_buffer_overrun_errors =
 			cntrs.excessive_buffer_overrun_errors;
-		ibp->n_vl15_dropped = 0;
+		ibp->rvp.n_vl15_dropped = 0;
 		ibp->z_vl15_dropped = cntrs.vl15_dropped;
 	}
 
@@ -1916,12 +1918,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 			ret = subn_get_vl_arb(smp, ibdev, port);
 			goto bail;
 		case IB_SMP_ATTR_SM_INFO:
-			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
 				ret = IB_MAD_RESULT_SUCCESS |
 					IB_MAD_RESULT_CONSUMED;
 				goto bail;
 			}
-			if (ibp->port_cap_flags & IB_PORT_SM) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
 				ret = IB_MAD_RESULT_SUCCESS;
 				goto bail;
 			}
@@ -1950,12 +1952,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 			ret = subn_set_vl_arb(smp, ibdev, port);
 			goto bail;
 		case IB_SMP_ATTR_SM_INFO:
-			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
 				ret = IB_MAD_RESULT_SUCCESS |
 					IB_MAD_RESULT_CONSUMED;
 				goto bail;
 			}
-			if (ibp->port_cap_flags & IB_PORT_SM) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
 				ret = IB_MAD_RESULT_SUCCESS;
 				goto bail;
 			}
@@ -2456,7 +2458,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
 	unsigned long flags;
 	u8 status;
 
-	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
 		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
@@ -2469,7 +2471,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
 	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
 	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
 done:
-	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
 }
 
@@ -2501,7 +2503,7 @@ int qib_create_agents(struct qib_ibdev *dev)
 		dd->pport[p].cong_stats.timer.expires = 0;
 		add_timer(&dd->pport[p].cong_stats.timer);
 
-		ibp->send_agent = agent;
+		ibp->rvp.send_agent = agent;
 	}
 
 	return 0;
@@ -2509,9 +2511,9 @@ int qib_create_agents(struct qib_ibdev *dev)
 err:
 	for (p = 0; p < dd->num_pports; p++) {
 		ibp = &dd->pport[p].ibport_data;
-		if (ibp->send_agent) {
-			agent = ibp->send_agent;
-			ibp->send_agent = NULL;
+		if (ibp->rvp.send_agent) {
+			agent = ibp->rvp.send_agent;
+			ibp->rvp.send_agent = NULL;
 			ib_unregister_mad_agent(agent);
 		}
 	}
@@ -2528,9 +2530,9 @@ void qib_free_agents(struct qib_ibdev *dev)
 
 	for (p = 0; p < dd->num_pports; p++) {
 		ibp = &dd->pport[p].ibport_data;
-		if (ibp->send_agent) {
-			agent = ibp->send_agent;
-			ibp->send_agent = NULL;
+		if (ibp->rvp.send_agent) {
+			agent = ibp->rvp.send_agent;
+			ibp->rvp.send_agent = NULL;
 			ib_unregister_mad_agent(agent);
 		}
 		if (ibp->sm_ah) {
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 1f63835c43a6..9cb9be7809ae 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -231,9 +231,9 @@ static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 
 	if (qp->ibqp.qp_num == 0)
-		rcu_assign_pointer(ibp->qp0, qp);
+		rcu_assign_pointer(ibp->rvp.qp[0], qp);
 	else if (qp->ibqp.qp_num == 1)
-		rcu_assign_pointer(ibp->qp1, qp);
+		rcu_assign_pointer(ibp->rvp.qp[1], qp);
 	else {
 		qp->next = dev->qp_table[n];
 		rcu_assign_pointer(dev->qp_table[n], qp);
@@ -255,12 +255,12 @@ static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 
-	if (rcu_dereference_protected(ibp->qp0,
+	if (rcu_dereference_protected(ibp->rvp.qp[0],
+			lockdep_is_held(&dev->qpt_lock)) == qp) {
+		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		RCU_INIT_POINTER(ibp->qp0, NULL);
-	} else if (rcu_dereference_protected(ibp->qp1,
-			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		RCU_INIT_POINTER(ibp->qp1, NULL);
+		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
 	} else {
 		struct rvt_qp *q;
 		struct rvt_qp __rcu **qpp;
@@ -306,9 +306,9 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 		if (!qib_mcast_tree_empty(ibp))
 			qp_inuse++;
 		rcu_read_lock();
-		if (rcu_dereference(ibp->qp0))
+		if (rcu_dereference(ibp->rvp.qp[0]))
 			qp_inuse++;
-		if (rcu_dereference(ibp->qp1))
+		if (rcu_dereference(ibp->rvp.qp[1]))
 			qp_inuse++;
 		rcu_read_unlock();
 	}
@@ -344,9 +344,9 @@ struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 	rcu_read_lock();
 	if (unlikely(qpn <= 1)) {
 		if (qpn == 0)
-			qp = rcu_dereference(ibp->qp0);
+			qp = rcu_dereference(ibp->rvp.qp[0]);
 		else
-			qp = rcu_dereference(ibp->qp1);
+			qp = rcu_dereference(ibp->rvp.qp[1]);
 		if (qp)
 			atomic_inc(&qp->refcount);
 	} else {
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 46e6c9765ea6..1e8463de9efc 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -760,7 +760,7 @@ void qib_send_rc_ack(struct rvt_qp *qp)
 
 queue_ack:
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
-		ibp->n_rc_qacks++;
+		this_cpu_inc(*ibp->rvp.rc_qacks);
 		qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
 		qp->s_nak_state = qp->r_nak_state;
 		qp->s_ack_psn = qp->r_ack_psn;
@@ -888,9 +888,9 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	if (wqe->wr.opcode == IB_WR_RDMA_READ)
-		ibp->n_rc_resends++;
+		ibp->rvp.n_rc_resends++;
 	else
-		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 
 	qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
 			 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
@@ -913,7 +913,7 @@ static void rc_timeout(unsigned long arg)
 	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
-		ibp->n_rc_timeouts++;
+		ibp->rvp.n_rc_timeouts++;
 		qp->s_flags &= ~QIB_S_TIMER;
 		del_timer(&qp->s_timer);
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
@@ -1087,7 +1087,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		if (++qp->s_last >= qp->s_size)
 			qp->s_last = 0;
 	} else
-		ibp->n_rc_delayed_comp++;
+		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
 	qp->s_retry = qp->s_retry_cnt;
 	update_last_psn(qp, wqe->lpsn);
@@ -1232,7 +1232,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 
 	switch (aeth >> 29) {
 	case 0:         /* ACK */
-		ibp->n_rc_acks++;
+		this_cpu_inc(*ibp->rvp.rc_acks);
 		if (qp->s_acked != qp->s_tail) {
 			/*
 			 * We are expecting more ACKs so
@@ -1261,7 +1261,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		goto bail;
 
 	case 1:         /* RNR NAK */
-		ibp->n_rnr_naks++;
+		ibp->rvp.n_rnr_naks++;
 		if (qp->s_acked == qp->s_tail)
 			goto bail;
 		if (qp->s_flags & QIB_S_WAIT_RNR)
@@ -1276,7 +1276,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		/* The last valid PSN is the previous PSN. */
 		update_last_psn(qp, psn - 1);
 
-		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 
 		reset_psn(qp, psn);
 
@@ -1297,7 +1297,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
 			QIB_AETH_CREDIT_MASK) {
 		case 0: /* PSN sequence error */
-			ibp->n_seq_naks++;
+			ibp->rvp.n_seq_naks++;
 			/*
 			 * Back up to the responder's expected PSN.
 			 * Note that we might get a NAK in the middle of an
@@ -1310,17 +1310,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 
 		case 1: /* Invalid Request */
 			status = IB_WC_REM_INV_REQ_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
 			goto class_b;
 
 		case 2: /* Remote Access Error */
 			status = IB_WC_REM_ACCESS_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
 			goto class_b;
 
 		case 3: /* Remote Operation Error */
 			status = IB_WC_REM_OP_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
class_b:
 			if (qp->s_last == qp->s_acked) {
 				qib_send_complete(qp, wqe, status);
@@ -1371,7 +1371,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
 		wqe = do_rc_completion(qp, wqe, ibp);
 	}
 
-	ibp->n_rdma_seq++;
+	ibp->rvp.n_rdma_seq++;
 	qp->r_flags |= QIB_R_RDMAR_SEQ;
 	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
 	if (list_empty(&qp->rspwait)) {
@@ -1643,7 +1643,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 		 * Don't queue the NAK if we already sent one.
 		 */
 		if (!qp->r_nak_state) {
-			ibp->n_rc_seqnak++;
+			ibp->rvp.n_rc_seqnak++;
 			qp->r_nak_state = IB_NAK_PSN_ERROR;
 			/* Use the expected PSN. */
 			qp->r_ack_psn = qp->r_psn;
@@ -1679,7 +1679,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	 */
 	e = NULL;
 	old_req = 1;
-	ibp->n_rc_dupreq++;
+	ibp->rvp.n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 682447e785e1..629097998358 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -279,7 +279,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
 			goto err;
 		guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
-		if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+		if (!gid_ok(&hdr->u.l.grh.dgid,
+			    ibp->rvp.gid_prefix, guid))
 			goto err;
 		if (!gid_ok(&hdr->u.l.grh.sgid,
 		    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
@@ -311,7 +312,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 				goto err;
 			guid = get_sguid(ibp,
 					 qp->remote_ah_attr.grh.sgid_index);
-			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+			if (!gid_ok(&hdr->u.l.grh.dgid,
+				    ibp->rvp.gid_prefix, guid))
 				goto err;
 			if (!gid_ok(&hdr->u.l.grh.sgid,
 			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
@@ -409,7 +411,7 @@ again:
 
 	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
 	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		/*
 		 * For RC, the requester would timeout and retry so
 		 * shortcut the timeouts and just signal too many retries.
@@ -566,7 +568,7 @@ again:
 
send_comp:
 	spin_lock_irqsave(&sqp->s_lock, flags);
-	ibp->n_loop_pkts++;
+	ibp->rvp.n_loop_pkts++;
flush_send:
 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
 	qib_send_complete(sqp, wqe, send_status);
@@ -576,7 +578,7 @@ rnr_nak:
 	/* Handle RNR NAK */
 	if (qp->ibqp.qp_type == IB_QPT_UC)
 		goto send_comp;
-	ibp->n_rnr_naks++;
+	ibp->rvp.n_rnr_naks++;
 	/*
 	 * Note: we don't need the s_lock held since the BUSY flag
 	 * makes this single threaded.
@@ -663,7 +665,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
 	hdr->next_hdr = IB_GRH_NEXT_HDR;
 	hdr->hop_limit = grh->hop_limit;
 	/* The SGID is 32-bit aligned. */
-	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
 	hdr->sgid.global.interface_id = grh->sgid_index ?
 		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
 	hdr->dgid = grh->dgid;
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 1395ed0c811e..9d1104e3254d 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -702,7 +702,7 @@ busy:
 		struct qib_ibport *ibp;
 
 		ibp = &ppd->ibport_data;
-		ibp->n_dmawait++;
+		ibp->rvp.n_dmawait++;
 		qp->s_flags |= QIB_S_WAIT_DMA_DESC;
 		list_add_tail(&priv->iowait, &dev->dmawait);
 	}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 72a160e94e21..fe4cf5e4acec 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -406,7 +406,13 @@ static struct kobj_type qib_sl2vl_ktype = {
406#define QIB_DIAGC_ATTR(N) \ 406#define QIB_DIAGC_ATTR(N) \
407 static struct qib_diagc_attr qib_diagc_attr_##N = { \ 407 static struct qib_diagc_attr qib_diagc_attr_##N = { \
408 .attr = { .name = __stringify(N), .mode = 0664 }, \ 408 .attr = { .name = __stringify(N), .mode = 0664 }, \
409 .counter = offsetof(struct qib_ibport, n_##N) \ 409 .counter = offsetof(struct qib_ibport, rvp.n_##N) \
410 }
411
412#define QIB_DIAGC_ATTR_PER_CPU(N) \
413 static struct qib_diagc_attr qib_diagc_attr_##N = { \
414 .attr = { .name = __stringify(N), .mode = 0664 }, \
415 .counter = offsetof(struct qib_ibport, rvp.z_##N) \
410 } 416 }
411 417
412struct qib_diagc_attr { 418struct qib_diagc_attr {
@@ -414,10 +420,11 @@ struct qib_diagc_attr {
414 size_t counter; 420 size_t counter;
415}; 421};
416 422
423QIB_DIAGC_ATTR_PER_CPU(rc_acks);
424QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
425QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);
426
417QIB_DIAGC_ATTR(rc_resends); 427QIB_DIAGC_ATTR(rc_resends);
418QIB_DIAGC_ATTR(rc_acks);
419QIB_DIAGC_ATTR(rc_qacks);
420QIB_DIAGC_ATTR(rc_delayed_comp);
421QIB_DIAGC_ATTR(seq_naks); 428QIB_DIAGC_ATTR(seq_naks);
422QIB_DIAGC_ATTR(rdma_seq); 429QIB_DIAGC_ATTR(rdma_seq);
423QIB_DIAGC_ATTR(rnr_naks); 430QIB_DIAGC_ATTR(rnr_naks);
@@ -449,6 +456,35 @@ static struct attribute *diagc_default_attributes[] = {
449 NULL 456 NULL
450}; 457};
451 458
459static u64 get_all_cpu_total(u64 __percpu *cntr)
460{
461 int cpu;
462 u64 counter = 0;
463
464 for_each_possible_cpu(cpu)
465 counter += *per_cpu_ptr(cntr, cpu);
466 return counter;
467}
468
469#define def_write_per_cpu(cntr) \
470static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
471{ \
472 struct qib_devdata *dd = ppd->dd; \
473 struct qib_ibport *qibp = &ppd->ibport_data; \
474 /* A write can only zero the counter */ \
475 if (data == 0) \
476 qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
477 else \
478 qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
479}
480
481def_write_per_cpu(rc_acks)
482def_write_per_cpu(rc_qacks)
483def_write_per_cpu(rc_delayed_comp)
484
485#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
486 qibp->rvp.z_##cntr)
487
452static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, 488static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
453 char *buf) 489 char *buf)
454{ 490{
@@ -458,7 +494,16 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
458 container_of(kobj, struct qib_pportdata, diagc_kobj); 494 container_of(kobj, struct qib_pportdata, diagc_kobj);
459 struct qib_ibport *qibp = &ppd->ibport_data; 495 struct qib_ibport *qibp = &ppd->ibport_data;
460 496
461 return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); 497 if (!strncmp(dattr->attr.name, "rc_acks", 7))
498 return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
499 else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
500 return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
501 else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
502 return sprintf(buf, "%llu\n",
503 READ_PER_CPU_CNTR(rc_delayed_comp));
504 else
505 return sprintf(buf, "%u\n",
506 *(u32 *)((char *)qibp + dattr->counter));
462} 507}
463 508
464static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, 509static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
@@ -475,7 +520,15 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
475 ret = kstrtou32(buf, 0, &val); 520 ret = kstrtou32(buf, 0, &val);
476 if (ret) 521 if (ret)
477 return ret; 522 return ret;
478 *(u32 *)((char *) qibp + dattr->counter) = val; 523
524 if (!strncmp(dattr->attr.name, "rc_acks", 7))
525 write_per_cpu_rc_acks(ppd, val);
526 else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
527 write_per_cpu_rc_qacks(ppd, val);
528 else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
529 write_per_cpu_rc_delayed_comp(ppd, val);
530 else
531 *(u32 *)((char *)qibp + dattr->counter) = val;
479 return size; 532 return size;
480} 533}
481 534
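With these helpers in place, the per-CPU cases of show and store reduce to baseline arithmetic. Expanding READ_PER_CPU_CNTR(rc_acks) and write_per_cpu_rc_acks(ppd, 0) (a mechanical expansion of the macros in the hunks above, not new code):

	/* sysfs read: live total minus the recorded baseline */
	val = get_all_cpu_total(qibp->rvp.rc_acks) - qibp->rvp.z_rc_acks;

	/* "echo 0 > rc_acks": advance the baseline so later reads start
	 * from zero; the per-CPU slots themselves are never written */
	qibp->rvp.z_rc_acks = get_all_cpu_total(qibp->rvp.rc_acks);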
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 1ae135a4cead..659ac519bbfc 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -527,7 +527,7 @@ rewind:
527 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); 527 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
528 qp->r_sge.num_sge = 0; 528 qp->r_sge.num_sge = 0;
529drop: 529drop:
530 ibp->n_pkt_drops++; 530 ibp->rvp.n_pkt_drops++;
531 return; 531 return;
532 532
533op_err: 533op_err:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 6dc20cabab66..d84872dbadd7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -62,7 +62,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
62 62
63 qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); 63 qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
64 if (!qp) { 64 if (!qp) {
65 ibp->n_pkt_drops++; 65 ibp->rvp.n_pkt_drops++;
66 return; 66 return;
67 } 67 }
68 68
@@ -73,7 +73,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
73 73
74 if (dqptype != sqptype || 74 if (dqptype != sqptype ||
75 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { 75 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
76 ibp->n_pkt_drops++; 76 ibp->rvp.n_pkt_drops++;
77 goto drop; 77 goto drop;
78 } 78 }
79 79
@@ -153,14 +153,14 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
153 } 153 }
154 if (!ret) { 154 if (!ret) {
155 if (qp->ibqp.qp_num == 0) 155 if (qp->ibqp.qp_num == 0)
156 ibp->n_vl15_dropped++; 156 ibp->rvp.n_vl15_dropped++;
157 goto bail_unlock; 157 goto bail_unlock;
158 } 158 }
159 } 159 }
160 /* Silently drop packets which are too big. */ 160 /* Silently drop packets which are too big. */
161 if (unlikely(wc.byte_len > qp->r_len)) { 161 if (unlikely(wc.byte_len > qp->r_len)) {
162 qp->r_flags |= QIB_R_REUSE_SGE; 162 qp->r_flags |= QIB_R_REUSE_SGE;
163 ibp->n_pkt_drops++; 163 ibp->rvp.n_pkt_drops++;
164 goto bail_unlock; 164 goto bail_unlock;
165 } 165 }
166 166
@@ -219,7 +219,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
219 /* Signal completion event if the solicited bit is set. */ 219 /* Signal completion event if the solicited bit is set. */
220 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 220 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
221 swqe->wr.send_flags & IB_SEND_SOLICITED); 221 swqe->wr.send_flags & IB_SEND_SOLICITED);
222 ibp->n_loop_pkts++; 222 ibp->rvp.n_loop_pkts++;
223bail_unlock: 223bail_unlock:
224 spin_unlock_irqrestore(&qp->r_lock, flags); 224 spin_unlock_irqrestore(&qp->r_lock, flags);
225drop: 225drop:
@@ -546,7 +546,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
546 } 546 }
547 if (!ret) { 547 if (!ret) {
548 if (qp->ibqp.qp_num == 0) 548 if (qp->ibqp.qp_num == 0)
549 ibp->n_vl15_dropped++; 549 ibp->rvp.n_vl15_dropped++;
550 return; 550 return;
551 } 551 }
552 } 552 }
@@ -589,5 +589,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
589 return; 589 return;
590 590
591drop: 591drop:
592 ibp->n_pkt_drops++; 592 ibp->rvp.n_pkt_drops++;
593} 593}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 89fe514a4981..276e4dcc2d46 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -586,7 +586,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
586 586
587 /* Check for valid receive state. */ 587 /* Check for valid receive state. */
588 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { 588 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
589 ibp->n_pkt_drops++; 589 ibp->rvp.n_pkt_drops++;
590 goto unlock; 590 goto unlock;
591 } 591 }
592 592
@@ -716,7 +716,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
716 return; 716 return;
717 717
718drop: 718drop:
719 ibp->n_pkt_drops++; 719 ibp->rvp.n_pkt_drops++;
720} 720}
721 721
722/* 722/*
@@ -1256,7 +1256,7 @@ err_tx:
1256 qib_put_txreq(tx); 1256 qib_put_txreq(tx);
1257 ret = wait_kmem(dev, qp); 1257 ret = wait_kmem(dev, qp);
1258unaligned: 1258unaligned:
1259 ibp->n_unaligned++; 1259 ibp->rvp.n_unaligned++;
1260bail: 1260bail:
1261 return ret; 1261 return ret;
1262bail_tx: 1262bail_tx:
@@ -1647,16 +1647,16 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
1647 memset(props, 0, sizeof(*props)); 1647 memset(props, 0, sizeof(*props));
1648 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); 1648 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1649 props->lmc = ppd->lmc; 1649 props->lmc = ppd->lmc;
1650 props->sm_lid = ibp->sm_lid; 1650 props->sm_lid = ibp->rvp.sm_lid;
1651 props->sm_sl = ibp->sm_sl; 1651 props->sm_sl = ibp->rvp.sm_sl;
1652 props->state = dd->f_iblink_state(ppd->lastibcstat); 1652 props->state = dd->f_iblink_state(ppd->lastibcstat);
1653 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); 1653 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1654 props->port_cap_flags = ibp->port_cap_flags; 1654 props->port_cap_flags = ibp->rvp.port_cap_flags;
1655 props->gid_tbl_len = QIB_GUIDS_PER_PORT; 1655 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1656 props->max_msg_sz = 0x80000000; 1656 props->max_msg_sz = 0x80000000;
1657 props->pkey_tbl_len = qib_get_npkeys(dd); 1657 props->pkey_tbl_len = qib_get_npkeys(dd);
1658 props->bad_pkey_cntr = ibp->pkey_violations; 1658 props->bad_pkey_cntr = ibp->rvp.pkey_violations;
1659 props->qkey_viol_cntr = ibp->qkey_violations; 1659 props->qkey_viol_cntr = ibp->rvp.qkey_violations;
1660 props->active_width = ppd->link_width_active; 1660 props->active_width = ppd->link_width_active;
1661 /* See rate_show() */ 1661 /* See rate_show() */
1662 props->active_speed = ppd->link_speed_active; 1662 props->active_speed = ppd->link_speed_active;
@@ -1684,7 +1684,7 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
1684 mtu = IB_MTU_2048; 1684 mtu = IB_MTU_2048;
1685 } 1685 }
1686 props->active_mtu = mtu; 1686 props->active_mtu = mtu;
1687 props->subnet_timeout = ibp->subnet_timeout; 1687 props->subnet_timeout = ibp->rvp.subnet_timeout;
1688 1688
1689 return 0; 1689 return 0;
1690} 1690}
@@ -1734,14 +1734,14 @@ static int qib_modify_port(struct ib_device *ibdev, u8 port,
1734 struct qib_ibport *ibp = to_iport(ibdev, port); 1734 struct qib_ibport *ibp = to_iport(ibdev, port);
1735 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1735 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1736 1736
1737 ibp->port_cap_flags |= props->set_port_cap_mask; 1737 ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
1738 ibp->port_cap_flags &= ~props->clr_port_cap_mask; 1738 ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
1739 if (props->set_port_cap_mask || props->clr_port_cap_mask) 1739 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1740 qib_cap_mask_chg(ibp); 1740 qib_cap_mask_chg(ibp);
1741 if (port_modify_mask & IB_PORT_SHUTDOWN) 1741 if (port_modify_mask & IB_PORT_SHUTDOWN)
1742 qib_set_linkstate(ppd, QIB_IB_LINKDOWN); 1742 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1743 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) 1743 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1744 ibp->qkey_violations = 0; 1744 ibp->rvp.qkey_violations = 0;
1745 return 0; 1745 return 0;
1746} 1746}
1747 1747
@@ -1757,7 +1757,7 @@ static int qib_query_gid(struct ib_device *ibdev, u8 port,
1757 struct qib_ibport *ibp = to_iport(ibdev, port); 1757 struct qib_ibport *ibp = to_iport(ibdev, port);
1758 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1758 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1759 1759
1760 gid->global.subnet_prefix = ibp->gid_prefix; 1760 gid->global.subnet_prefix = ibp->rvp.gid_prefix;
1761 if (index == 0) 1761 if (index == 0)
1762 gid->global.interface_id = ppd->guid; 1762 gid->global.interface_id = ppd->guid;
1763 else if (index < QIB_GUIDS_PER_PORT) 1763 else if (index < QIB_GUIDS_PER_PORT)
@@ -1787,7 +1787,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1787 attr.dlid = dlid; 1787 attr.dlid = dlid;
1788 attr.port_num = ppd_from_ibp(ibp)->port; 1788 attr.port_num = ppd_from_ibp(ibp)->port;
1789 rcu_read_lock(); 1789 rcu_read_lock();
1790 qp0 = rcu_dereference(ibp->qp0); 1790 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1791 if (qp0) 1791 if (qp0)
1792 ah = ib_create_ah(qp0->ibqp.pd, &attr); 1792 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1793 rcu_read_unlock(); 1793 rcu_read_unlock();
@@ -1876,22 +1876,22 @@ static void init_ibport(struct qib_pportdata *ppd)
1876 struct qib_verbs_counters cntrs; 1876 struct qib_verbs_counters cntrs;
1877 struct qib_ibport *ibp = &ppd->ibport_data; 1877 struct qib_ibport *ibp = &ppd->ibport_data;
1878 1878
1879 spin_lock_init(&ibp->lock); 1879 spin_lock_init(&ibp->rvp.lock);
1880 /* Set the prefix to the default value (see ch. 4.1.1) */ 1880 /* Set the prefix to the default value (see ch. 4.1.1) */
1881 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; 1881 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1882 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); 1882 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1883 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | 1883 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1884 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | 1884 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1885 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | 1885 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1886 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | 1886 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1887 IB_PORT_OTHER_LOCAL_CHANGES_SUP; 1887 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1888 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) 1888 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1889 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; 1889 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1890 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 1890 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1891 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 1891 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1892 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 1892 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1893 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 1893 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1894 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 1894 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1895 1895
1896 /* Snapshot current HW counters to "clear" them. */ 1896 /* Snapshot current HW counters to "clear" them. */
1897 qib_get_counters(ppd, &cntrs); 1897 qib_get_counters(ppd, &cntrs);
@@ -1911,8 +1911,8 @@ static void init_ibport(struct qib_pportdata *ppd)
1911 ibp->z_excessive_buffer_overrun_errors = 1911 ibp->z_excessive_buffer_overrun_errors =
1912 cntrs.excessive_buffer_overrun_errors; 1912 cntrs.excessive_buffer_overrun_errors;
1913 ibp->z_vl15_dropped = cntrs.vl15_dropped; 1913 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1914 RCU_INIT_POINTER(ibp->qp0, NULL); 1914 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1915 RCU_INIT_POINTER(ibp->qp1, NULL); 1915 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1916} 1916}
1917 1917
1918static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, 1918static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
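qp0 and qp1 become slots 0 and 1 of the rdmavt per-port QP array but keep the usual RCU discipline: lockless readers, as in qib_create_qp0_ah() above, use rcu_read_lock()/rcu_dereference(), so any path that installs or clears an entry must publish it with the matching primitives. A minimal sketch of the update side; the demo_* helpers are illustrative and not part of this patch:

	static void demo_publish_qp0(struct qib_ibport *ibp, struct rvt_qp *qp)
	{
		/* pairs with rcu_dereference() on the read side */
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	}

	static void demo_retire_qp0(struct qib_ibport *ibp)
	{
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);	/* NULL needs no barrier */
		synchronize_rcu();	/* wait for in-flight readers to drain */
	}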
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 00dd2ad0bfa2..538d3a6adf3b 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -401,21 +401,10 @@ struct qib_pma_counters {
401}; 401};
402 402
403struct qib_ibport { 403struct qib_ibport {
404 struct rvt_qp __rcu *qp0; 404 struct rvt_ibport rvp;
405 struct rvt_qp __rcu *qp1;
406 struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
407 struct rvt_ah *sm_ah; 405 struct rvt_ah *sm_ah;
408 struct rvt_ah *smi_ah; 406 struct rvt_ah *smi_ah;
409 struct rb_root mcast_tree;
410 spinlock_t lock; /* protect changes in this struct */
411
412 /* non-zero when timer is set */
413 unsigned long mkey_lease_timeout;
414 unsigned long trap_timeout;
415 __be64 gid_prefix; /* in network order */
416 __be64 mkey;
417 __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ 407 __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
418 u64 tid; /* TID for traps */
419 struct qib_pma_counters __percpu *pmastats; 408 struct qib_pma_counters __percpu *pmastats;
420 u64 z_unicast_xmit; /* starting count for PMA */ 409 u64 z_unicast_xmit; /* starting count for PMA */
421 u64 z_unicast_rcv; /* starting count for PMA */ 410 u64 z_unicast_rcv; /* starting count for PMA */
@@ -434,42 +423,9 @@ struct qib_ibport {
434 u32 z_local_link_integrity_errors; /* starting count for PMA */ 423 u32 z_local_link_integrity_errors; /* starting count for PMA */
435 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */ 424 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
436 u32 z_vl15_dropped; /* starting count for PMA */ 425 u32 z_vl15_dropped; /* starting count for PMA */
437 u32 n_rc_resends;
438 u32 n_rc_acks;
439 u32 n_rc_qacks;
440 u32 n_rc_delayed_comp;
441 u32 n_seq_naks;
442 u32 n_rdma_seq;
443 u32 n_rnr_naks;
444 u32 n_other_naks;
445 u32 n_loop_pkts;
446 u32 n_pkt_drops;
447 u32 n_vl15_dropped;
448 u32 n_rc_timeouts;
449 u32 n_dmawait;
450 u32 n_unaligned;
451 u32 n_rc_dupreq;
452 u32 n_rc_seqnak;
453 u32 port_cap_flags;
454 u32 pma_sample_start;
455 u32 pma_sample_interval;
456 __be16 pma_counter_select[5];
457 u16 pma_tag;
458 u16 pkey_violations;
459 u16 qkey_violations;
460 u16 mkey_violations;
461 u16 mkey_lease_period;
462 u16 sm_lid;
463 u16 repress_traps;
464 u8 sm_sl;
465 u8 mkeyprot;
466 u8 subnet_timeout;
467 u8 vl_high_limit;
468 u8 sl_to_vl[16]; 426 u8 sl_to_vl[16];
469
470}; 427};
471 428
472
473struct qib_ibdev { 429struct qib_ibdev {
474 struct rvt_dev_info rdi; 430 struct rvt_dev_info rdi;
475 struct list_head pending_mmaps; 431 struct list_head pending_mmaps;
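The struct shrinks to the embedded struct rvt_ibport rvp plus the few qib-specific members; everything shared moves behind rvp. Because offsetof() accepts a nested member designator, the diagc attribute tables can still address counters from the containing qib_ibport, which is exactly what offsetof(struct qib_ibport, rvp.n_##N) in qib_sysfs.c relies on. A standalone illustration with hypothetical types:

	#include <stddef.h>

	struct demo_rvt_ibport { unsigned int n_rc_resends; };
	struct demo_qib_ibport { struct demo_rvt_ibport rvp; };

	/* same pointer arithmetic as diagc_attr_show() */
	static unsigned int demo_read(const struct demo_qib_ibport *p, size_t off)
	{
		return *(const unsigned int *)((const char *)p + off);
	}

	/* demo_read(p, offsetof(struct demo_qib_ibport, rvp.n_rc_resends))
	 * yields p->rvp.n_rc_resends */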
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index 1c7af034bdae..c3d6535aab38 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -114,8 +114,8 @@ struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
114 unsigned long flags; 114 unsigned long flags;
115 struct qib_mcast *mcast; 115 struct qib_mcast *mcast;
116 116
117 spin_lock_irqsave(&ibp->lock, flags); 117 spin_lock_irqsave(&ibp->rvp.lock, flags);
118 n = ibp->mcast_tree.rb_node; 118 n = ibp->rvp.mcast_tree.rb_node;
119 while (n) { 119 while (n) {
120 int ret; 120 int ret;
121 121
@@ -129,11 +129,11 @@ struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
129 n = n->rb_right; 129 n = n->rb_right;
130 else { 130 else {
131 atomic_inc(&mcast->refcount); 131 atomic_inc(&mcast->refcount);
132 spin_unlock_irqrestore(&ibp->lock, flags); 132 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
133 goto bail; 133 goto bail;
134 } 134 }
135 } 135 }
136 spin_unlock_irqrestore(&ibp->lock, flags); 136 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
137 137
138 mcast = NULL; 138 mcast = NULL;
139 139
@@ -153,11 +153,11 @@ bail:
153static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, 153static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp) 154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
155{ 155{
156 struct rb_node **n = &ibp->mcast_tree.rb_node; 156 struct rb_node **n = &ibp->rvp.mcast_tree.rb_node;
157 struct rb_node *pn = NULL; 157 struct rb_node *pn = NULL;
158 int ret; 158 int ret;
159 159
160 spin_lock_irq(&ibp->lock); 160 spin_lock_irq(&ibp->rvp.lock);
161 161
162 while (*n) { 162 while (*n) {
163 struct qib_mcast *tmcast; 163 struct qib_mcast *tmcast;
@@ -212,12 +212,12 @@ static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
212 212
213 atomic_inc(&mcast->refcount); 213 atomic_inc(&mcast->refcount);
214 rb_link_node(&mcast->rb_node, pn, n); 214 rb_link_node(&mcast->rb_node, pn, n);
215 rb_insert_color(&mcast->rb_node, &ibp->mcast_tree); 215 rb_insert_color(&mcast->rb_node, &ibp->rvp.mcast_tree);
216 216
217 ret = 0; 217 ret = 0;
218 218
219bail: 219bail:
220 spin_unlock_irq(&ibp->lock); 220 spin_unlock_irq(&ibp->rvp.lock);
221 221
222 return ret; 222 return ret;
223} 223}
@@ -294,13 +294,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) 294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
295 return -EINVAL; 295 return -EINVAL;
296 296
297 spin_lock_irq(&ibp->lock); 297 spin_lock_irq(&ibp->rvp.lock);
298 298
299 /* Find the GID in the mcast table. */ 299 /* Find the GID in the mcast table. */
300 n = ibp->mcast_tree.rb_node; 300 n = ibp->rvp.mcast_tree.rb_node;
301 while (1) { 301 while (1) {
302 if (n == NULL) { 302 if (n == NULL) {
303 spin_unlock_irq(&ibp->lock); 303 spin_unlock_irq(&ibp->rvp.lock);
304 return -EINVAL; 304 return -EINVAL;
305 } 305 }
306 306
@@ -329,13 +329,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
329 329
330 /* If this was the last attached QP, remove the GID too. */ 330 /* If this was the last attached QP, remove the GID too. */
331 if (list_empty(&mcast->qp_list)) { 331 if (list_empty(&mcast->qp_list)) {
332 rb_erase(&mcast->rb_node, &ibp->mcast_tree); 332 rb_erase(&mcast->rb_node, &ibp->rvp.mcast_tree);
333 last = 1; 333 last = 1;
334 } 334 }
335 break; 335 break;
336 } 336 }
337 337
338 spin_unlock_irq(&ibp->lock); 338 spin_unlock_irq(&ibp->rvp.lock);
339 /* QP not attached */ 339 /* QP not attached */
340 if (!delp) 340 if (!delp)
341 return -EINVAL; 341 return -EINVAL;
@@ -359,5 +359,5 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
359 359
360int qib_mcast_tree_empty(struct qib_ibport *ibp) 360int qib_mcast_tree_empty(struct qib_ibport *ibp)
361{ 361{
362 return ibp->mcast_tree.rb_node == NULL; 362 return !(ibp->rvp.mcast_tree.rb_node);
363} 363}
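The rule in this file is unchanged by the rename: the multicast GID tree and the spinlock guarding it simply both live in rvp now, and a node must be pinned with a reference before the lock is released. A condensed sketch of the lookup loop (it compresses qib_mcast_find() above and assumes struct qib_mcast's mgid and refcount members as declared in qib_verbs.h):

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	for (n = ibp->rvp.mcast_tree.rb_node; n; ) {
		struct qib_mcast *m = rb_entry(n, struct qib_mcast, rb_node);
		int cmp = memcmp(mgid->raw, m->mgid.raw, sizeof(*mgid));

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else {
			atomic_inc(&m->refcount);	/* pin before unlocking */
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);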