author     Harish Chegondi <harish.chegondi@intel.com>    2016-01-22 15:56:02 -0500
committer  Doug Ledford <dledford@redhat.com>             2016-03-10 20:37:21 -0500
commit     f24a6d4887668cc2925b1e908c72b96a2604a814
tree       ae63779f41d5f2a5f7d6c8f64565f31e0365c30b /drivers/infiniband/hw/qib/qib_mad.c
parent     d2b8d4da1ca5052b72e043d2ce68157abf3f2d24
IB/qib: Remove ibport and use rdmavt version
Remove several ibport members from qib and use the rdmavt versions instead. rc_acks,
rc_qacks, and rc_delayed_comp are defined as per-CPU variables in rdmavt.
Add support for these rdmavt per-CPU variables, which were not per-CPU
variables in the qib ibport structure.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
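
The rc_acks, rc_qacks, and rc_delayed_comp counters mentioned in the commit message become per-CPU variables in rdmavt. As a rough sketch of that pattern only (the struct and function names below are hypothetical, not the actual rdmavt definitions), a per-CPU counter is allocated once, incremented locklessly on the hot path, and summed across CPUs when it is read:

```c
/* Illustrative sketch of a per-CPU counter; names are hypothetical. */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

struct example_ibport {
	u64 __percpu *rc_acks;		/* per-CPU counter instead of a plain u64 */
};

static int example_ibport_init(struct example_ibport *ibp)
{
	ibp->rc_acks = alloc_percpu(u64);
	if (!ibp->rc_acks)
		return -ENOMEM;
	return 0;
}

static void example_ibport_count_ack(struct example_ibport *ibp)
{
	/* Lock-free increment of this CPU's copy of the counter. */
	this_cpu_inc(*ibp->rc_acks);
}

static u64 example_ibport_sum_acks(struct example_ibport *ibp)
{
	u64 total = 0;
	int cpu;

	/* Readers sum the per-CPU copies to obtain the aggregate value. */
	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(ibp->rc_acks, cpu);
	return total;
}

static void example_ibport_exit(struct example_ibport *ibp)
{
	free_percpu(ibp->rc_acks);
}
```

The trade-off is the usual one for per-CPU statistics: updates avoid cache-line contention and locking, while reads pay the cost of summing over all possible CPUs.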
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_mad.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 240
1 file changed, 121 insertions(+), 119 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 43f8c498fef0..3e8dde2119ed 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | |||
70 | unsigned long flags; | 70 | unsigned long flags; |
71 | unsigned long timeout; | 71 | unsigned long timeout; |
72 | 72 | ||
73 | agent = ibp->send_agent; | 73 | agent = ibp->rvp.send_agent; |
74 | if (!agent) | 74 | if (!agent) |
75 | return; | 75 | return; |
76 | 76 | ||
@@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | |||
79 | return; | 79 | return; |
80 | 80 | ||
81 | /* o14-2 */ | 81 | /* o14-2 */ |
82 | if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) | 82 | if (ibp->rvp.trap_timeout && |
83 | time_before(jiffies, ibp->rvp.trap_timeout)) | ||
83 | return; | 84 | return; |
84 | 85 | ||
85 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, | 86 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, |
@@ -93,18 +94,18 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | |||
93 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; | 94 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; |
94 | smp->class_version = 1; | 95 | smp->class_version = 1; |
95 | smp->method = IB_MGMT_METHOD_TRAP; | 96 | smp->method = IB_MGMT_METHOD_TRAP; |
96 | ibp->tid++; | 97 | ibp->rvp.tid++; |
97 | smp->tid = cpu_to_be64(ibp->tid); | 98 | smp->tid = cpu_to_be64(ibp->rvp.tid); |
98 | smp->attr_id = IB_SMP_ATTR_NOTICE; | 99 | smp->attr_id = IB_SMP_ATTR_NOTICE; |
99 | /* o14-1: smp->mkey = 0; */ | 100 | /* o14-1: smp->mkey = 0; */ |
100 | memcpy(smp->data, data, len); | 101 | memcpy(smp->data, data, len); |
101 | 102 | ||
102 | spin_lock_irqsave(&ibp->lock, flags); | 103 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
103 | if (!ibp->sm_ah) { | 104 | if (!ibp->sm_ah) { |
104 | if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { | 105 | if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { |
105 | struct ib_ah *ah; | 106 | struct ib_ah *ah; |
106 | 107 | ||
107 | ah = qib_create_qp0_ah(ibp, ibp->sm_lid); | 108 | ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid); |
108 | if (IS_ERR(ah)) | 109 | if (IS_ERR(ah)) |
109 | ret = PTR_ERR(ah); | 110 | ret = PTR_ERR(ah); |
110 | else { | 111 | else { |
@@ -118,17 +119,17 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | |||
118 | send_buf->ah = &ibp->sm_ah->ibah; | 119 | send_buf->ah = &ibp->sm_ah->ibah; |
119 | ret = 0; | 120 | ret = 0; |
120 | } | 121 | } |
121 | spin_unlock_irqrestore(&ibp->lock, flags); | 122 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
122 | 123 | ||
123 | if (!ret) | 124 | if (!ret) |
124 | ret = ib_post_send_mad(send_buf, NULL); | 125 | ret = ib_post_send_mad(send_buf, NULL); |
125 | if (!ret) { | 126 | if (!ret) { |
126 | /* 4.096 usec. */ | 127 | /* 4.096 usec. */ |
127 | timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; | 128 | timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000; |
128 | ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); | 129 | ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout); |
129 | } else { | 130 | } else { |
130 | ib_free_send_mad(send_buf); | 131 | ib_free_send_mad(send_buf); |
131 | ibp->trap_timeout = 0; | 132 | ibp->rvp.trap_timeout = 0; |
132 | } | 133 | } |
133 | } | 134 | } |
134 | 135 | ||
@@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, | |||
141 | struct ib_mad_notice_attr data; | 142 | struct ib_mad_notice_attr data; |
142 | 143 | ||
143 | if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) | 144 | if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) |
144 | ibp->pkey_violations++; | 145 | ibp->rvp.pkey_violations++; |
145 | else | 146 | else |
146 | ibp->qkey_violations++; | 147 | ibp->rvp.qkey_violations++; |
147 | ibp->n_pkt_drops++; | 148 | ibp->rvp.n_pkt_drops++; |
148 | 149 | ||
149 | /* Send violation trap */ | 150 | /* Send violation trap */ |
150 | data.generic_type = IB_NOTICE_TYPE_SECURITY; | 151 | data.generic_type = IB_NOTICE_TYPE_SECURITY; |
@@ -217,8 +218,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp) | |||
217 | data.toggle_count = 0; | 218 | data.toggle_count = 0; |
218 | memset(&data.details, 0, sizeof(data.details)); | 219 | memset(&data.details, 0, sizeof(data.details)); |
219 | data.details.ntc_144.lid = data.issuer_lid; | 220 | data.details.ntc_144.lid = data.issuer_lid; |
220 | data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); | 221 | data.details.ntc_144.new_cap_mask = |
221 | 222 | cpu_to_be32(ibp->rvp.port_cap_flags); | |
222 | qib_send_trap(ibp, &data, sizeof(data)); | 223 | qib_send_trap(ibp, &data, sizeof(data)); |
223 | } | 224 | } |
224 | 225 | ||
@@ -409,37 +410,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) | |||
409 | int ret = 0; | 410 | int ret = 0; |
410 | 411 | ||
411 | /* Is the mkey in the process of expiring? */ | 412 | /* Is the mkey in the process of expiring? */ |
412 | if (ibp->mkey_lease_timeout && | 413 | if (ibp->rvp.mkey_lease_timeout && |
413 | time_after_eq(jiffies, ibp->mkey_lease_timeout)) { | 414 | time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) { |
414 | /* Clear timeout and mkey protection field. */ | 415 | /* Clear timeout and mkey protection field. */ |
415 | ibp->mkey_lease_timeout = 0; | 416 | ibp->rvp.mkey_lease_timeout = 0; |
416 | ibp->mkeyprot = 0; | 417 | ibp->rvp.mkeyprot = 0; |
417 | } | 418 | } |
418 | 419 | ||
419 | if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 || | 420 | if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 || |
420 | ibp->mkey == smp->mkey) | 421 | ibp->rvp.mkey == smp->mkey) |
421 | valid_mkey = 1; | 422 | valid_mkey = 1; |
422 | 423 | ||
423 | /* Unset lease timeout on any valid Get/Set/TrapRepress */ | 424 | /* Unset lease timeout on any valid Get/Set/TrapRepress */ |
424 | if (valid_mkey && ibp->mkey_lease_timeout && | 425 | if (valid_mkey && ibp->rvp.mkey_lease_timeout && |
425 | (smp->method == IB_MGMT_METHOD_GET || | 426 | (smp->method == IB_MGMT_METHOD_GET || |
426 | smp->method == IB_MGMT_METHOD_SET || | 427 | smp->method == IB_MGMT_METHOD_SET || |
427 | smp->method == IB_MGMT_METHOD_TRAP_REPRESS)) | 428 | smp->method == IB_MGMT_METHOD_TRAP_REPRESS)) |
428 | ibp->mkey_lease_timeout = 0; | 429 | ibp->rvp.mkey_lease_timeout = 0; |
429 | 430 | ||
430 | if (!valid_mkey) { | 431 | if (!valid_mkey) { |
431 | switch (smp->method) { | 432 | switch (smp->method) { |
432 | case IB_MGMT_METHOD_GET: | 433 | case IB_MGMT_METHOD_GET: |
433 | /* Bad mkey not a violation below level 2 */ | 434 | /* Bad mkey not a violation below level 2 */ |
434 | if (ibp->mkeyprot < 2) | 435 | if (ibp->rvp.mkeyprot < 2) |
435 | break; | 436 | break; |
436 | case IB_MGMT_METHOD_SET: | 437 | case IB_MGMT_METHOD_SET: |
437 | case IB_MGMT_METHOD_TRAP_REPRESS: | 438 | case IB_MGMT_METHOD_TRAP_REPRESS: |
438 | if (ibp->mkey_violations != 0xFFFF) | 439 | if (ibp->rvp.mkey_violations != 0xFFFF) |
439 | ++ibp->mkey_violations; | 440 | ++ibp->rvp.mkey_violations; |
440 | if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) | 441 | if (!ibp->rvp.mkey_lease_timeout && |
441 | ibp->mkey_lease_timeout = jiffies + | 442 | ibp->rvp.mkey_lease_period) |
442 | ibp->mkey_lease_period * HZ; | 443 | ibp->rvp.mkey_lease_timeout = jiffies + |
444 | ibp->rvp.mkey_lease_period * HZ; | ||
443 | /* Generate a trap notice. */ | 445 | /* Generate a trap notice. */ |
444 | qib_bad_mkey(ibp, smp); | 446 | qib_bad_mkey(ibp, smp); |
445 | ret = 1; | 447 | ret = 1; |
@@ -489,15 +491,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
489 | 491 | ||
490 | /* Only return the mkey if the protection field allows it. */ | 492 | /* Only return the mkey if the protection field allows it. */ |
491 | if (!(smp->method == IB_MGMT_METHOD_GET && | 493 | if (!(smp->method == IB_MGMT_METHOD_GET && |
492 | ibp->mkey != smp->mkey && | 494 | ibp->rvp.mkey != smp->mkey && |
493 | ibp->mkeyprot == 1)) | 495 | ibp->rvp.mkeyprot == 1)) |
494 | pip->mkey = ibp->mkey; | 496 | pip->mkey = ibp->rvp.mkey; |
495 | pip->gid_prefix = ibp->gid_prefix; | 497 | pip->gid_prefix = ibp->rvp.gid_prefix; |
496 | pip->lid = cpu_to_be16(ppd->lid); | 498 | pip->lid = cpu_to_be16(ppd->lid); |
497 | pip->sm_lid = cpu_to_be16(ibp->sm_lid); | 499 | pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid); |
498 | pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); | 500 | pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); |
499 | /* pip->diag_code; */ | 501 | /* pip->diag_code; */ |
500 | pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); | 502 | pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period); |
501 | pip->local_port_num = port; | 503 | pip->local_port_num = port; |
502 | pip->link_width_enabled = ppd->link_width_enabled; | 504 | pip->link_width_enabled = ppd->link_width_enabled; |
503 | pip->link_width_supported = ppd->link_width_supported; | 505 | pip->link_width_supported = ppd->link_width_supported; |
@@ -508,7 +510,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
508 | pip->portphysstate_linkdown = | 510 | pip->portphysstate_linkdown = |
509 | (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | | 511 | (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | |
510 | (get_linkdowndefaultstate(ppd) ? 1 : 2); | 512 | (get_linkdowndefaultstate(ppd) ? 1 : 2); |
511 | pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; | 513 | pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; |
512 | pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | | 514 | pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | |
513 | ppd->link_speed_enabled; | 515 | ppd->link_speed_enabled; |
514 | switch (ppd->ibmtu) { | 516 | switch (ppd->ibmtu) { |
@@ -529,9 +531,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
529 | mtu = IB_MTU_256; | 531 | mtu = IB_MTU_256; |
530 | break; | 532 | break; |
531 | } | 533 | } |
532 | pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; | 534 | pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl; |
533 | pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ | 535 | pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ |
534 | pip->vl_high_limit = ibp->vl_high_limit; | 536 | pip->vl_high_limit = ibp->rvp.vl_high_limit; |
535 | pip->vl_arb_high_cap = | 537 | pip->vl_arb_high_cap = |
536 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); | 538 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); |
537 | pip->vl_arb_low_cap = | 539 | pip->vl_arb_low_cap = |
@@ -542,20 +544,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
542 | /* pip->vlstallcnt_hoqlife; */ | 544 | /* pip->vlstallcnt_hoqlife; */ |
543 | pip->operationalvl_pei_peo_fpi_fpo = | 545 | pip->operationalvl_pei_peo_fpi_fpo = |
544 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; | 546 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; |
545 | pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); | 547 | pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations); |
546 | /* P_KeyViolations are counted by hardware. */ | 548 | /* P_KeyViolations are counted by hardware. */ |
547 | pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); | 549 | pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations); |
548 | pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); | 550 | pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations); |
549 | /* Only the hardware GUID is supported for now */ | 551 | /* Only the hardware GUID is supported for now */ |
550 | pip->guid_cap = QIB_GUIDS_PER_PORT; | 552 | pip->guid_cap = QIB_GUIDS_PER_PORT; |
551 | pip->clientrereg_resv_subnetto = ibp->subnet_timeout; | 553 | pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout; |
552 | /* 32.768 usec. response time (guessing) */ | 554 | /* 32.768 usec. response time (guessing) */ |
553 | pip->resv_resptimevalue = 3; | 555 | pip->resv_resptimevalue = 3; |
554 | pip->localphyerrors_overrunerrors = | 556 | pip->localphyerrors_overrunerrors = |
555 | (get_phyerrthreshold(ppd) << 4) | | 557 | (get_phyerrthreshold(ppd) << 4) | |
556 | get_overrunthreshold(ppd); | 558 | get_overrunthreshold(ppd); |
557 | /* pip->max_credit_hint; */ | 559 | /* pip->max_credit_hint; */ |
558 | if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { | 560 | if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { |
559 | u32 v; | 561 | u32 v; |
560 | 562 | ||
561 | v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); | 563 | v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); |
@@ -685,9 +687,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
685 | event.device = ibdev; | 687 | event.device = ibdev; |
686 | event.element.port_num = port; | 688 | event.element.port_num = port; |
687 | 689 | ||
688 | ibp->mkey = pip->mkey; | 690 | ibp->rvp.mkey = pip->mkey; |
689 | ibp->gid_prefix = pip->gid_prefix; | 691 | ibp->rvp.gid_prefix = pip->gid_prefix; |
690 | ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); | 692 | ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); |
691 | 693 | ||
692 | lid = be16_to_cpu(pip->lid); | 694 | lid = be16_to_cpu(pip->lid); |
693 | /* Must be a valid unicast LID address. */ | 695 | /* Must be a valid unicast LID address. */ |
@@ -708,19 +710,19 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
708 | /* Must be a valid unicast LID address. */ | 710 | /* Must be a valid unicast LID address. */ |
709 | if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) | 711 | if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) |
710 | smp->status |= IB_SMP_INVALID_FIELD; | 712 | smp->status |= IB_SMP_INVALID_FIELD; |
711 | else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { | 713 | else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { |
712 | spin_lock_irqsave(&ibp->lock, flags); | 714 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
713 | if (ibp->sm_ah) { | 715 | if (ibp->sm_ah) { |
714 | if (smlid != ibp->sm_lid) | 716 | if (smlid != ibp->rvp.sm_lid) |
715 | ibp->sm_ah->attr.dlid = smlid; | 717 | ibp->sm_ah->attr.dlid = smlid; |
716 | if (msl != ibp->sm_sl) | 718 | if (msl != ibp->rvp.sm_sl) |
717 | ibp->sm_ah->attr.sl = msl; | 719 | ibp->sm_ah->attr.sl = msl; |
718 | } | 720 | } |
719 | spin_unlock_irqrestore(&ibp->lock, flags); | 721 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
720 | if (smlid != ibp->sm_lid) | 722 | if (smlid != ibp->rvp.sm_lid) |
721 | ibp->sm_lid = smlid; | 723 | ibp->rvp.sm_lid = smlid; |
722 | if (msl != ibp->sm_sl) | 724 | if (msl != ibp->rvp.sm_sl) |
723 | ibp->sm_sl = msl; | 725 | ibp->rvp.sm_sl = msl; |
724 | event.event = IB_EVENT_SM_CHANGE; | 726 | event.event = IB_EVENT_SM_CHANGE; |
725 | ib_dispatch_event(&event); | 727 | ib_dispatch_event(&event); |
726 | } | 728 | } |
@@ -768,10 +770,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
768 | smp->status |= IB_SMP_INVALID_FIELD; | 770 | smp->status |= IB_SMP_INVALID_FIELD; |
769 | } | 771 | } |
770 | 772 | ||
771 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; | 773 | ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6; |
772 | ibp->vl_high_limit = pip->vl_high_limit; | 774 | ibp->rvp.vl_high_limit = pip->vl_high_limit; |
773 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, | 775 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, |
774 | ibp->vl_high_limit); | 776 | ibp->rvp.vl_high_limit); |
775 | 777 | ||
776 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); | 778 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); |
777 | if (mtu == -1) | 779 | if (mtu == -1) |
@@ -789,13 +791,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
789 | } | 791 | } |
790 | 792 | ||
791 | if (pip->mkey_violations == 0) | 793 | if (pip->mkey_violations == 0) |
792 | ibp->mkey_violations = 0; | 794 | ibp->rvp.mkey_violations = 0; |
793 | 795 | ||
794 | if (pip->pkey_violations == 0) | 796 | if (pip->pkey_violations == 0) |
795 | ibp->pkey_violations = 0; | 797 | ibp->rvp.pkey_violations = 0; |
796 | 798 | ||
797 | if (pip->qkey_violations == 0) | 799 | if (pip->qkey_violations == 0) |
798 | ibp->qkey_violations = 0; | 800 | ibp->rvp.qkey_violations = 0; |
799 | 801 | ||
800 | ore = pip->localphyerrors_overrunerrors; | 802 | ore = pip->localphyerrors_overrunerrors; |
801 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) | 803 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) |
@@ -804,7 +806,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
804 | if (set_overrunthreshold(ppd, (ore & 0xF))) | 806 | if (set_overrunthreshold(ppd, (ore & 0xF))) |
805 | smp->status |= IB_SMP_INVALID_FIELD; | 807 | smp->status |= IB_SMP_INVALID_FIELD; |
806 | 808 | ||
807 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | 809 | ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
808 | 810 | ||
809 | /* | 811 | /* |
810 | * Do the port state change now that the other link parameters | 812 | * Do the port state change now that the other link parameters |
@@ -1062,7 +1064,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | |||
1062 | 1064 | ||
1063 | memset(smp->data, 0, sizeof(smp->data)); | 1065 | memset(smp->data, 0, sizeof(smp->data)); |
1064 | 1066 | ||
1065 | if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) | 1067 | if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) |
1066 | smp->status |= IB_SMP_UNSUP_METHOD; | 1068 | smp->status |= IB_SMP_UNSUP_METHOD; |
1067 | else | 1069 | else |
1068 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) | 1070 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) |
@@ -1078,7 +1080,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | |||
1078 | u8 *p = (u8 *) smp->data; | 1080 | u8 *p = (u8 *) smp->data; |
1079 | unsigned i; | 1081 | unsigned i; |
1080 | 1082 | ||
1081 | if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { | 1083 | if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) { |
1082 | smp->status |= IB_SMP_UNSUP_METHOD; | 1084 | smp->status |= IB_SMP_UNSUP_METHOD; |
1083 | return reply(smp); | 1085 | return reply(smp); |
1084 | } | 1086 | } |
@@ -1195,20 +1197,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, | |||
1195 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 1197 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
1196 | goto bail; | 1198 | goto bail; |
1197 | } | 1199 | } |
1198 | spin_lock_irqsave(&ibp->lock, flags); | 1200 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
1199 | p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); | 1201 | p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); |
1200 | p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | 1202 | p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); |
1201 | p->counter_width = 4; /* 32 bit counters */ | 1203 | p->counter_width = 4; /* 32 bit counters */ |
1202 | p->counter_mask0_9 = COUNTER_MASK0_9; | 1204 | p->counter_mask0_9 = COUNTER_MASK0_9; |
1203 | p->sample_start = cpu_to_be32(ibp->pma_sample_start); | 1205 | p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start); |
1204 | p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); | 1206 | p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval); |
1205 | p->tag = cpu_to_be16(ibp->pma_tag); | 1207 | p->tag = cpu_to_be16(ibp->rvp.pma_tag); |
1206 | p->counter_select[0] = ibp->pma_counter_select[0]; | 1208 | p->counter_select[0] = ibp->rvp.pma_counter_select[0]; |
1207 | p->counter_select[1] = ibp->pma_counter_select[1]; | 1209 | p->counter_select[1] = ibp->rvp.pma_counter_select[1]; |
1208 | p->counter_select[2] = ibp->pma_counter_select[2]; | 1210 | p->counter_select[2] = ibp->rvp.pma_counter_select[2]; |
1209 | p->counter_select[3] = ibp->pma_counter_select[3]; | 1211 | p->counter_select[3] = ibp->rvp.pma_counter_select[3]; |
1210 | p->counter_select[4] = ibp->pma_counter_select[4]; | 1212 | p->counter_select[4] = ibp->rvp.pma_counter_select[4]; |
1211 | spin_unlock_irqrestore(&ibp->lock, flags); | 1213 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
1212 | 1214 | ||
1213 | bail: | 1215 | bail: |
1214 | return reply((struct ib_smp *) pmp); | 1216 | return reply((struct ib_smp *) pmp); |
@@ -1233,7 +1235,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, | |||
1233 | goto bail; | 1235 | goto bail; |
1234 | } | 1236 | } |
1235 | 1237 | ||
1236 | spin_lock_irqsave(&ibp->lock, flags); | 1238 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
1237 | 1239 | ||
1238 | /* Port Sampling code owns the PS* HW counters */ | 1240 | /* Port Sampling code owns the PS* HW counters */ |
1239 | xmit_flags = ppd->cong_stats.flags; | 1241 | xmit_flags = ppd->cong_stats.flags; |
@@ -1242,18 +1244,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, | |||
1242 | if (status == IB_PMA_SAMPLE_STATUS_DONE || | 1244 | if (status == IB_PMA_SAMPLE_STATUS_DONE || |
1243 | (status == IB_PMA_SAMPLE_STATUS_RUNNING && | 1245 | (status == IB_PMA_SAMPLE_STATUS_RUNNING && |
1244 | xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { | 1246 | xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { |
1245 | ibp->pma_sample_start = be32_to_cpu(p->sample_start); | 1247 | ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start); |
1246 | ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); | 1248 | ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval); |
1247 | ibp->pma_tag = be16_to_cpu(p->tag); | 1249 | ibp->rvp.pma_tag = be16_to_cpu(p->tag); |
1248 | ibp->pma_counter_select[0] = p->counter_select[0]; | 1250 | ibp->rvp.pma_counter_select[0] = p->counter_select[0]; |
1249 | ibp->pma_counter_select[1] = p->counter_select[1]; | 1251 | ibp->rvp.pma_counter_select[1] = p->counter_select[1]; |
1250 | ibp->pma_counter_select[2] = p->counter_select[2]; | 1252 | ibp->rvp.pma_counter_select[2] = p->counter_select[2]; |
1251 | ibp->pma_counter_select[3] = p->counter_select[3]; | 1253 | ibp->rvp.pma_counter_select[3] = p->counter_select[3]; |
1252 | ibp->pma_counter_select[4] = p->counter_select[4]; | 1254 | ibp->rvp.pma_counter_select[4] = p->counter_select[4]; |
1253 | dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, | 1255 | dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval, |
1254 | ibp->pma_sample_start); | 1256 | ibp->rvp.pma_sample_start); |
1255 | } | 1257 | } |
1256 | spin_unlock_irqrestore(&ibp->lock, flags); | 1258 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
1257 | 1259 | ||
1258 | ret = pma_get_portsamplescontrol(pmp, ibdev, port); | 1260 | ret = pma_get_portsamplescontrol(pmp, ibdev, port); |
1259 | 1261 | ||
@@ -1357,8 +1359,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, | |||
1357 | int i; | 1359 | int i; |
1358 | 1360 | ||
1359 | memset(pmp->data, 0, sizeof(pmp->data)); | 1361 | memset(pmp->data, 0, sizeof(pmp->data)); |
1360 | spin_lock_irqsave(&ibp->lock, flags); | 1362 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
1361 | p->tag = cpu_to_be16(ibp->pma_tag); | 1363 | p->tag = cpu_to_be16(ibp->rvp.pma_tag); |
1362 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) | 1364 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) |
1363 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; | 1365 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; |
1364 | else { | 1366 | else { |
@@ -1373,11 +1375,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, | |||
1373 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; | 1375 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; |
1374 | } | 1376 | } |
1375 | } | 1377 | } |
1376 | for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) | 1378 | for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++) |
1377 | p->counter[i] = cpu_to_be32( | 1379 | p->counter[i] = cpu_to_be32( |
1378 | get_cache_hw_sample_counters( | 1380 | get_cache_hw_sample_counters( |
1379 | ppd, ibp->pma_counter_select[i])); | 1381 | ppd, ibp->rvp.pma_counter_select[i])); |
1380 | spin_unlock_irqrestore(&ibp->lock, flags); | 1382 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
1381 | 1383 | ||
1382 | return reply((struct ib_smp *) pmp); | 1384 | return reply((struct ib_smp *) pmp); |
1383 | } | 1385 | } |
@@ -1397,8 +1399,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, | |||
1397 | 1399 | ||
1398 | /* Port Sampling code owns the PS* HW counters */ | 1400 | /* Port Sampling code owns the PS* HW counters */ |
1399 | memset(pmp->data, 0, sizeof(pmp->data)); | 1401 | memset(pmp->data, 0, sizeof(pmp->data)); |
1400 | spin_lock_irqsave(&ibp->lock, flags); | 1402 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
1401 | p->tag = cpu_to_be16(ibp->pma_tag); | 1403 | p->tag = cpu_to_be16(ibp->rvp.pma_tag); |
1402 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) | 1404 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) |
1403 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; | 1405 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; |
1404 | else { | 1406 | else { |
@@ -1415,11 +1417,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, | |||
1415 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; | 1417 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; |
1416 | } | 1418 | } |
1417 | } | 1419 | } |
1418 | for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) | 1420 | for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++) |
1419 | p->counter[i] = cpu_to_be64( | 1421 | p->counter[i] = cpu_to_be64( |
1420 | get_cache_hw_sample_counters( | 1422 | get_cache_hw_sample_counters( |
1421 | ppd, ibp->pma_counter_select[i])); | 1423 | ppd, ibp->rvp.pma_counter_select[i])); |
1422 | spin_unlock_irqrestore(&ibp->lock, flags); | 1424 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
1423 | 1425 | ||
1424 | return reply((struct ib_smp *) pmp); | 1426 | return reply((struct ib_smp *) pmp); |
1425 | } | 1427 | } |
@@ -1453,7 +1455,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp, | |||
1453 | cntrs.excessive_buffer_overrun_errors -= | 1455 | cntrs.excessive_buffer_overrun_errors -= |
1454 | ibp->z_excessive_buffer_overrun_errors; | 1456 | ibp->z_excessive_buffer_overrun_errors; |
1455 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; | 1457 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; |
1456 | cntrs.vl15_dropped += ibp->n_vl15_dropped; | 1458 | cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped; |
1457 | 1459 | ||
1458 | memset(pmp->data, 0, sizeof(pmp->data)); | 1460 | memset(pmp->data, 0, sizeof(pmp->data)); |
1459 | 1461 | ||
@@ -1546,9 +1548,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, | |||
1546 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 1548 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
1547 | 1549 | ||
1548 | qib_get_counters(ppd, &cntrs); | 1550 | qib_get_counters(ppd, &cntrs); |
1549 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | 1551 | spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); |
1550 | xmit_wait_counter = xmit_wait_get_value_delta(ppd); | 1552 | xmit_wait_counter = xmit_wait_get_value_delta(ppd); |
1551 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | 1553 | spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); |
1552 | 1554 | ||
1553 | /* Adjust counters for any resets done. */ | 1555 | /* Adjust counters for any resets done. */ |
1554 | cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; | 1556 | cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; |
@@ -1564,7 +1566,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, | |||
1564 | cntrs.excessive_buffer_overrun_errors -= | 1566 | cntrs.excessive_buffer_overrun_errors -= |
1565 | ibp->z_excessive_buffer_overrun_errors; | 1567 | ibp->z_excessive_buffer_overrun_errors; |
1566 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; | 1568 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; |
1567 | cntrs.vl15_dropped += ibp->n_vl15_dropped; | 1569 | cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped; |
1568 | cntrs.port_xmit_data -= ibp->z_port_xmit_data; | 1570 | cntrs.port_xmit_data -= ibp->z_port_xmit_data; |
1569 | cntrs.port_rcv_data -= ibp->z_port_rcv_data; | 1571 | cntrs.port_rcv_data -= ibp->z_port_rcv_data; |
1570 | cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; | 1572 | cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; |
@@ -1743,7 +1745,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp, | |||
1743 | cntrs.excessive_buffer_overrun_errors; | 1745 | cntrs.excessive_buffer_overrun_errors; |
1744 | 1746 | ||
1745 | if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { | 1747 | if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { |
1746 | ibp->n_vl15_dropped = 0; | 1748 | ibp->rvp.n_vl15_dropped = 0; |
1747 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | 1749 | ibp->z_vl15_dropped = cntrs.vl15_dropped; |
1748 | } | 1750 | } |
1749 | 1751 | ||
@@ -1778,11 +1780,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, | |||
1778 | ret = pma_get_portcounters_cong(pmp, ibdev, port); | 1780 | ret = pma_get_portcounters_cong(pmp, ibdev, port); |
1779 | 1781 | ||
1780 | if (counter_select & IB_PMA_SEL_CONG_XMIT) { | 1782 | if (counter_select & IB_PMA_SEL_CONG_XMIT) { |
1781 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | 1783 | spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); |
1782 | ppd->cong_stats.counter = 0; | 1784 | ppd->cong_stats.counter = 0; |
1783 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, | 1785 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, |
1784 | 0x0); | 1786 | 0x0); |
1785 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | 1787 | spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); |
1786 | } | 1788 | } |
1787 | if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { | 1789 | if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { |
1788 | ibp->z_port_xmit_data = cntrs.port_xmit_data; | 1790 | ibp->z_port_xmit_data = cntrs.port_xmit_data; |
@@ -1806,7 +1808,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, | |||
1806 | cntrs.local_link_integrity_errors; | 1808 | cntrs.local_link_integrity_errors; |
1807 | ibp->z_excessive_buffer_overrun_errors = | 1809 | ibp->z_excessive_buffer_overrun_errors = |
1808 | cntrs.excessive_buffer_overrun_errors; | 1810 | cntrs.excessive_buffer_overrun_errors; |
1809 | ibp->n_vl15_dropped = 0; | 1811 | ibp->rvp.n_vl15_dropped = 0; |
1810 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | 1812 | ibp->z_vl15_dropped = cntrs.vl15_dropped; |
1811 | } | 1813 | } |
1812 | 1814 | ||
@@ -1916,12 +1918,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, | |||
1916 | ret = subn_get_vl_arb(smp, ibdev, port); | 1918 | ret = subn_get_vl_arb(smp, ibdev, port); |
1917 | goto bail; | 1919 | goto bail; |
1918 | case IB_SMP_ATTR_SM_INFO: | 1920 | case IB_SMP_ATTR_SM_INFO: |
1919 | if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { | 1921 | if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) { |
1920 | ret = IB_MAD_RESULT_SUCCESS | | 1922 | ret = IB_MAD_RESULT_SUCCESS | |
1921 | IB_MAD_RESULT_CONSUMED; | 1923 | IB_MAD_RESULT_CONSUMED; |
1922 | goto bail; | 1924 | goto bail; |
1923 | } | 1925 | } |
1924 | if (ibp->port_cap_flags & IB_PORT_SM) { | 1926 | if (ibp->rvp.port_cap_flags & IB_PORT_SM) { |
1925 | ret = IB_MAD_RESULT_SUCCESS; | 1927 | ret = IB_MAD_RESULT_SUCCESS; |
1926 | goto bail; | 1928 | goto bail; |
1927 | } | 1929 | } |
@@ -1950,12 +1952,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, | |||
1950 | ret = subn_set_vl_arb(smp, ibdev, port); | 1952 | ret = subn_set_vl_arb(smp, ibdev, port); |
1951 | goto bail; | 1953 | goto bail; |
1952 | case IB_SMP_ATTR_SM_INFO: | 1954 | case IB_SMP_ATTR_SM_INFO: |
1953 | if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { | 1955 | if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) { |
1954 | ret = IB_MAD_RESULT_SUCCESS | | 1956 | ret = IB_MAD_RESULT_SUCCESS | |
1955 | IB_MAD_RESULT_CONSUMED; | 1957 | IB_MAD_RESULT_CONSUMED; |
1956 | goto bail; | 1958 | goto bail; |
1957 | } | 1959 | } |
1958 | if (ibp->port_cap_flags & IB_PORT_SM) { | 1960 | if (ibp->rvp.port_cap_flags & IB_PORT_SM) { |
1959 | ret = IB_MAD_RESULT_SUCCESS; | 1961 | ret = IB_MAD_RESULT_SUCCESS; |
1960 | goto bail; | 1962 | goto bail; |
1961 | } | 1963 | } |
@@ -2456,7 +2458,7 @@ static void xmit_wait_timer_func(unsigned long opaque) | |||
2456 | unsigned long flags; | 2458 | unsigned long flags; |
2457 | u8 status; | 2459 | u8 status; |
2458 | 2460 | ||
2459 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | 2461 | spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); |
2460 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { | 2462 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { |
2461 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | 2463 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); |
2462 | if (status == IB_PMA_SAMPLE_STATUS_DONE) { | 2464 | if (status == IB_PMA_SAMPLE_STATUS_DONE) { |
@@ -2469,7 +2471,7 @@ static void xmit_wait_timer_func(unsigned long opaque) | |||
2469 | ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); | 2471 | ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); |
2470 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); | 2472 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); |
2471 | done: | 2473 | done: |
2472 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | 2474 | spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); |
2473 | mod_timer(&ppd->cong_stats.timer, jiffies + HZ); | 2475 | mod_timer(&ppd->cong_stats.timer, jiffies + HZ); |
2474 | } | 2476 | } |
2475 | 2477 | ||
@@ -2501,7 +2503,7 @@ int qib_create_agents(struct qib_ibdev *dev) | |||
2501 | dd->pport[p].cong_stats.timer.expires = 0; | 2503 | dd->pport[p].cong_stats.timer.expires = 0; |
2502 | add_timer(&dd->pport[p].cong_stats.timer); | 2504 | add_timer(&dd->pport[p].cong_stats.timer); |
2503 | 2505 | ||
2504 | ibp->send_agent = agent; | 2506 | ibp->rvp.send_agent = agent; |
2505 | } | 2507 | } |
2506 | 2508 | ||
2507 | return 0; | 2509 | return 0; |
@@ -2509,9 +2511,9 @@ int qib_create_agents(struct qib_ibdev *dev) | |||
2509 | err: | 2511 | err: |
2510 | for (p = 0; p < dd->num_pports; p++) { | 2512 | for (p = 0; p < dd->num_pports; p++) { |
2511 | ibp = &dd->pport[p].ibport_data; | 2513 | ibp = &dd->pport[p].ibport_data; |
2512 | if (ibp->send_agent) { | 2514 | if (ibp->rvp.send_agent) { |
2513 | agent = ibp->send_agent; | 2515 | agent = ibp->rvp.send_agent; |
2514 | ibp->send_agent = NULL; | 2516 | ibp->rvp.send_agent = NULL; |
2515 | ib_unregister_mad_agent(agent); | 2517 | ib_unregister_mad_agent(agent); |
2516 | } | 2518 | } |
2517 | } | 2519 | } |
@@ -2528,9 +2530,9 @@ void qib_free_agents(struct qib_ibdev *dev) | |||
2528 | 2530 | ||
2529 | for (p = 0; p < dd->num_pports; p++) { | 2531 | for (p = 0; p < dd->num_pports; p++) { |
2530 | ibp = &dd->pport[p].ibport_data; | 2532 | ibp = &dd->pport[p].ibport_data; |
2531 | if (ibp->send_agent) { | 2533 | if (ibp->rvp.send_agent) { |
2532 | agent = ibp->send_agent; | 2534 | agent = ibp->rvp.send_agent; |
2533 | ibp->send_agent = NULL; | 2535 | ibp->rvp.send_agent = NULL; |
2534 | ib_unregister_mad_agent(agent); | 2536 | ib_unregister_mad_agent(agent); |
2535 | } | 2537 | } |
2536 | if (ibp->sm_ah) { | 2538 | if (ibp->sm_ah) { |