author     David S. Miller <davem@davemloft.net>    2017-02-27 09:22:10 -0500
committer  David S. Miller <davem@davemloft.net>    2017-02-27 09:22:10 -0500
commit     2f44f75257d57f0d5668dba3a6ada0f4872132c9
tree       696a4791873be6b94a1b5e91ea3ddb366a200368
parent     4ca257eed6adf58d325c39c320a06dbcd34c43db
parent     6f437d431930ff86e4a971d29321951faadb97c7
Merge branch 'qed-fixes'
Yuval Mintz says:
====================
qed: Bug fixes
Patch #1 addresses a day-one race whose likelihood depends on the number of VFs
[i.e., more child VFs under a single PF make it more probable].
Patch #2 corrects a race introduced by the previous set of qed fixes,
one that would trigger each time the PF transitions to the UP state.
I've built & tested those against current net-next.
Please consider applying the series there.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
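
Background on patch #1: pending VF messages used to be tracked in a shared
pending_events bitmap that the PF worker copied and then cleared wholesale, so a
message arriving between the copy and the clear could be silently dropped; with
more VFs per PF the window is hit more often. The fix moves the flag into each
VF's mailbox (b_pending_msg) and clears it only when that VF's message is
actually processed. The following is a simplified, hypothetical sketch of the
before/after logic (NUM_VFS, handle(), old_vf_message(), new_vf_message() are
placeholder names, not driver code); the real flow is in the qed_sriov.c hunks
below.

/* Simplified sketch of the race fixed by patch #1; placeholder names only. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NUM_VFS    128
#define ARRAY_LEN  ((NUM_VFS + 63) / 64)

/* --- Old scheme: one shared bitmap for all VFs -------------------------- */
static uint64_t pending_events[ARRAY_LEN];

static void old_vf_message(uint8_t vfid)
{
        /* EQ context: mark the VF, then kick the PF worker. */
        pending_events[vfid / 64] |= 1ULL << (vfid % 64);
}

static void old_pf_worker(void (*handle)(uint8_t vfid))
{
        uint64_t events[ARRAY_LEN];
        int i;

        memcpy(events, pending_events, sizeof(events));
        /* A bit set by old_vf_message() right here is wiped by the memset
         * below without ever being handled - the lost-message race.
         */
        memset(pending_events, 0, sizeof(pending_events));

        for (i = 0; i < NUM_VFS; i++)
                if (events[i / 64] & (1ULL << (i % 64)))
                        handle(i);
}

/* --- New scheme: one flag per VF, cleared per handled message ----------- */
struct vf_mbx {
        bool b_pending_msg;
};

static struct vf_mbx vf_mbx[NUM_VFS];

static void new_vf_message(uint8_t vfid)
{
        vf_mbx[vfid].b_pending_msg = true;
}

static void new_pf_worker(void (*handle)(uint8_t vfid))
{
        int i;

        for (i = 0; i < NUM_VFS; i++) {
                if (!vf_mbx[i].b_pending_msg)
                        continue;
                /* Cleared only for the VF being served; a message landing
                 * on another VF meanwhile keeps its own flag set.
                 */
                vf_mbx[i].b_pending_msg = false;
                handle(i);
        }
}

Patch #2 is a simpler plumbing change: qed_configure_vp_wfq_on_link_change()
now receives the caller's struct qed_ptt instead of reaching for
p_hwfn->p_dpc_ptt, as visible in the qed.h, qed_dev.c and qed_mcp.c hunks below.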
 drivers/net/ethernet/qlogic/qed/qed.h       |  4
 drivers/net/ethernet/qlogic/qed/qed_dev.c   |  6
 drivers/net/ethernet/qlogic/qed/qed_mcp.c   |  3
 drivers/net/ethernet/qlogic/qed/qed_sriov.c | 39
 drivers/net/ethernet/qlogic/qed/qed_sriov.h |  4
 5 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 61a9cd5be497..00c17fa6545b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -688,7 +688,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 #define OOO_LB_TC 9
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+                                         struct qed_ptt *p_ptt,
+                                         u32 min_pf_rate);
 
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d6c5a8165b5f..e2a081ceaf52 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3198,7 +3198,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
 }
 
 /* API to configure WFQ from mcp link change */
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+                                         struct qed_ptt *p_ptt, u32 min_pf_rate)
 {
         int i;
 
@@ -3212,8 +3213,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
         for_each_hwfn(cdev, i) {
                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
-                __qed_configure_vp_wfq_on_link_change(p_hwfn,
-                                                      p_hwfn->p_dpc_ptt,
+                __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
                                                       min_pf_rate);
         }
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 314022df3469..87fde205149f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -679,7 +679,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 
         /* Min bandwidth configuration */
         __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
-        qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+        qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+                                            p_link->min_pf_rate);
 
         p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
         p_link->an_complete = !!(status &
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 29ed785f1dc2..253c2bbe1e4e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3014,8 +3014,7 @@ cleanup:
                 ack_vfs[vfid / 32] |= BIT((vfid % 32));
                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
                     ~(1ULL << (rel_vf_id % 64));
-                p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-                    ~(1ULL << (rel_vf_id % 64));
+                p_vf->vf_mbx.b_pending_msg = false;
         }
 
         return rc;
@@ -3128,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
         mbx = &p_vf->vf_mbx;
 
         /* qed_iov_process_mbx_request */
-        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+        if (!mbx->b_pending_msg) {
+                DP_NOTICE(p_hwfn,
+                          "VF[%02x]: Trying to process mailbox message when none is pending\n",
+                          p_vf->abs_vf_id);
+                return;
+        }
+        mbx->b_pending_msg = false;
 
         mbx->first_tlv = mbx->req_virt->first_tlv;
 
+        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                   "VF[%02x]: Processing mailbox message [type %04x]\n",
+                   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
         /* check if tlv type is known */
         if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
             !p_vf->b_malicious) {
@@ -3219,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
         }
 }
 
-static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 {
-        u64 add_bit = 1ULL << (vfid % 64);
+        int i;
 
-        p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+        memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
 
-static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
-                                                    u64 *events)
-{
-        u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+        qed_for_each_vf(p_hwfn, i) {
+                struct qed_vf_info *p_vf;
 
-        memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
-        memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+                p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+                if (p_vf->vf_mbx.b_pending_msg)
+                        events[i / 64] |= 1ULL << (i % 64);
+        }
 }
 
 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
@@ -3266,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
         p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
         /* Mark the event and schedule the workqueue */
-        qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+        p_vf->vf_mbx.b_pending_msg = true;
         qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
 
         return 0;
@@ -4030,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
                 return;
         }
 
-        qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+        qed_iov_pf_get_pending_events(hwfn, events);
 
         DP_VERBOSE(hwfn, QED_MSG_IOV,
                    "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index fc08cc2da6a7..a89605821522 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -140,6 +140,9 @@ struct qed_iov_vf_mbx {
         /* Address in VF where a pending message is located */
         dma_addr_t pending_req;
 
+        /* Message from VF awaits handling */
+        bool b_pending_msg;
+
         u8 *offset;
 
         /* saved VF request header */
@@ -232,7 +235,6 @@ struct qed_vf_info {
  */
 struct qed_pf_iov {
         struct qed_vf_info vfs_array[MAX_NUM_VFS];
-        u64 pending_events[QED_VF_ARRAY_LENGTH];
         u64 pending_flr[QED_VF_ARRAY_LENGTH];
 
         /* Allocate message address continuosuly and split to each VF */
