about summary refs log tree commit diff stats
path: root/drivers/staging
diff options
context:
space:
mode:
authorJianxin Xiong <jianxin.xiong@intel.com>2016-05-19 08:21:57 -0400
committerDoug Ledford <dledford@redhat.com>2016-05-26 11:23:11 -0400
commitb583faf4dc6eaa64895c37b81983e75a8c3c1e4e (patch)
tree3b94997fa2b5f7c5c91e8351b9d917b9d28e02ed /drivers/staging
parentf70f5f6af36bce29fe2c4bc733a223b5746eb65f (diff)
IB/hfi1: Fix bug that blocks process on exit after port bounce
During the processing of a user SDMA request, if there was an error before the request counter was increased, the state of the packet queue could be updated incorrectly, causing the counter to underflow. As a result, the process could get stuck later since the counter could never get back to 0.

This patch adds a condition to guard the packet queue update so that the counter is only decreased if it has been increased before the error happens.

Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Jianxin Xiong <jianxin.xiong@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/staging')
-rw-r--r-- drivers/staging/rdma/hfi1/user_sdma.c | 5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index aed2878c97f1..29f4795f866c 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -510,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
510 struct sdma_req_info info; 510 struct sdma_req_info info;
511 struct user_sdma_request *req; 511 struct user_sdma_request *req;
512 u8 opcode, sc, vl; 512 u8 opcode, sc, vl;
513 int req_queued = 0;
513 514
514 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { 515 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
515 hfi1_cdbg( 516 hfi1_cdbg(
@@ -706,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
706 707
707 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); 708 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
708 atomic_inc(&pq->n_reqs); 709 atomic_inc(&pq->n_reqs);
710 req_queued = 1;
709 /* Send the first N packets in the request to buy us some time */ 711 /* Send the first N packets in the request to buy us some time */
710 ret = user_sdma_send_pkts(req, pcount); 712 ret = user_sdma_send_pkts(req, pcount);
711 if (unlikely(ret < 0 && ret != -EBUSY)) { 713 if (unlikely(ret < 0 && ret != -EBUSY)) {
@@ -750,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
750 return 0; 752 return 0;
751free_req: 753free_req:
752 user_sdma_free_request(req, true); 754 user_sdma_free_request(req, true);
753 pq_update(pq); 755 if (req_queued)
756 pq_update(pq);
754 set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); 757 set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
755 return ret; 758 return ret;
756} 759}