diff options
author | Arnd Bergmann <arnd@arndb.de> | 2016-09-19 07:57:26 -0400 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-12-12 16:31:45 -0500 |
commit | a0fa72683e78979ef1123d679b1c40ae28bd9096 (patch) | |
tree | 5222fc413b272f38aa61ca3c7811c04f05444087 /drivers/infiniband/sw | |
parent | e37a79e5d4cac3831fac3d4afbf2461f56b4b7bd (diff) |
IB/rxe: avoid putting a large struct rxe_qp on stack
A race condition fix added an rxe_qp structure to the stack in order
to be able to perform rollback in rxe_requester(), but the structure
is large enough to trigger the warning for possible stack overflow:
drivers/infiniband/sw/rxe/rxe_req.c: In function 'rxe_requester':
drivers/infiniband/sw/rxe/rxe_req.c:757:1: error: the frame size of 2064 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]
This changes the rollback function to only save the psn inside
the qp, which is the only field we access in the rollback_qp
anyway.
Fixes: 3050b9985024 ("IB/rxe: Fix race condition between requester and completer")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_req.c | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 832846b73ea0..205222909e53 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
548 | static void save_state(struct rxe_send_wqe *wqe, | 548 | static void save_state(struct rxe_send_wqe *wqe, |
549 | struct rxe_qp *qp, | 549 | struct rxe_qp *qp, |
550 | struct rxe_send_wqe *rollback_wqe, | 550 | struct rxe_send_wqe *rollback_wqe, |
551 | struct rxe_qp *rollback_qp) | 551 | u32 *rollback_psn) |
552 | { | 552 | { |
553 | rollback_wqe->state = wqe->state; | 553 | rollback_wqe->state = wqe->state; |
554 | rollback_wqe->first_psn = wqe->first_psn; | 554 | rollback_wqe->first_psn = wqe->first_psn; |
555 | rollback_wqe->last_psn = wqe->last_psn; | 555 | rollback_wqe->last_psn = wqe->last_psn; |
556 | rollback_qp->req.psn = qp->req.psn; | 556 | *rollback_psn = qp->req.psn; |
557 | } | 557 | } |
558 | 558 | ||
559 | static void rollback_state(struct rxe_send_wqe *wqe, | 559 | static void rollback_state(struct rxe_send_wqe *wqe, |
560 | struct rxe_qp *qp, | 560 | struct rxe_qp *qp, |
561 | struct rxe_send_wqe *rollback_wqe, | 561 | struct rxe_send_wqe *rollback_wqe, |
562 | struct rxe_qp *rollback_qp) | 562 | u32 rollback_psn) |
563 | { | 563 | { |
564 | wqe->state = rollback_wqe->state; | 564 | wqe->state = rollback_wqe->state; |
565 | wqe->first_psn = rollback_wqe->first_psn; | 565 | wqe->first_psn = rollback_wqe->first_psn; |
566 | wqe->last_psn = rollback_wqe->last_psn; | 566 | wqe->last_psn = rollback_wqe->last_psn; |
567 | qp->req.psn = rollback_qp->req.psn; | 567 | qp->req.psn = rollback_psn; |
568 | } | 568 | } |
569 | 569 | ||
570 | static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, | 570 | static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, |
@@ -593,8 +593,8 @@ int rxe_requester(void *arg) | |||
593 | int mtu; | 593 | int mtu; |
594 | int opcode; | 594 | int opcode; |
595 | int ret; | 595 | int ret; |
596 | struct rxe_qp rollback_qp; | ||
597 | struct rxe_send_wqe rollback_wqe; | 596 | struct rxe_send_wqe rollback_wqe; |
597 | u32 rollback_psn; | ||
598 | 598 | ||
599 | next_wqe: | 599 | next_wqe: |
600 | if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) | 600 | if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) |
@@ -718,7 +718,7 @@ next_wqe: | |||
718 | * rxe_xmit_packet(). | 718 | * rxe_xmit_packet(). |
719 | * Otherwise, completer might initiate an unjustified retry flow. | 719 | * Otherwise, completer might initiate an unjustified retry flow. |
720 | */ | 720 | */ |
721 | save_state(wqe, qp, &rollback_wqe, &rollback_qp); | 721 | save_state(wqe, qp, &rollback_wqe, &rollback_psn); |
722 | update_wqe_state(qp, wqe, &pkt); | 722 | update_wqe_state(qp, wqe, &pkt); |
723 | update_wqe_psn(qp, wqe, &pkt, payload); | 723 | update_wqe_psn(qp, wqe, &pkt, payload); |
724 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); | 724 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); |
@@ -726,7 +726,7 @@ next_wqe: | |||
726 | qp->need_req_skb = 1; | 726 | qp->need_req_skb = 1; |
727 | kfree_skb(skb); | 727 | kfree_skb(skb); |
728 | 728 | ||
729 | rollback_state(wqe, qp, &rollback_wqe, &rollback_qp); | 729 | rollback_state(wqe, qp, &rollback_wqe, rollback_psn); |
730 | 730 | ||
731 | if (ret == -EAGAIN) { | 731 | if (ret == -EAGAIN) { |
732 | rxe_run_task(&qp->req.task, 1); | 732 | rxe_run_task(&qp->req.task, 1); |