aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/sw
diff options
context:
space:
mode:
authorParav Pandit <pandit.parav@gmail.com>2016-09-28 16:24:12 -0400
committerDoug Ledford <dledford@redhat.com>2016-10-06 13:50:04 -0400
commit063af59597492d31c44e549d6c773b6485f7dc53 (patch)
treeda38a219f014a9fe8966faad735e53b966e8e0e0 /drivers/infiniband/sw
parent61347fa6087884305ea4a3a04501839fdb68dc76 (diff)
IB/rxe: Avoid scheduling tasklet for userspace QP
This patch avoids scheduling tasklet for WQE and protocol processing for user space QP. It performs the task in calling process context. To improve code readability kernel specific post_send handling moved to post_send_kernel() function. Signed-off-by: Parav Pandit <pandit.parav@gmail.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c38
1 file changed, 25 insertions, 13 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4552be960c6a..a5af6917fc1c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -801,26 +801,15 @@ err1:
801 return err; 801 return err;
802} 802}
803 803
804static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 804static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
805 struct ib_send_wr **bad_wr) 805 struct ib_send_wr **bad_wr)
806{ 806{
807 int err = 0; 807 int err = 0;
808 struct rxe_qp *qp = to_rqp(ibqp);
809 unsigned int mask; 808 unsigned int mask;
810 unsigned int length = 0; 809 unsigned int length = 0;
811 int i; 810 int i;
812 int must_sched; 811 int must_sched;
813 812
814 if (unlikely(!qp->valid)) {
815 *bad_wr = wr;
816 return -EINVAL;
817 }
818
819 if (unlikely(qp->req.state < QP_STATE_READY)) {
820 *bad_wr = wr;
821 return -EINVAL;
822 }
823
824 while (wr) { 813 while (wr) {
825 mask = wr_opcode_mask(wr->opcode, qp); 814 mask = wr_opcode_mask(wr->opcode, qp);
826 if (unlikely(!mask)) { 815 if (unlikely(!mask)) {
@@ -861,6 +850,29 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
861 return err; 850 return err;
862} 851}
863 852
853static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
854 struct ib_send_wr **bad_wr)
855{
856 struct rxe_qp *qp = to_rqp(ibqp);
857
858 if (unlikely(!qp->valid)) {
859 *bad_wr = wr;
860 return -EINVAL;
861 }
862
863 if (unlikely(qp->req.state < QP_STATE_READY)) {
864 *bad_wr = wr;
865 return -EINVAL;
866 }
867
868 if (qp->is_user) {
869 /* Utilize process context to do protocol processing */
870 rxe_run_task(&qp->req.task, 0);
871 return 0;
872 } else
873 return rxe_post_send_kernel(qp, wr, bad_wr);
874}
875
864static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, 876static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
865 struct ib_recv_wr **bad_wr) 877 struct ib_recv_wr **bad_wr)
866{ 878{