author	Chuck Lever <chuck.lever@oracle.com>	2015-10-24 17:27:27 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-11-02 13:45:15 -0500
commit	a5b027e1897c811401862877d0ba4ca26fabc4da (patch)
tree	547a8be04404af4a261c04306f44897b988cb88d
parent	2da9ab3008f359857eb594b0b4b0fee62f2a73c2 (diff)
xprtrdma: Saving IRQs no longer needed for rb_lock
Now that RPC replies are processed in a workqueue, there's no need
to disable IRQs when managing send and receive buffers. This saves
noticeable overhead per RPC.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
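Why this is both safe and faster, in miniature: the _irqsave/_irqrestore variants exist so that a lock which may also be taken from interrupt context cannot deadlock against an interrupted holder, at the cost of saving and restoring the local IRQ state on every acquire/release. Once every acquirer of rb_lock runs in process context (the RPC reply workqueue), the plain spin_lock()/spin_unlock() pair suffices and avoids that per-call overhead. A minimal sketch of the before/after pattern, using hypothetical demo_lock/demo_list stand-ins rather than the actual xprtrdma structures:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical stand-in for rb_lock */
static LIST_HEAD(demo_list);		/* hypothetical stand-in for rb_recv_bufs */

/* Before: the lock could also be taken from a completion handler
 * running in interrupt context, so process-context callers had to
 * disable local interrupts around the critical section.
 */
static void demo_put_irqsafe(struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(item, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* After: all callers run in process context (e.g. on a workqueue),
 * so saving and restoring the IRQ state is pure overhead.
 */
static void demo_put(struct list_head *item)
{
	spin_lock(&demo_lock);
	list_add_tail(item, &demo_list);
	spin_unlock(&demo_lock);
}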
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3dd5a7c951c5..baa0523476a4 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1063,24 +1063,23 @@ struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
 	struct rpcrdma_req *req;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	if (list_empty(&buffers->rb_send_bufs))
 		goto out_reqbuf;
 	req = rpcrdma_buffer_get_req_locked(buffers);
 	if (list_empty(&buffers->rb_recv_bufs))
 		goto out_repbuf;
 	req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	return req;
 
 out_reqbuf:
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	pr_warn("RPC: %s: out of request buffers\n", __func__);
 	return NULL;
 out_repbuf:
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 	pr_warn("RPC: %s: out of reply buffers\n", __func__);
 	req->rl_reply = NULL;
 	return req;
@@ -1095,16 +1094,15 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;
-	unsigned long flags;
 
 	req->rl_niovs = 0;
 	req->rl_reply = NULL;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
 	if (rep)
 		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1115,12 +1113,11 @@ void
 rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	if (!list_empty(&buffers->rb_recv_bufs))
 		req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*
@@ -1131,11 +1128,10 @@ void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
 	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buffers->rb_lock, flags);
+	spin_lock(&buffers->rb_lock);
 	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	spin_unlock(&buffers->rb_lock);
 }
 
 /*
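For context, the precondition the commit message relies on: RPC/RDMA reply processing was moved off the receive completion path and onto a workqueue earlier in this series, so rb_lock is now only ever taken from process context. A rough sketch of that general handoff pattern, with hypothetical names (demo_reply, demo_reply_handler) rather than the actual xprtrdma code:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_reply {
	struct work_struct rr_work;	/* hypothetical; embeds the work item */
};

/* Runs later on a kworker thread, in process context, where plain
 * spinlocks may be taken without disabling interrupts.
 */
static void demo_reply_handler(struct work_struct *work)
{
	struct demo_reply *rep = container_of(work, struct demo_reply, rr_work);

	/* ... parse the reply, then return buffers, e.g. via
	 * rpcrdma_recv_buffer_put(), which now uses plain spin_lock() ...
	 */
	(void)rep;
}

/* Called from the receive completion path: instead of processing the
 * reply here, queue it for the workqueue. (In real code the work item
 * would typically be initialized once, when the buffer is created.)
 */
static void demo_receive_completion(struct demo_reply *rep)
{
	INIT_WORK(&rep->rr_work, demo_reply_handler);
	schedule_work(&rep->rr_work);
}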