aboutsummaryrefslogtreecommitdiffstats
path: root/net/sctp
diff options
context:
space:
mode:
authorMarcelo Ricardo Leitner <marcelo.leitner@gmail.com>2016-04-29 13:17:08 -0400
committerDavid S. Miller <davem@davemloft.net>2016-05-01 21:06:10 -0400
commit0970f5b3665933f5f0d069607c78fb10bd918b62 (patch)
treed9dac95d765c7337d9e32cbd0cc2013aba5bb74f /net/sctp
parent70e927b98bb632e0c987818835aacd6787ebe107 (diff)
sctp: signal sk_data_ready earlier on data chunks reception
Dave Miller pointed out that fb586f25300f ("sctp: delay calls to sk_data_ready() as much as possible") may insert latency, especially if the receiving application is running on another CPU, and that it would be better if we signalled as early as possible. This patch thus basically inverts the logic on fb586f25300f and signals it as early as possible, similar to what we had before. Fixes: fb586f25300f ("sctp: delay calls to sk_data_ready() as much as possible") Reported-by: Dave Miller <davem@davemloft.net> Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp')
-rw-r--r--net/sctp/sm_sideeffect.c7
-rw-r--r--net/sctp/ulpqueue.c25
2 files changed, 19 insertions, 13 deletions
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e8f0112f9b28..aa3712259368 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1741,10 +1741,9 @@ out:
1741 } else if (local_cork) 1741 } else if (local_cork)
1742 error = sctp_outq_uncork(&asoc->outqueue, gfp); 1742 error = sctp_outq_uncork(&asoc->outqueue, gfp);
1743 1743
1744 if (sp->pending_data_ready) { 1744 if (sp->data_ready_signalled)
1745 sk->sk_data_ready(sk); 1745 sp->data_ready_signalled = 0;
1746 sp->pending_data_ready = 0; 1746
1747 }
1748 return error; 1747 return error;
1749nomem: 1748nomem:
1750 error = -ENOMEM; 1749 error = -ENOMEM;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ec12a8920e5f..ec166d2bd2d9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
194int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) 194int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
195{ 195{
196 struct sock *sk = ulpq->asoc->base.sk; 196 struct sock *sk = ulpq->asoc->base.sk;
197 struct sctp_sock *sp = sctp_sk(sk);
197 struct sk_buff_head *queue, *skb_list; 198 struct sk_buff_head *queue, *skb_list;
198 struct sk_buff *skb = sctp_event2skb(event); 199 struct sk_buff *skb = sctp_event2skb(event);
199 int clear_pd = 0; 200 int clear_pd = 0;
@@ -211,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
211 sk_incoming_cpu_update(sk); 212 sk_incoming_cpu_update(sk);
212 } 213 }
213 /* Check if the user wishes to receive this event. */ 214 /* Check if the user wishes to receive this event. */
214 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) 215 if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
215 goto out_free; 216 goto out_free;
216 217
217 /* If we are in partial delivery mode, post to the lobby until 218 /* If we are in partial delivery mode, post to the lobby until
@@ -219,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
219 * the association the cause of the partial delivery. 220 * the association the cause of the partial delivery.
220 */ 221 */
221 222
222 if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { 223 if (atomic_read(&sp->pd_mode) == 0) {
223 queue = &sk->sk_receive_queue; 224 queue = &sk->sk_receive_queue;
224 } else { 225 } else {
225 if (ulpq->pd_mode) { 226 if (ulpq->pd_mode) {
@@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
231 if ((event->msg_flags & MSG_NOTIFICATION) || 232 if ((event->msg_flags & MSG_NOTIFICATION) ||
232 (SCTP_DATA_NOT_FRAG == 233 (SCTP_DATA_NOT_FRAG ==
233 (event->msg_flags & SCTP_DATA_FRAG_MASK))) 234 (event->msg_flags & SCTP_DATA_FRAG_MASK)))
234 queue = &sctp_sk(sk)->pd_lobby; 235 queue = &sp->pd_lobby;
235 else { 236 else {
236 clear_pd = event->msg_flags & MSG_EOR; 237 clear_pd = event->msg_flags & MSG_EOR;
237 queue = &sk->sk_receive_queue; 238 queue = &sk->sk_receive_queue;
@@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
242 * can queue this to the receive queue instead 243 * can queue this to the receive queue instead
243 * of the lobby. 244 * of the lobby.
244 */ 245 */
245 if (sctp_sk(sk)->frag_interleave) 246 if (sp->frag_interleave)
246 queue = &sk->sk_receive_queue; 247 queue = &sk->sk_receive_queue;
247 else 248 else
248 queue = &sctp_sk(sk)->pd_lobby; 249 queue = &sp->pd_lobby;
249 } 250 }
250 } 251 }
251 252
@@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
264 if (clear_pd) 265 if (clear_pd)
265 sctp_ulpq_clear_pd(ulpq); 266 sctp_ulpq_clear_pd(ulpq);
266 267
267 if (queue == &sk->sk_receive_queue) 268 if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
268 sctp_sk(sk)->pending_data_ready = 1; 269 sp->data_ready_signalled = 1;
270 sk->sk_data_ready(sk);
271 }
269 return 1; 272 return 1;
270 273
271out_free: 274out_free:
@@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1126{ 1129{
1127 struct sctp_ulpevent *ev = NULL; 1130 struct sctp_ulpevent *ev = NULL;
1128 struct sock *sk; 1131 struct sock *sk;
1132 struct sctp_sock *sp;
1129 1133
1130 if (!ulpq->pd_mode) 1134 if (!ulpq->pd_mode)
1131 return; 1135 return;
1132 1136
1133 sk = ulpq->asoc->base.sk; 1137 sk = ulpq->asoc->base.sk;
1138 sp = sctp_sk(sk);
1134 if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT, 1139 if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
1135 &sctp_sk(sk)->subscribe)) 1140 &sctp_sk(sk)->subscribe))
1136 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, 1141 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1140 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); 1145 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1141 1146
1142 /* If there is data waiting, send it up the socket now. */ 1147 /* If there is data waiting, send it up the socket now. */
1143 if (sctp_ulpq_clear_pd(ulpq) || ev) 1148 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1144 sctp_sk(sk)->pending_data_ready = 1; 1149 sp->data_ready_signalled = 1;
1150 sk->sk_data_ready(sk);
1151 }
1145} 1152}