 net/sctp/sm_sideeffect.c | 42 +++++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f554b9a96e07..6098d4c42fa9 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 	int error;
 	struct sctp_transport *transport = (struct sctp_transport *) peer;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
 	/* Check whether a task is in the sock. */
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later. */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 			   transport, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
 					sctp_event_timeout_t timeout_type)
 {
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 	int error = 0;
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
 
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 			   (void *)timeout_type, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	int error = 0;
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later. */
@@ -389,10 +392,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
 			   transport, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later. */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
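Taken together, every hunk above makes the same change: the timer callback reads asoc->base.sk once into a local struct sock *sk and then passes that pointer to bh_lock_sock(), sock_owned_by_user(), the sk_err assignment, and bh_unlock_sock(), instead of re-dereferencing asoc->base.sk at each call site. The sketch below shows the approximate post-patch shape of sctp_generate_proto_unreach_event(); it is a reconstruction for illustration only, and the parts not visible in the hunks (the busy-path timer re-arm and the full sctp_do_sm() argument list) are assumptions rather than lines taken from this diff.

/* Approximate post-patch shape of sctp_generate_proto_unreach_event().
 * Lines touched by the hunks above are reproduced as shown there; the
 * busy-path re-arm and the sctp_do_sm() arguments are assumed for
 * illustration and are not part of this diff.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;	/* cached once... */
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);			/* ...and reused below */
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later (assumed re-arm; not shown in the diff). */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ / 20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Assumed call shape; only the trailing argument line is in the diff. */
	sctp_do_sm(net, SCTP_EVENT_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}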