 net/llc/af_llc.c | 147
 1 file changed, 67 insertions(+), 80 deletions(-)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index aed61e6376ed..436c8db67f55 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -426,12 +426,30 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
 		sk->sk_state = TCP_CLOSE;
 		goto out;
 	}
-	rc = llc_ui_wait_for_conn(sk, sk->sk_rcvtimeo);
-	if (rc)
-		dprintk("%s: llc_ui_wait_for_conn failed=%d\n", __FUNCTION__, rc);
+
+	if (sk->sk_state == TCP_SYN_SENT) {
+		const int timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+		if (!timeo || !llc_ui_wait_for_conn(sk, timeo))
+			goto out;
+
+		rc = sock_intr_errno(timeo);
+		if (signal_pending(current))
+			goto out;
+	}
+
+	if (sk->sk_state == TCP_CLOSE)
+		goto sock_error;
+
+	sock->state = SS_CONNECTED;
+	rc = 0;
 out:
 	release_sock(sk);
 	return rc;
+sock_error:
+	rc = sock_error(sk) ? : -ECONNABORTED;
+	sock->state = SS_UNCONNECTED;
+	goto out;
 }
 
 /**
@@ -472,117 +490,88 @@ out:
 
 static int llc_ui_wait_for_disc(struct sock *sk, int timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
-	int rc;
+	DEFINE_WAIT(wait);
+	int rc = 0;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
-	for (;;) {
-		__set_current_state(TASK_INTERRUPTIBLE);
-		rc = 0;
-		if (sk->sk_state != TCP_CLOSE) {
-			release_sock(sk);
-			timeout = schedule_timeout(timeout);
-			lock_sock(sk);
-		} else
-			break;
+	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	while (sk->sk_state != TCP_CLOSE) {
+		release_sock(sk);
+		timeout = schedule_timeout(timeout);
+		lock_sock(sk);
 		rc = -ERESTARTSYS;
 		if (signal_pending(current))
 			break;
 		rc = -EAGAIN;
 		if (!timeout)
 			break;
+		rc = 0;
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	finish_wait(sk->sk_sleep, &wait);
 	return rc;
 }
 
 static int llc_ui_wait_for_conn(struct sock *sk, int timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
-	int rc;
+	DEFINE_WAIT(wait);
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
-	for (;;) {
-		__set_current_state(TASK_INTERRUPTIBLE);
-		rc = -EAGAIN;
-		if (sk->sk_state == TCP_CLOSE)
-			break;
-		rc = 0;
-		if (sk->sk_state != TCP_ESTABLISHED) {
-			release_sock(sk);
-			timeout = schedule_timeout(timeout);
-			lock_sock(sk);
-		} else
-			break;
-		rc = -ERESTARTSYS;
-		if (signal_pending(current))
-			break;
-		rc = -EAGAIN;
-		if (!timeout)
+	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+
+	while (sk->sk_state == TCP_SYN_SENT) {
+		release_sock(sk);
+		timeout = schedule_timeout(timeout);
+		lock_sock(sk);
+		if (signal_pending(current) || !timeout)
 			break;
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
-	return rc;
+	finish_wait(sk->sk_sleep, &wait);
+	return timeout;
 }
 
 static int llc_ui_wait_for_data(struct sock *sk, int timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	int rc = 0;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
 	for (;;) {
-		__set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			break;
-		/*
-		 * Well, if we have backlog, try to process it now.
-		 */
-		if (sk->sk_backlog.tail) {
-			release_sock(sk);
-			lock_sock(sk);
-		}
-		rc = 0;
-		if (skb_queue_empty(&sk->sk_receive_queue)) {
-			release_sock(sk);
-			timeout = schedule_timeout(timeout);
-			lock_sock(sk);
-		} else
+		if (!skb_queue_empty(&sk->sk_receive_queue))
 			break;
+		release_sock(sk);
+		timeout = schedule_timeout(timeout);
+		lock_sock(sk);
 		rc = -ERESTARTSYS;
 		if (signal_pending(current))
 			break;
 		rc = -EAGAIN;
 		if (!timeout)
 			break;
+		rc = 0;
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	finish_wait(sk->sk_sleep, &wait);
 	return rc;
 }
 
 static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	struct llc_sock *llc = llc_sk(sk);
 	int rc;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
 	for (;;) {
-		dprintk("%s: looping...\n", __FUNCTION__);
-		__set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 		rc = -ENOTCONN;
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			break;
 		rc = 0;
-		if (llc_data_accept_state(llc->state) || llc->p_flag) {
-			release_sock(sk);
-			timeout = schedule_timeout(timeout);
-			lock_sock(sk);
-		} else
+		if (!llc_data_accept_state(llc->state) && !llc->p_flag)
 			break;
+		release_sock(sk);
+		timeout = schedule_timeout(timeout);
+		lock_sock(sk);
 		rc = -ERESTARTSYS;
 		if (signal_pending(current))
 			break;
@@ -590,8 +579,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout)
 		if (!timeout)
 			break;
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	finish_wait(sk->sk_sleep, &wait);
 	return rc;
 }
 
@@ -621,9 +609,11 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
 	     sk->sk_state != TCP_LISTEN))
 		goto out;
 	/* wait for a connection to arrive. */
-	rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo);
-	if (rc)
-		goto out;
+	if (skb_queue_empty(&sk->sk_receive_queue)) {
+		rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo);
+		if (rc)
+			goto out;
+	}
 	dprintk("%s: got a new connection on %02X\n", __FUNCTION__,
 		llc_sk(sk)->laddr.lsap);
 	skb = skb_dequeue(&sk->sk_receive_queue);
@@ -672,19 +662,16 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
 	struct sk_buff *skb;
 	size_t copied = 0;
-	int rc = -ENOMEM, timeout;
+	int rc = -ENOMEM;
 	int noblock = flags & MSG_DONTWAIT;
 
 	dprintk("%s: receiving in %02X from %02X\n", __FUNCTION__,
 		llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
 	lock_sock(sk);
-	timeout = sock_rcvtimeo(sk, noblock);
-	rc = llc_ui_wait_for_data(sk, timeout);
-	if (rc) {
-		dprintk("%s: llc_ui_wait_for_data failed recv "
-			"in %02X from %02X\n", __FUNCTION__,
-			llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
-		goto out;
+	if (skb_queue_empty(&sk->sk_receive_queue)) {
+		rc = llc_ui_wait_for_data(sk, sock_rcvtimeo(sk, noblock));
+		if (rc)
+			goto out;
 	}
 	skb = skb_dequeue(&sk->sk_receive_queue);
 	if (!skb) /* shutdown */
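
The pattern this patch converges on is: check the wait condition before sleeping, sleep with a timeout, and re-check on every wakeup, mapping a timeout to -EAGAIN and a signal to -ERESTARTSYS. The snippet below is a minimal userspace sketch of that shape only, not kernel code and not the wait-queue API itself: the names queue_len and wait_for_data are made up for illustration, and POSIX threads stand in for the socket lock and wait queue. Build with: cc -pthread sketch.c

/*
 * Userspace analogue of the reworked llc_ui_wait_for_data() loop:
 * the condition (queue_len != 0) is tested before blocking, the wait
 * carries a deadline, and the condition is re-tested after each wakeup.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queue_len;			/* stands in for sk->sk_receive_queue */

/* Wait until queue_len != 0 or timeout_ms expires; 0 on success. */
static int wait_for_data(int timeout_ms)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec  += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (queue_len == 0 && rc == 0) {
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
		if (rc == ETIMEDOUT)
			rc = -EAGAIN;	/* mirror the kernel's -EAGAIN on timeout */
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	/* Nothing ever queues data here, so this times out and prints -EAGAIN. */
	printf("wait_for_data: %d\n", wait_for_data(100));
	return 0;
}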