Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/bind_addr.c |  15
-rw-r--r--  net/sctp/ipv6.c      |   2
-rw-r--r--  net/sctp/protocol.c  | 147
-rw-r--r--  net/sctp/socket.c    |  46
4 files changed, 206 insertions, 4 deletions
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 83e3011c19ca..17d157325b66 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -534,6 +534,21 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
 	return 0;
 }
 
+int sctp_is_ep_boundall(struct sock *sk)
+{
+	struct sctp_bind_addr *bp;
+	struct sctp_sockaddr_entry *addr;
+
+	bp = &sctp_sk(sk)->ep->base.bind_addr;
+	if (sctp_list_single_entry(&bp->address_list)) {
+		addr = list_entry(bp->address_list.next,
+				  struct sctp_sockaddr_entry, list);
+		if (sctp_is_any(sk, &addr->a))
+			return 1;
+	}
+	return 0;
+}
+
 /********************************************************************
  * 3rd Level Abstractions
  ********************************************************************/
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0bb0d7cb9f10..aabaee41dd3e 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -112,6 +112,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 			addr->valid = 1;
 			spin_lock_bh(&sctp_local_addr_lock);
 			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
 			spin_unlock_bh(&sctp_local_addr_lock);
 		}
 		break;
@@ -122,6 +123,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 			if (addr->a.sa.sa_family == AF_INET6 &&
 			    ipv6_addr_equal(&addr->a.v6.sin6_addr,
 					    &ifa->addr)) {
+				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e9..013c6136c546 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -623,6 +623,142 @@ static void sctp_v4_ecn_capable(struct sock *sk)
 	INET_ECN_xmit(sk);
 }
 
+void sctp_addr_wq_timeout_handler(unsigned long arg)
+{
+	struct sctp_sockaddr_entry *addrw, *temp;
+	struct sctp_sock *sp;
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+
+	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+		SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
+		    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+		    addrw);
+
+		/* Now we send an ASCONF for each association */
+		/* Note: we currently don't handle link-local IPv6 addresses */
+		if (addrw->a.sa.sa_family == AF_INET6) {
+			struct in6_addr *in6;
+
+			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+			    IPV6_ADDR_LINKLOCAL)
+				goto free_next;
+
+			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+			if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+			    addrw->state == SCTP_ADDR_NEW) {
+				unsigned long timeo_val;
+
+				SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n",
+				    SCTP_ADDRESS_TICK_DELAY);
+				timeo_val = jiffies;
+				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+				mod_timer(&sctp_addr_wq_timer, timeo_val);
+				break;
+			}
+		}
+
+		list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+			struct sock *sk;
+
+			sk = sctp_opt2sk(sp);
+			/* ignore bound-specific endpoints */
+			if (!sctp_is_ep_boundall(sk))
+				continue;
+			sctp_bh_lock_sock(sk);
+			if (sctp_asconf_mgmt(sp, addrw) < 0)
+				SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
+			sctp_bh_unlock_sock(sk);
+		}
+free_next:
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(void)
+{
+	struct sctp_sockaddr_entry *addrw;
+	struct sctp_sockaddr_entry *temp;
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+	del_timer(&sctp_addr_wq_timer);
+	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+/* lookup the entry for the same address in the addr_waitq
+ * sctp_addr_wq MUST be locked
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+{
+	struct sctp_sockaddr_entry *addrw;
+
+	list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+			continue;
+		if (addrw->a.sa.sa_family == AF_INET) {
+			if (addrw->a.v4.sin_addr.s_addr ==
+			    addr->a.v4.sin_addr.s_addr)
+				return addrw;
+		} else if (addrw->a.sa.sa_family == AF_INET6) {
+			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+			    &addr->a.v6.sin6_addr))
+				return addrw;
+		}
+	}
+	return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+{
+	struct sctp_sockaddr_entry *addrw;
+	unsigned long timeo_val;
+
+	/* first, we check if an opposite message already exists in the queue.
+	 * If we find such a message, it is removed.
+	 * This operation is a bit stupid, but the DHCP client attaches the
+	 * new address after a couple of additions and deletions of that address.
+	 */
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+	/* Offsets existing events in addr_wq */
+	addrw = sctp_addr_wq_lookup(addr);
+	if (addrw) {
+		if (addrw->state != cmd) {
+			SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
+			    " in wq %p\n", addrw->state, &addrw->a,
+			    &sctp_addr_waitq);
+			list_del(&addrw->list);
+			kfree(addrw);
+		}
+		spin_unlock_bh(&sctp_addr_wq_lock);
+		return;
+	}
+
+	/* OK, we have to add the new address to the wait queue */
+	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+	if (addrw == NULL) {
+		spin_unlock_bh(&sctp_addr_wq_lock);
+		return;
+	}
+	addrw->state = cmd;
+	list_add_tail(&addrw->list, &sctp_addr_waitq);
+	SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
+	    " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+
+	if (!timer_pending(&sctp_addr_wq_timer)) {
+		timeo_val = jiffies;
+		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+		mod_timer(&sctp_addr_wq_timer, timeo_val);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
 /* Event handler for inet address addition/deletion events.
  * The sctp_local_addr_list needs to be protocted by a spin lock since
  * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -650,6 +786,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 			addr->valid = 1;
 			spin_lock_bh(&sctp_local_addr_lock);
 			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
 			spin_unlock_bh(&sctp_local_addr_lock);
 		}
 		break;
@@ -660,6 +797,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 			if (addr->a.sa.sa_family == AF_INET &&
 					addr->a.v4.sin_addr.s_addr ==
 					ifa->ifa_local) {
+				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
@@ -1242,6 +1380,7 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Disable ADDIP by default. */
 	sctp_addip_enable = 0;
 	sctp_addip_noauth = 0;
+	sctp_default_auto_asconf = 0;
 
 	/* Enable PR-SCTP by default. */
 	sctp_prsctp_enable = 1;
@@ -1266,6 +1405,13 @@ SCTP_STATIC __init int sctp_init(void)
 	spin_lock_init(&sctp_local_addr_lock);
 	sctp_get_local_addr_list();
 
+	/* Initialize the address event list */
+	INIT_LIST_HEAD(&sctp_addr_waitq);
+	INIT_LIST_HEAD(&sctp_auto_asconf_splist);
+	spin_lock_init(&sctp_addr_wq_lock);
+	sctp_addr_wq_timer.expires = 0;
+	setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
+
 	status = sctp_v4_protosw_init();
 
 	if (status)
@@ -1337,6 +1483,7 @@ SCTP_STATIC __exit void sctp_exit(void)
 	/* Unregister with inet6/inet layers. */
 	sctp_v6_del_protocol();
 	sctp_v4_del_protocol();
+	sctp_free_addr_wq();
 
 	/* Free the control endpoint. */
 	inet_ctl_sock_destroy(sctp_ctl_sock);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e6..7eb1f1a736fb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -811,6 +811,28 @@ out:
 	return retval;
 }
 
+/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+	struct sock *sk = sctp_opt2sk(sp);
+	union sctp_addr *addr;
+	struct sctp_af *af;
+
+	/* It is safe to write port space in caller. */
+	addr = &addrw->a;
+	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+	af = sctp_get_af_specific(addr->sa.sa_family);
+	if (!af)
+		return -EINVAL;
+	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+		return -EINVAL;
+
+	if (addrw->state == SCTP_ADDR_NEW)
+		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+	else
+		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
  *
  * API 8.1
@@ -3763,6 +3785,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	if (sctp_default_auto_asconf) {
+		list_add_tail(&sp->auto_asconf_list,
+		    &sctp_auto_asconf_splist);
+		sp->do_auto_asconf = 1;
+	} else
+		sp->do_auto_asconf = 0;
 	local_bh_enable();
 
 	return 0;
@@ -3771,13 +3799,17 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 /* Cleanup any SCTP per socket resources. */
 SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 {
-	struct sctp_endpoint *ep;
+	struct sctp_sock *sp;
 
 	SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
 
 	/* Release our hold on the endpoint. */
-	ep = sctp_sk(sk)->ep;
-	sctp_endpoint_free(ep);
+	sp = sctp_sk(sk);
+	if (sp->do_auto_asconf) {
+		sp->do_auto_asconf = 0;
+		list_del(&sp->auto_asconf_list);
+	}
+	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
 	percpu_counter_dec(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -6512,6 +6544,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
+	struct list_head tmplist;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -6519,7 +6552,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
 	/* Brute force copy old sctp opt. */
-	inet_sk_copy_descendant(newsk, oldsk);
+	if (oldsp->do_auto_asconf) {
+		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+		inet_sk_copy_descendant(newsk, oldsk);
+		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+	} else
+		inet_sk_copy_descendant(newsk, oldsk);
 
 	/* Restore the ep value that was overwritten with the above structure
 	 * copy.