author		Ingo Molnar <mingo@elte.hu>	2009-12-28 03:23:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-28 03:23:13 -0500
commit		605c1a187f3ce82fbc243e2163c5ca8d1926df8e (patch)
tree		c8065a8c5606a66f81dc494ce22a5baa5e0dfe7e /net
parent		17a2a9b57a9a7d2fd8f97df951b5e63e0bd56ef5 (diff)
parent		ce9277fb08e6e721482f7011ca28dcd0449b197c (diff)

Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent

Diffstat (limited to 'net')
 63 files changed, 676 insertions, 542 deletions
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4dd873e3a1bb..be1cb909d8c0 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -42,6 +42,8 @@
 #include <net/9p/client.h>
 #include <net/9p/transport.h>
 
+#include <linux/syscalls.h> /* killme */
+
 #define P9_PORT 564
 #define MAX_SOCK_BUF (64*1024)
 #define MAXPOLLWADDR	2
@@ -788,24 +790,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 
 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 {
-	int fd, ret;
+	struct p9_trans_fd *p;
+	int ret, fd;
+
+	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
 
 	csocket->sk->sk_allocation = GFP_NOIO;
 	fd = sock_map_fd(csocket, 0);
 	if (fd < 0) {
 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+		sock_release(csocket);
+		kfree(p);
 		return fd;
 	}
 
-	ret = p9_fd_open(client, fd, fd);
-	if (ret < 0) {
-		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
+	get_file(csocket->file);
+	get_file(csocket->file);
+	p->wr = p->rd = csocket->file;
+	client->trans = p;
+	client->status = Connected;
+
+	sys_close(fd);	/* still racy */
+
+	p->rd->f_flags |= O_NONBLOCK;
+
+	p->conn = p9_conn_create(client);
+	if (IS_ERR(p->conn)) {
+		ret = PTR_ERR(p->conn);
+		p->conn = NULL;
+		kfree(p);
+		sockfd_put(csocket);
 		sockfd_put(csocket);
 		return ret;
 	}
-
-	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
-
 	return 0;
 }
 
@@ -883,7 +902,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	struct socket *csocket;
 	struct sockaddr_in sin_server;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	err = parse_opts(args, &opts);
 	if (err < 0)
@@ -897,12 +915,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	sin_server.sin_family = AF_INET;
 	sin_server.sin_addr.s_addr = in_aton(addr);
 	sin_server.sin_port = htons(opts.port);
-	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
+	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
 
-	if (!csocket) {
+	if (err) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
-		err = -EIO;
-		goto error;
+		return err;
 	}
 
 	err = csocket->ops->connect(csocket,
@@ -912,30 +929,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 		P9_EPRINTK(KERN_ERR,
 			   "p9_trans_tcp: problem connecting socket to %s\n",
 			   addr);
-		goto error;
-	}
-
-	err = p9_socket_open(client, csocket);
-	if (err < 0)
-		goto error;
-
-	p = (struct p9_trans_fd *) client->trans;
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		err = PTR_ERR(p->conn);
-		p->conn = NULL;
-		goto error;
-	}
-
-	return 0;
-
-error:
-	if (csocket)
 		sock_release(csocket);
+		return err;
+	}
 
-	kfree(p);
-
-	return err;
+	return p9_socket_open(client, csocket);
 }
 
 static int
@@ -944,49 +942,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 	int err;
 	struct socket *csocket;
 	struct sockaddr_un sun_server;
-	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	csocket = NULL;
 
 	if (strlen(addr) > UNIX_PATH_MAX) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
 			addr);
-		err = -ENAMETOOLONG;
-		goto error;
+		return -ENAMETOOLONG;
 	}
 
 	sun_server.sun_family = PF_UNIX;
 	strcpy(sun_server.sun_path, addr);
-	sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	if (err < 0) {
+		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+		return err;
+	}
 	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
 			sizeof(struct sockaddr_un) - 1, 0);
 	if (err < 0) {
 		P9_EPRINTK(KERN_ERR,
 			"p9_trans_unix: problem connecting socket: %s: %d\n",
 			addr, err);
-		goto error;
-	}
-
-	err = p9_socket_open(client, csocket);
-	if (err < 0)
-		goto error;
-
-	p = (struct p9_trans_fd *) client->trans;
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		err = PTR_ERR(p->conn);
-		p->conn = NULL;
-		goto error;
-	}
-
-	return 0;
-
-error:
-	if (csocket)
 		sock_release(csocket);
+		return err;
+	}
 
-	kfree(p);
-	return err;
+	return p9_socket_open(client, csocket);
 }
 
 static int
@@ -994,7 +976,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
 {
 	int err;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */
+	struct p9_trans_fd *p;
 
 	parse_opts(args, &opts);
 
@@ -1005,21 +987,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
 
 	err = p9_fd_open(client, opts.rfd, opts.wfd);
 	if (err < 0)
-		goto error;
+		return err;
 
 	p = (struct p9_trans_fd *) client->trans;
 	p->conn = p9_conn_create(client);
 	if (IS_ERR(p->conn)) {
 		err = PTR_ERR(p->conn);
 		p->conn = NULL;
-		goto error;
+		fput(p->rd);
+		fput(p->wr);
+		return err;
 	}
 
 	return 0;
-
-error:
-	kfree(p);
-	return err;
 }
 
 static struct p9_trans_module p9_tcp_trans = {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 26a646d4eb32..c9230c398697 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 };
 
+static const struct net_device_ops br2684_netdev_ops_routed = {
+	.ndo_start_xmit = br2684_start_xmit,
+	.ndo_set_mac_address = br2684_mac_addr,
+	.ndo_change_mtu = eth_change_mtu
+};
+
 static void br2684_setup(struct net_device *netdev)
 {
 	struct br2684_dev *brdev = BRPRIV(netdev);
@@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev)
 static void br2684_setup_routed(struct net_device *netdev)
 {
 	struct br2684_dev *brdev = BRPRIV(netdev);
-	brdev->net_dev = netdev;
 
+	brdev->net_dev = netdev;
 	netdev->hard_header_len = 0;
-
-	netdev->netdev_ops = &br2684_netdev_ops;
+	netdev->netdev_ops = &br2684_netdev_ops_routed;
 	netdev->addr_len = 0;
 	netdev->mtu = 1500;
 	netdev->type = ARPHRD_PPP;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b2d644560323..42749b7b917c 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -62,7 +62,6 @@ static int lec_open(struct net_device *dev);
 static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev);
 static int lec_close(struct net_device *dev);
-static void lec_init(struct net_device *dev);
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
 					  const unsigned char *mac_addr);
 static int lec_arp_remove(struct lec_priv *priv,
@@ -670,13 +669,6 @@ static const struct net_device_ops lec_netdev_ops = {
 	.ndo_set_multicast_list = lec_set_multicast_list,
 };
 
-
-static void lec_init(struct net_device *dev)
-{
-	dev->netdev_ops = &lec_netdev_ops;
-	printk("%s: Initialized!\n", dev->name);
-}
-
 static const unsigned char lec_ctrl_magic[] = {
 	0xff,
 	0x00,
@@ -893,6 +885,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 		dev_lec[i] = alloc_etherdev(size);
 		if (!dev_lec[i])
 			return -ENOMEM;
+		dev_lec[i]->netdev_ops = &lec_netdev_ops;
 		snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
 		if (register_netdev(dev_lec[i])) {
 			free_netdev(dev_lec[i]);
@@ -901,7 +894,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 
 		priv = netdev_priv(dev_lec[i]);
 		priv->is_trdev = is_trdev;
-		lec_init(dev_lec[i]);
 	} else {
 		priv = netdev_priv(dev_lec[i]);
 		if (priv->lecd)
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803b..a1fb1b079a82 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
 
 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
 				    unsigned vlen, unsigned int flags,
-				    struct timespec __user *timeout)
+				    struct compat_timespec __user *timeout)
 {
 	int datagrams;
 	struct timespec ktspec;
-	struct compat_timespec __user *utspec;
 
 	if (timeout == NULL)
 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				      flags | MSG_CMSG_COMPAT, NULL);
 
-	utspec = (struct compat_timespec __user *)timeout;
-	if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
-	    get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+	if (get_compat_timespec(&ktspec, timeout))
 		return -EFAULT;
 
 	datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				   flags | MSG_CMSG_COMPAT, &ktspec);
-	if (datagrams > 0 &&
-	    (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
-	     put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+	if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
 		datagrams = -EFAULT;
 
 	return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf3..be9924f60ec3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
 
 static void rollback_registered_many(struct list_head *head)
 {
-	struct net_device *dev;
+	struct net_device *dev, *tmp;
 
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
 		/* Some devices call without registering
-		 * for initialization unwind.
+		 * for initialization unwind. Remove those
+		 * devices and proceed with the remaining.
 		 */
 		if (dev->reg_state == NETREG_UNINITIALIZED) {
 			pr_debug("unregister_netdevice: device %s/%p never "
 				 "was registered\n", dev->name, dev);
 
 			WARN_ON(1);
-			return;
+			list_del(&dev->unreg_list);
+			continue;
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
 		rollback_registered(dev);
 		dev->reg_state = NETREG_UNREGISTERED;
 	}
+	/*
+	 * Prevent userspace races by waiting until the network
+	 * device is fully setup before sending notifications.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
 out:
 	return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	/* Notify protocols, that a new device appeared. */
 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
 
+	/*
+	 * Prevent userspace races by waiting until the network
+	 * device is fully setup before sending notifications.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
 	synchronize_net();
 	err = 0;
 out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..794bcb897ff0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 	case NETDEV_UNREGISTER:
 		rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 		break;
-	case NETDEV_REGISTER:
-		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
-		break;
 	case NETDEV_UP:
 	case NETDEV_DOWN:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 		break;
+	case NETDEV_POST_INIT:
+	case NETDEV_REGISTER:
 	case NETDEV_CHANGE:
 	case NETDEV_GOING_DOWN:
+	case NETDEV_UNREGISTER_BATCH:
 		break;
 	default:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfa3e7865a8c..93c4e060c91e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
 
 
 /* Pipe buffer operations for a socket. */
-static struct pipe_buf_operations sock_pipe_buf_ops = {
+static const struct pipe_buf_operations sock_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index efbcfdc12796..dad7bc4878e0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	dccp_sync_mss(newsk, dst_mtu(dst));
 
-	__inet_hash_nolisten(newsk);
+	__inet_hash_nolisten(newsk, NULL);
 	__inet_inherit_port(sk, newsk);
 
 	return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6574215a1f51..baf05cf43c28 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk)
 			return;
 		}
 		local_bh_disable();
-		__inet6_hash(sk);
+		__inet6_hash(sk, NULL);
 		local_bh_enable();
 	}
 }
@@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
-	__inet6_hash(newsk);
+	__inet6_hash(newsk, NULL);
 	__inet_inherit_port(sk, newsk);
 
 	return newsk;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035eb..0c94a1ac2946 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
 
 	  If unsure, say Y. Note that if you want to use DHCP, a DHCP server
 	  must be operating on your network. Read
-	  <file:Documentation/filesystems/nfsroot.txt> for details.
+	  <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 config IP_PNP_BOOTP
 	bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
 	  does BOOTP itself, providing all necessary information on the kernel
 	  command line, you can say N here. If unsure, say Y. Note that if you
 	  want to use BOOTP, a BOOTP server must be operating on your network.
-	  Read <file:Documentation/filesystems/nfsroot.txt> for details.
+	  Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 config IP_PNP_RARP
 	bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
 	  older protocol which is being obsoleted by BOOTP and DHCP), say Y
 	  here. Note that if you want to use RARP, a RARP server must be
 	  operating on your network. Read
-	  <file:Documentation/filesystems/nfsroot.txt> for details.
+	  <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 # not yet ready..
 #   bool '    IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 21e5e32d8c60..2b79377b468d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
 					  inet->inet_dport);
 }
 
-void __inet_hash_nolisten(struct sock *sk)
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	spinlock_t *lock;
 	struct inet_ehash_bucket *head;
+	int twrefcnt = 0;
 
 	WARN_ON(!sk_unhashed(sk));
 
@@ -367,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk)
 
 	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
+	if (tw) {
+		WARN_ON(sk->sk_hash != tw->tw_hash);
+		twrefcnt = inet_twsk_unhash(tw);
+	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
@@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk)
 	struct inet_listen_hashbucket *ilb;
 
 	if (sk->sk_state != TCP_LISTEN) {
-		__inet_hash_nolisten(sk);
+		__inet_hash_nolisten(sk, NULL);
 		return;
 	}
 
@@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct sock *sk, u32 port_offset,
 		int (*check_established)(struct inet_timewait_death_row *,
 			struct sock *, __u16, struct inet_timewait_sock **),
-		void (*hash)(struct sock *sk))
+		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
 	const unsigned short snum = inet_sk(sk)->inet_num;
@@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	struct inet_bind_bucket *tb;
 	int ret;
 	struct net *net = sock_net(sk);
+	int twrefcnt = 1;
 
 	if (!snum) {
 		int i, remaining, low, high, port;
@@ -493,13 +500,18 @@ ok:
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->inet_sport = htons(port);
-			hash(sk);
+			twrefcnt += hash(sk, tw);
 		}
+		if (tw)
+			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
 		spin_unlock(&head->lock);
 
 		if (tw) {
 			inet_twsk_deschedule(tw, death_row);
-			inet_twsk_put(tw);
+			while (twrefcnt) {
+				twrefcnt--;
+				inet_twsk_put(tw);
+			}
 		}
 
 		ret = 0;
@@ -510,7 +522,7 @@ ok:
 	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		hash(sk);
+		hash(sk, NULL);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 0fdf45e4c90c..cc94cc2d8b2d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -15,9 +15,13 @@
 #include <net/ip.h>
 
 
-/*
- * unhash a timewait socket from established hash
- * lock must be hold by caller
+/**
+ * inet_twsk_unhash - unhash a timewait socket from established hash
+ * @tw: timewait socket
+ *
+ * unhash a timewait socket from established hash, if hashed.
+ * ehash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
  */
 int inet_twsk_unhash(struct inet_timewait_sock *tw)
 {
@@ -26,6 +30,37 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
 
 	hlist_nulls_del_rcu(&tw->tw_node);
 	sk_nulls_node_init(&tw->tw_node);
+	/*
+	 * We cannot call inet_twsk_put() ourself under lock,
+	 * caller must call it for us.
+	 */
+	return 1;
+}
+
+/**
+ * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
+ * @tw: timewait socket
+ * @hashinfo: hashinfo pointer
+ *
+ * unhash a timewait socket from bind hash, if hashed.
+ * bind hash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
+ */
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+			  struct inet_hashinfo *hashinfo)
+{
+	struct inet_bind_bucket *tb = tw->tw_tb;
+
+	if (!tb)
+		return 0;
+
+	__hlist_del(&tw->tw_bind_node);
+	tw->tw_tb = NULL;
+	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+	/*
+	 * We cannot call inet_twsk_put() ourself under lock,
+	 * caller must call it for us.
+	 */
 	return 1;
 }
 
@@ -34,7 +69,6 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
 			     struct inet_hashinfo *hashinfo)
 {
 	struct inet_bind_hashbucket *bhead;
-	struct inet_bind_bucket *tb;
 	int refcnt;
 	/* Unlink from established hashes. */
 	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
@@ -46,15 +80,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
 	/* Disassociate with bind bucket. */
 	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
 			hashinfo->bhash_size)];
+
 	spin_lock(&bhead->lock);
-	tb = tw->tw_tb;
-	if (tb) {
-		__hlist_del(&tw->tw_bind_node);
-		tw->tw_tb = NULL;
-		inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-		refcnt++;
-	}
+	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
 	spin_unlock(&bhead->lock);
+
 #ifdef SOCK_REFCNT_DEBUG
 	if (atomic_read(&tw->tw_refcnt) != 1) {
 		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
@@ -126,7 +156,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	/*
 	 * Notes :
-	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
+	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
 	 * - We add one reference for the bhash link
 	 * - We add one reference for the ehash link
 	 * - We want this refcnt update done before allowing other
@@ -136,7 +166,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	spin_unlock(lock);
 }
-
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
@@ -177,7 +206,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 
 	return tw;
 }
-
 EXPORT_SYMBOL_GPL(inet_twsk_alloc);
 
 /* Returns non-zero if quota exceeded. */
@@ -256,7 +284,6 @@ void inet_twdr_hangman(unsigned long data)
 out:
 	spin_unlock(&twdr->death_lock);
 }
-
 EXPORT_SYMBOL_GPL(inet_twdr_hangman);
 
 void inet_twdr_twkill_work(struct work_struct *work)
@@ -287,7 +314,6 @@ void inet_twdr_twkill_work(struct work_struct *work)
 		spin_unlock_bh(&twdr->death_lock);
 	}
 }
-
 EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
 
 /* These are always called from BH context. See callers in
@@ -307,7 +333,6 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw,
 	spin_unlock(&twdr->death_lock);
 	__inet_twsk_kill(tw, twdr->hashinfo);
 }
-
 EXPORT_SYMBOL(inet_twsk_deschedule);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw,
@@ -388,7 +413,6 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
 	mod_timer(&twdr->tw_timer, jiffies + twdr->period);
 	spin_unlock(&twdr->death_lock);
 }
-
 EXPORT_SYMBOL_GPL(inet_twsk_schedule);
 
 void inet_twdr_twcal_tick(unsigned long data)
@@ -449,7 +473,6 @@ out:
 #endif
 	spin_unlock(&twdr->death_lock);
 }
-
 EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331c..10a6a604bf32 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config);
 
 /*
  * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
- * command line parameter. See Documentation/filesystems/nfsroot.txt.
+ * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
  */
 static int __init ic_proto_name(char *name)
 {
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e5..331ead3ebd1b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
 #include <net/route.h>
 #include <net/ip.h>
 
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 	return err;
 }
 
+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+					      struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+	if (skb->nf_bridge &&
+	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+		return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+	if (hooknum == NF_INET_PRE_ROUTING)
+		return IP_DEFRAG_CONNTRACK_IN;
+	else
+		return IP_DEFRAG_CONNTRACK_OUT;
+}
+
 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 					  struct sk_buff *skb,
 					  const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
 	/* Gather fragments. */
 	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-		if (nf_ct_ipv4_gather_frags(skb,
-					    hooknum == NF_INET_PRE_ROUTING ?
-					    IP_DEFRAG_CONNTRACK_IN :
-					    IP_DEFRAG_CONNTRACK_OUT))
+		enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+		if (nf_ct_ipv4_gather_frags(skb, user))
 			return NF_STOLEN;
 	}
 	return NF_ACCEPT;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..66fd80ef2473 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
+	/* check for timestamp cookie support */
+	memset(&tcp_opt, 0, sizeof(tcp_opt));
+	tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+	if (tcp_opt.saw_tstamp)
+		cookie_check_timestamp(&tcp_opt);
+
 	ret = NULL;
 	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
 	if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
 	ireq->ecn_ok = 0;
+	ireq->snd_wscale = tcp_opt.snd_wscale;
+	ireq->rcv_wscale = tcp_opt.rcv_wscale;
+	ireq->sack_ok = tcp_opt.sack_ok;
+	ireq->wscale_ok = tcp_opt.wscale_ok;
+	ireq->tstamp_ok = tcp_opt.saw_tstamp;
+	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 
 	/* We throwed the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 		}
 	}
 
-	/* check for timestamp cookie support */
-	memset(&tcp_opt, 0, sizeof(tcp_opt));
-	tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
-
-	ireq->snd_wscale = tcp_opt.snd_wscale;
-	ireq->rcv_wscale = tcp_opt.rcv_wscale;
-	ireq->sack_ok = tcp_opt.sack_ok;
-	ireq->wscale_ok = tcp_opt.wscale_ok;
-	ireq->tstamp_ok = tcp_opt.saw_tstamp;
-	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
 	/* Try to redo what tcp_v4_send_synack did. */
 	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c8666b70cde0..b0a26bb25e2e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2540,11 +2540,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		ctd.tcpct_cookie_desired = cvp->cookie_desired;
 		ctd.tcpct_s_data_desired = cvp->s_data_desired;
 
-		/* Cookie(s) saved, return as nonce */
-		if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
-			/* impossible? */
-			return -EINVAL;
-		}
 		memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
 		       cvp->cookie_pair_size);
 		ctd.tcpct_used = cvp->cookie_pair_size;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 57ae96a04220..28e029632493 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
 	}
 }
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (tp->retrans_out)
+		return 1;
+
+	skb = tcp_write_queue_head(sk);
+	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+		return 1;
+
+	return 0;
+}
+
 /* Undo during fast recovery after partial ACK. */
 
 static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 		/* Plain luck! Hole if filled with delayed
 		 * packet, rather than with a retransmit.
 		 */
-		if (tp->retrans_out == 0)
+		if (!tcp_any_retrans_done(sk))
 			tp->retrans_stamp = 0;
 
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int state = TCP_CA_Open;
 
-	if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+	if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
 		state = TCP_CA_Disorder;
 
 	if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
 	tcp_verify_left_out(tp);
 
-	if (!tp->frto_counter && tp->retrans_out == 0)
+	if (!tp->frto_counter && !tcp_any_retrans_done(sk))
 		tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
@@ -3698,7 +3727,7 @@ old_ack:
  * the fast version below fails.
 */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       u8 **hvpp, int estab, struct dst_entry *dst)
+		       u8 **hvpp, int estab)
 {
 	unsigned char *ptr;
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3737,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 			break;
 		case TCPOPT_WINDOW:
 			if (opsize == TCPOLEN_WINDOW && th->syn &&
-			    !estab && sysctl_tcp_window_scaling &&
-			    !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+			    !estab && sysctl_tcp_window_scaling) {
 				__u8 snd_wscale = *(__u8 *)ptr;
 				opt_rx->wscale_ok = 1;
 				if (snd_wscale > 14) {
@@ -3754,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 		case TCPOPT_TIMESTAMP:
 			if ((opsize == TCPOLEN_TIMESTAMP) &&
 			    ((estab && opt_rx->tstamp_ok) ||
-			     (!estab && sysctl_tcp_timestamps &&
-			      !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+			     (!estab && sysctl_tcp_timestamps))) {
 				opt_rx->saw_tstamp = 1;
 				opt_rx->rcv_tsval = get_unaligned_be32(ptr);
 				opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3763,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 			break;
 		case TCPOPT_SACK_PERM:
 			if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-			    !estab && sysctl_tcp_sack &&
-			    !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+			    !estab && sysctl_tcp_sack) {
 				opt_rx->sack_ok = 1;
 				tcp_sack_reset(opt_rx);
 			}
@@ -3849,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 		if (tcp_parse_aligned_timestamp(tp, th))
 			return 1;
 	}
-	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
 	return 1;
 }
 
@@ -4104,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
 
-	if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-	    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 		int mib_idx;
 
 		if (before(seq, tp->rcv_nxt))
@@ -4136,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
-		if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-		    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5399,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	u8 *hash_location;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
-	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
 	if (th->ack) {
 		/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 29002ab26e0d..65b8ebfd078a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 #endif
 
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	ireq->no_srccheck = inet_sk(sk)->transparent;
-	ireq->opt = tcp_v4_save_options(sk, skb);
-
-	dst = inet_csk_route_req(sk, req);
-	if(!dst)
-		goto drop_and_free;
-
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
 	if (tmp_opt.cookie_plus > 0 &&
 	    tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 	tcp_openreq_init(req, &tmp_opt, skb);
 
+	ireq = inet_rsk(req);
+	ireq->loc_addr = daddr;
+	ireq->rmt_addr = saddr;
+	ireq->no_srccheck = inet_sk(sk)->transparent;
+	ireq->opt = tcp_v4_save_options(sk, skb);
+
 	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_release;
+		goto drop_and_free;
 
 	if (!want_cookie)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (tmp_opt.saw_tstamp &&
 	    tcp_death_row.sysctl_tw_recycle &&
+	    (dst = inet_csk_route_req(sk, req)) != NULL &&
 	    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 	    peer->v4daddr == saddr) {
 		if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1464,7 +1461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	}
 #endif
 
-	__inet_hash_nolisten(newsk);
+	__inet_hash_nolisten(newsk, NULL);
 	__inet_inherit_port(sk, newsk);
 
 	return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..f206ee5dda80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	int paws_reject = 0;
 
+	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-		tmp_opt.tstamp_ok = 1;
-		tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
 
-	if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
-		tmp_opt.tstamp_ok = 1;
-		tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+	tmp_opt.saw_tstamp = 0;
+	if (th->doff > (sizeof(struct tcphdr)>>2)) {
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 93316a96d820..383ce237640f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
553 | struct tcp_md5sig_key **md5) { | 553 | struct tcp_md5sig_key **md5) { |
554 | struct tcp_sock *tp = tcp_sk(sk); | 554 | struct tcp_sock *tp = tcp_sk(sk); |
555 | struct tcp_cookie_values *cvp = tp->cookie_values; | 555 | struct tcp_cookie_values *cvp = tp->cookie_values; |
556 | struct dst_entry *dst = __sk_dst_get(sk); | ||
557 | unsigned remaining = MAX_TCP_OPTION_SPACE; | 556 | unsigned remaining = MAX_TCP_OPTION_SPACE; |
558 | u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? | 557 | u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? |
559 | tcp_cookie_size_check(cvp->cookie_desired) : | 558 | tcp_cookie_size_check(cvp->cookie_desired) : |
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
581 | opts->mss = tcp_advertise_mss(sk); | 580 | opts->mss = tcp_advertise_mss(sk); |
582 | remaining -= TCPOLEN_MSS_ALIGNED; | 581 | remaining -= TCPOLEN_MSS_ALIGNED; |
583 | 582 | ||
584 | if (likely(sysctl_tcp_timestamps && | 583 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { |
585 | !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && | ||
586 | *md5 == NULL)) { | ||
587 | opts->options |= OPTION_TS; | 584 | opts->options |= OPTION_TS; |
588 | opts->tsval = TCP_SKB_CB(skb)->when; | 585 | opts->tsval = TCP_SKB_CB(skb)->when; |
589 | opts->tsecr = tp->rx_opt.ts_recent; | 586 | opts->tsecr = tp->rx_opt.ts_recent; |
590 | remaining -= TCPOLEN_TSTAMP_ALIGNED; | 587 | remaining -= TCPOLEN_TSTAMP_ALIGNED; |
591 | } | 588 | } |
592 | if (likely(sysctl_tcp_window_scaling && | 589 | if (likely(sysctl_tcp_window_scaling)) { |
593 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { | ||
594 | opts->ws = tp->rx_opt.rcv_wscale; | 590 | opts->ws = tp->rx_opt.rcv_wscale; |
595 | opts->options |= OPTION_WSCALE; | 591 | opts->options |= OPTION_WSCALE; |
596 | remaining -= TCPOLEN_WSCALE_ALIGNED; | 592 | remaining -= TCPOLEN_WSCALE_ALIGNED; |
597 | } | 593 | } |
598 | if (likely(sysctl_tcp_sack && | 594 | if (likely(sysctl_tcp_sack)) { |
599 | !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { | ||
600 | opts->options |= OPTION_SACK_ADVERTISE; | 595 | opts->options |= OPTION_SACK_ADVERTISE; |
601 | if (unlikely(!(OPTION_TS & opts->options))) | 596 | if (unlikely(!(OPTION_TS & opts->options))) |
602 | remaining -= TCPOLEN_SACKPERM_ALIGNED; | 597 | remaining -= TCPOLEN_SACKPERM_ALIGNED; |
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk) | |||
2527 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. | 2522 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. |
2528 | */ | 2523 | */ |
2529 | tp->tcp_header_len = sizeof(struct tcphdr) + | 2524 | tp->tcp_header_len = sizeof(struct tcphdr) + |
2530 | (sysctl_tcp_timestamps && | 2525 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); |
2531 | (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ? | ||
2532 | TCPOLEN_TSTAMP_ALIGNED : 0)); | ||
2533 | 2526 | ||
2534 | #ifdef CONFIG_TCP_MD5SIG | 2527 | #ifdef CONFIG_TCP_MD5SIG |
2535 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | 2528 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) |
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk) | |||
2555 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), | 2548 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), |
2556 | &tp->rcv_wnd, | 2549 | &tp->rcv_wnd, |
2557 | &tp->window_clamp, | 2550 | &tp->window_clamp, |
2558 | (sysctl_tcp_window_scaling && | 2551 | sysctl_tcp_window_scaling, |
2559 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), | ||
2560 | &rcv_wscale); | 2552 | &rcv_wscale); |
2561 | 2553 | ||
2562 | tp->rx_opt.rcv_wscale = rcv_wscale; | 2554 | tp->rx_opt.rcv_wscale = rcv_wscale; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8353a538cd4c..8816a20c2597 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) | |||
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | /* This function calculates a "timeout" which is equivalent to the timeout of a | ||
136 | * TCP connection after "boundary" unsuccessful, exponentially backed-off | ||
137 | * retransmissions with an initial RTO of TCP_RTO_MIN. | ||
138 | */ | ||
139 | static bool retransmits_timed_out(struct sock *sk, | ||
140 | unsigned int boundary) | ||
141 | { | ||
142 | unsigned int timeout, linear_backoff_thresh; | ||
143 | unsigned int start_ts; | ||
144 | |||
145 | if (!inet_csk(sk)->icsk_retransmits) | ||
146 | return false; | ||
147 | |||
148 | if (unlikely(!tcp_sk(sk)->retrans_stamp)) | ||
149 | start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; | ||
150 | else | ||
151 | start_ts = tcp_sk(sk)->retrans_stamp; | ||
152 | |||
153 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); | ||
154 | |||
155 | if (boundary <= linear_backoff_thresh) | ||
156 | timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; | ||
157 | else | ||
158 | timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + | ||
159 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; | ||
160 | |||
161 | return (tcp_time_stamp - start_ts) >= timeout; | ||
162 | } | ||
163 | |||
135 | /* A write timeout has occurred. Process the after effects. */ | 164 | /* A write timeout has occurred. Process the after effects. */ |
136 | static int tcp_write_timeout(struct sock *sk) | 165 | static int tcp_write_timeout(struct sock *sk) |
137 | { | 166 | { |
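Editor's note: the retransmits_timed_out() helper added above folds the exponential backoff series into a closed form: up to ilog2(TCP_RTO_MAX/TCP_RTO_MIN) doublings starting from TCP_RTO_MIN, then linear steps of TCP_RTO_MAX. Below is a rough standalone sketch of the same arithmetic, assuming the usual 200 ms TCP_RTO_MIN and 120 s TCP_RTO_MAX and working in milliseconds rather than jiffies, so it illustrates the formula rather than the kernel code itself.

#include <stdio.h>

#define RTO_MIN_MS  200u       /* assumed TCP_RTO_MIN */
#define RTO_MAX_MS  120000u    /* assumed TCP_RTO_MAX */

static unsigned int ilog2_u(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

static unsigned int backoff_timeout_ms(unsigned int boundary)
{
        unsigned int thresh = ilog2_u(RTO_MAX_MS / RTO_MIN_MS);

        if (boundary <= thresh)
                return ((2 << boundary) - 1) * RTO_MIN_MS;
        return ((2 << thresh) - 1) * RTO_MIN_MS +
               (boundary - thresh) * RTO_MAX_MS;
}

int main(void)
{
        printf("boundary  5 -> %u ms\n", backoff_timeout_ms(5));   /* 12600  */
        printf("boundary 15 -> %u ms\n", backoff_timeout_ms(15));  /* 924600 */
        return 0;
}

For boundary = 15, the tcp_retries2 default, this works out to roughly 924.6 seconds, the familiar ~15 minute write timeout; the helper then simply asks whether that much time has elapsed since retrans_stamp.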
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1f9534846ca9..f0126fdd7e04 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
216 | * force rand to be an odd multiple of UDP_HTABLE_SIZE | 216 | * force rand to be an odd multiple of UDP_HTABLE_SIZE |
217 | */ | 217 | */ |
218 | rand = (rand | 1) * (udptable->mask + 1); | 218 | rand = (rand | 1) * (udptable->mask + 1); |
219 | for (last = first + udptable->mask + 1; | 219 | last = first + udptable->mask + 1; |
220 | first != last; | 220 | do { |
221 | first++) { | ||
222 | hslot = udp_hashslot(udptable, net, first); | 221 | hslot = udp_hashslot(udptable, net, first); |
223 | bitmap_zero(bitmap, PORTS_PER_CHAIN); | 222 | bitmap_zero(bitmap, PORTS_PER_CHAIN); |
224 | spin_lock_bh(&hslot->lock); | 223 | spin_lock_bh(&hslot->lock); |
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
238 | snum += rand; | 237 | snum += rand; |
239 | } while (snum != first); | 238 | } while (snum != first); |
240 | spin_unlock_bh(&hslot->lock); | 239 | spin_unlock_bh(&hslot->lock); |
241 | } | 240 | } while (++first != last); |
242 | goto fail; | 241 | goto fail; |
243 | } else { | 242 | } else { |
244 | hslot = udp_hashslot(udptable, net, snum); | 243 | hslot = udp_hashslot(udptable, net, snum); |
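Editor's note: the restructured loop above still leans on the trick spelled out in the comment: the candidate port is advanced by (rand | 1) * (mask + 1), an odd multiple of the slot count. That keeps every candidate in the same hash slot, and because the odd factor is coprime to 2^16 the walk visits each port of that slot exactly once before wrapping back to the starting value, which is what ends the inner do/while. A quick userspace check of that property, with 256 slots chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        const unsigned int slots = 256;              /* hypothetical mask + 1 */
        unsigned int step = (12345u | 1) * slots;    /* odd multiple of the slot count */
        unsigned short first = 40000, snum = first;
        static unsigned char seen[65536];
        unsigned int visited = 0;

        do {
                if (!seen[snum]) {
                        seen[snum] = 1;
                        visited++;
                }
                snum += step;                        /* wraps mod 2^16 like a u16 port */
        } while (snum != first);

        /* expect 65536 / 256 = 256 distinct ports, all mapping to one slot */
        printf("visited %u distinct ports, all in slot %u\n",
               visited, first % slots);
        return 0;
}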
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index c813e294ec0c..633a6c266136 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -22,9 +22,10 @@ | |||
22 | #include <net/inet6_hashtables.h> | 22 | #include <net/inet6_hashtables.h> |
23 | #include <net/ip.h> | 23 | #include <net/ip.h> |
24 | 24 | ||
25 | void __inet6_hash(struct sock *sk) | 25 | int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) |
26 | { | 26 | { |
27 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | 27 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
28 | int twrefcnt = 0; | ||
28 | 29 | ||
29 | WARN_ON(!sk_unhashed(sk)); | 30 | WARN_ON(!sk_unhashed(sk)); |
30 | 31 | ||
@@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk) | |||
45 | lock = inet_ehash_lockp(hashinfo, hash); | 46 | lock = inet_ehash_lockp(hashinfo, hash); |
46 | spin_lock(lock); | 47 | spin_lock(lock); |
47 | __sk_nulls_add_node_rcu(sk, list); | 48 | __sk_nulls_add_node_rcu(sk, list); |
49 | if (tw) { | ||
50 | WARN_ON(sk->sk_hash != tw->tw_hash); | ||
51 | twrefcnt = inet_twsk_unhash(tw); | ||
52 | } | ||
48 | spin_unlock(lock); | 53 | spin_unlock(lock); |
49 | } | 54 | } |
50 | 55 | ||
51 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 56 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
57 | return twrefcnt; | ||
52 | } | 58 | } |
53 | EXPORT_SYMBOL(__inet6_hash); | 59 | EXPORT_SYMBOL(__inet6_hash); |
54 | 60 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 5f2ec208a8c3..0956ebabbff2 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <net/ipv6.h> | 20 | #include <net/ipv6.h> |
21 | #include <net/inet_frag.h> | 21 | #include <net/inet_frag.h> |
22 | 22 | ||
23 | #include <linux/netfilter_bridge.h> | ||
23 | #include <linux/netfilter_ipv6.h> | 24 | #include <linux/netfilter_ipv6.h> |
24 | #include <net/netfilter/nf_conntrack.h> | 25 | #include <net/netfilter/nf_conntrack.h> |
25 | #include <net/netfilter/nf_conntrack_helper.h> | 26 | #include <net/netfilter/nf_conntrack_helper.h> |
@@ -187,6 +188,21 @@ out: | |||
187 | return nf_conntrack_confirm(skb); | 188 | return nf_conntrack_confirm(skb); |
188 | } | 189 | } |
189 | 190 | ||
191 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | ||
192 | struct sk_buff *skb) | ||
193 | { | ||
194 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
195 | if (skb->nf_bridge && | ||
196 | skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) | ||
197 | return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; | ||
198 | #endif | ||
199 | if (hooknum == NF_INET_PRE_ROUTING) | ||
200 | return IP6_DEFRAG_CONNTRACK_IN; | ||
201 | else | ||
202 | return IP6_DEFRAG_CONNTRACK_OUT; | ||
203 | |||
204 | } | ||
205 | |||
190 | static unsigned int ipv6_defrag(unsigned int hooknum, | 206 | static unsigned int ipv6_defrag(unsigned int hooknum, |
191 | struct sk_buff *skb, | 207 | struct sk_buff *skb, |
192 | const struct net_device *in, | 208 | const struct net_device *in, |
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
199 | if (skb->nfct) | 215 | if (skb->nfct) |
200 | return NF_ACCEPT; | 216 | return NF_ACCEPT; |
201 | 217 | ||
202 | reasm = nf_ct_frag6_gather(skb); | 218 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); |
203 | |||
204 | /* queued */ | 219 | /* queued */ |
205 | if (reasm == NULL) | 220 | if (reasm == NULL) |
206 | return NF_STOLEN; | 221 | return NF_STOLEN; |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index e0b9424fa1b2..312c20adc83f 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -168,13 +168,14 @@ out: | |||
168 | /* Creation primitives. */ | 168 | /* Creation primitives. */ |
169 | 169 | ||
170 | static __inline__ struct nf_ct_frag6_queue * | 170 | static __inline__ struct nf_ct_frag6_queue * |
171 | fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) | 171 | fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) |
172 | { | 172 | { |
173 | struct inet_frag_queue *q; | 173 | struct inet_frag_queue *q; |
174 | struct ip6_create_arg arg; | 174 | struct ip6_create_arg arg; |
175 | unsigned int hash; | 175 | unsigned int hash; |
176 | 176 | ||
177 | arg.id = id; | 177 | arg.id = id; |
178 | arg.user = user; | ||
178 | arg.src = src; | 179 | arg.src = src; |
179 | arg.dst = dst; | 180 | arg.dst = dst; |
180 | 181 | ||
@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) | |||
559 | return 0; | 560 | return 0; |
560 | } | 561 | } |
561 | 562 | ||
562 | struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | 563 | struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) |
563 | { | 564 | { |
564 | struct sk_buff *clone; | 565 | struct sk_buff *clone; |
565 | struct net_device *dev = skb->dev; | 566 | struct net_device *dev = skb->dev; |
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
605 | if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) | 606 | if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) |
606 | nf_ct_frag6_evictor(); | 607 | nf_ct_frag6_evictor(); |
607 | 608 | ||
608 | fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); | 609 | fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); |
609 | if (fq == NULL) { | 610 | if (fq == NULL) { |
610 | pr_debug("Can't find and can't create new queue\n"); | 611 | pr_debug("Can't find and can't create new queue\n"); |
611 | goto ret_orig; | 612 | goto ret_orig; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 4d98549a6868..3b3a95607125 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -72,6 +72,7 @@ struct frag_queue | |||
72 | struct inet_frag_queue q; | 72 | struct inet_frag_queue q; |
73 | 73 | ||
74 | __be32 id; /* fragment id */ | 74 | __be32 id; /* fragment id */ |
75 | u32 user; | ||
75 | struct in6_addr saddr; | 76 | struct in6_addr saddr; |
76 | struct in6_addr daddr; | 77 | struct in6_addr daddr; |
77 | 78 | ||
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) | |||
141 | struct ip6_create_arg *arg = a; | 142 | struct ip6_create_arg *arg = a; |
142 | 143 | ||
143 | fq = container_of(q, struct frag_queue, q); | 144 | fq = container_of(q, struct frag_queue, q); |
144 | return (fq->id == arg->id && | 145 | return (fq->id == arg->id && fq->user == arg->user && |
145 | ipv6_addr_equal(&fq->saddr, arg->src) && | 146 | ipv6_addr_equal(&fq->saddr, arg->src) && |
146 | ipv6_addr_equal(&fq->daddr, arg->dst)); | 147 | ipv6_addr_equal(&fq->daddr, arg->dst)); |
147 | } | 148 | } |
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a) | |||
163 | struct ip6_create_arg *arg = a; | 164 | struct ip6_create_arg *arg = a; |
164 | 165 | ||
165 | fq->id = arg->id; | 166 | fq->id = arg->id; |
167 | fq->user = arg->user; | ||
166 | ipv6_addr_copy(&fq->saddr, arg->src); | 168 | ipv6_addr_copy(&fq->saddr, arg->src); |
167 | ipv6_addr_copy(&fq->daddr, arg->dst); | 169 | ipv6_addr_copy(&fq->daddr, arg->dst); |
168 | } | 170 | } |
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
243 | unsigned int hash; | 245 | unsigned int hash; |
244 | 246 | ||
245 | arg.id = id; | 247 | arg.id = id; |
248 | arg.user = IP6_DEFRAG_LOCAL_DELIVER; | ||
246 | arg.src = src; | 249 | arg.src = src; |
247 | arg.dst = dst; | 250 | arg.dst = dst; |
248 | 251 | ||
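Editor's note: the reassembly.c and nf_conntrack_reasm.c hunks above thread a new "user" value (local delivery, conntrack in/out, bridge) into the IPv6 fragment-queue lookup key, so two reassembly contexts no longer share a queue merely because id, source and destination match. A minimal sketch of keying on the extra field, with simplified types rather than the kernel's frag_queue:

#include <stdio.h>
#include <string.h>

struct frag_key {
        unsigned int id;
        unsigned int user;           /* e.g. 0 = local deliver, 1 = conntrack in */
        char src[40], dst[40];
};

static int frag_match(const struct frag_key *q, const struct frag_key *arg)
{
        /* same comparison shape as ip6_frag_match(): id, user, src, dst */
        return q->id == arg->id && q->user == arg->user &&
               !strcmp(q->src, arg->src) && !strcmp(q->dst, arg->dst);
}

int main(void)
{
        struct frag_key local = { 1, 0, "2001:db8::1", "2001:db8::2" };
        struct frag_key ct_in = { 1, 1, "2001:db8::1", "2001:db8::2" };

        /* identical id/src/dst but a different user: separate queues now */
        printf("match = %d\n", frag_match(&local, &ct_in));   /* prints 0 */
        return 0;
}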
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5b9af508b8f2..7208a06576c6 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
185 | 185 | ||
186 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); | 186 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
187 | 187 | ||
188 | /* check for timestamp cookie support */ | ||
189 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
190 | tcp_parse_options(skb, &tcp_opt, &hash_location, 0); | ||
191 | |||
192 | if (tcp_opt.saw_tstamp) | ||
193 | cookie_check_timestamp(&tcp_opt); | ||
194 | |||
188 | ret = NULL; | 195 | ret = NULL; |
189 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); | 196 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); |
190 | if (!req) | 197 | if (!req) |
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
218 | req->expires = 0UL; | 225 | req->expires = 0UL; |
219 | req->retrans = 0; | 226 | req->retrans = 0; |
220 | ireq->ecn_ok = 0; | 227 | ireq->ecn_ok = 0; |
228 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
229 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
230 | ireq->sack_ok = tcp_opt.sack_ok; | ||
231 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
232 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
233 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
221 | treq->rcv_isn = ntohl(th->seq) - 1; | 234 | treq->rcv_isn = ntohl(th->seq) - 1; |
222 | treq->snt_isn = cookie; | 235 | treq->snt_isn = cookie; |
223 | 236 | ||
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
253 | goto out_free; | 266 | goto out_free; |
254 | } | 267 | } |
255 | 268 | ||
256 | /* check for timestamp cookie support */ | ||
257 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
258 | tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst); | ||
259 | |||
260 | if (tcp_opt.saw_tstamp) | ||
261 | cookie_check_timestamp(&tcp_opt); | ||
262 | |||
263 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
264 | |||
265 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
266 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
267 | ireq->sack_ok = tcp_opt.sack_ok; | ||
268 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
269 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
270 | |||
271 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); | 269 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); |
272 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | 270 | tcp_select_initial_window(tcp_full_space(sk), req->mss, |
273 | &req->rcv_wnd, &req->window_clamp, | 271 | &req->rcv_wnd, &req->window_clamp, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aadd7cef73b3..febfd595a40d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk) | |||
96 | return; | 96 | return; |
97 | } | 97 | } |
98 | local_bh_disable(); | 98 | local_bh_disable(); |
99 | __inet6_hash(sk); | 99 | __inet6_hash(sk, NULL); |
100 | local_bh_enable(); | 100 | local_bh_enable(); |
101 | } | 101 | } |
102 | } | 102 | } |
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1169 | struct inet6_request_sock *treq; | 1169 | struct inet6_request_sock *treq; |
1170 | struct ipv6_pinfo *np = inet6_sk(sk); | 1170 | struct ipv6_pinfo *np = inet6_sk(sk); |
1171 | struct tcp_sock *tp = tcp_sk(sk); | 1171 | struct tcp_sock *tp = tcp_sk(sk); |
1172 | struct dst_entry *dst = __sk_dst_get(sk); | ||
1173 | __u32 isn = TCP_SKB_CB(skb)->when; | 1172 | __u32 isn = TCP_SKB_CB(skb)->when; |
1174 | #ifdef CONFIG_SYN_COOKIES | 1173 | #ifdef CONFIG_SYN_COOKIES |
1175 | int want_cookie = 0; | 1174 | int want_cookie = 0; |
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1208 | tcp_clear_options(&tmp_opt); | 1207 | tcp_clear_options(&tmp_opt); |
1209 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 1208 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
1210 | tmp_opt.user_mss = tp->rx_opt.user_mss; | 1209 | tmp_opt.user_mss = tp->rx_opt.user_mss; |
1211 | tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); | 1210 | tcp_parse_options(skb, &tmp_opt, &hash_location, 0); |
1212 | 1211 | ||
1213 | if (tmp_opt.cookie_plus > 0 && | 1212 | if (tmp_opt.cookie_plus > 0 && |
1214 | tmp_opt.saw_tstamp && | 1213 | tmp_opt.saw_tstamp && |
@@ -1496,7 +1495,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1496 | } | 1495 | } |
1497 | #endif | 1496 | #endif |
1498 | 1497 | ||
1499 | __inet6_hash(newsk); | 1498 | __inet6_hash(newsk, NULL); |
1500 | __inet_inherit_port(sk, newsk); | 1499 | __inet_inherit_port(sk, newsk); |
1501 | 1500 | ||
1502 | return newsk; | 1501 | return newsk; |
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index b001c361ad30..4300df35d37d 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h | |||
@@ -249,6 +249,7 @@ | |||
249 | #include <linux/poll.h> | 249 | #include <linux/poll.h> |
250 | #include <linux/capability.h> | 250 | #include <linux/capability.h> |
251 | #include <linux/ctype.h> /* isspace() */ | 251 | #include <linux/ctype.h> /* isspace() */ |
252 | #include <linux/string.h> /* skip_spaces() */ | ||
252 | #include <asm/uaccess.h> | 253 | #include <asm/uaccess.h> |
253 | #include <linux/init.h> | 254 | #include <linux/init.h> |
254 | 255 | ||
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7dea882dbb75..156020d138b5 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap, | |||
76 | /* Look at the next command */ | 76 | /* Look at the next command */ |
77 | start = next; | 77 | start = next; |
78 | 78 | ||
79 | /* Scrap whitespaces before the command */ | 79 | /* Scrap whitespaces before the command */ |
80 | while(isspace(*start)) | 80 | start = skip_spaces(start); |
81 | start++; | ||
82 | 81 | ||
83 | /* ',' is our command separator */ | 82 | /* ',' is our command separator */ |
84 | next = strchr(start, ','); | 83 | next = strchr(start, ','); |
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap, | |||
133 | char * endp; | 132 | char * endp; |
134 | 133 | ||
135 | /* Scrap whitespaces before the command */ | 134 | /* Scrap whitespaces before the command */ |
136 | while(isspace(*begp)) | 135 | begp = skip_spaces(begp); |
137 | begp++; | ||
138 | 136 | ||
139 | /* Convert argument to a number (last arg is the base) */ | 137 | /* Convert argument to a number (last arg is the base) */ |
140 | addr = simple_strtoul(begp, &endp, 16); | 138 | addr = simple_strtoul(begp, &endp, 16); |
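Editor's note: this hunk, like the xt_recent.c one further down, swaps an open-coded while (isspace(*p)) p++; loop for the string helper skip_spaces(), which returns a pointer to the first non-space character. A userspace equivalent just to pin down the contract (the sample input is invented):

#include <ctype.h>
#include <stdio.h>

/* same contract as the kernel helper: skip leading whitespace and return a
 * pointer to the first non-space character (possibly the trailing NUL) */
static char *skip_spaces(const char *str)
{
        while (isspace((unsigned char)*str))
                ++str;
        return (char *)str;
}

int main(void)
{
        const char *cmd = "   name,  0x1f";

        printf("'%s'\n", skip_spaces(cmd));    /* prints 'name,  0x1f' */
        return 0;
}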
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 1e428863574f..c18286a2167b 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev) | |||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static struct dev_pm_ops afiucv_pm_ops = { | 224 | static const struct dev_pm_ops afiucv_pm_ops = { |
225 | .prepare = afiucv_pm_prepare, | 225 | .prepare = afiucv_pm_prepare, |
226 | .complete = afiucv_pm_complete, | 226 | .complete = afiucv_pm_complete, |
227 | .freeze = afiucv_pm_freeze, | 227 | .freeze = afiucv_pm_freeze, |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 3b1f5f5f8de7..fd8b28361a64 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *); | |||
93 | static int iucv_pm_thaw(struct device *); | 93 | static int iucv_pm_thaw(struct device *); |
94 | static int iucv_pm_restore(struct device *); | 94 | static int iucv_pm_restore(struct device *); |
95 | 95 | ||
96 | static struct dev_pm_ops iucv_pm_ops = { | 96 | static const struct dev_pm_ops iucv_pm_ops = { |
97 | .prepare = iucv_pm_prepare, | 97 | .prepare = iucv_pm_prepare, |
98 | .complete = iucv_pm_complete, | 98 | .complete = iucv_pm_complete, |
99 | .freeze = iucv_pm_freeze, | 99 | .freeze = iucv_pm_freeze, |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 84209fbbeb17..76fa6fef6473 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
1193 | x->aalg->alg_key_len = key->sadb_key_bits; | 1193 | x->aalg->alg_key_len = key->sadb_key_bits; |
1194 | memcpy(x->aalg->alg_key, key+1, keysize); | 1194 | memcpy(x->aalg->alg_key, key+1, keysize); |
1195 | } | 1195 | } |
1196 | x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits; | ||
1196 | x->props.aalgo = sa->sadb_sa_auth; | 1197 | x->props.aalgo = sa->sadb_sa_auth; |
1197 | /* x->algo.flags = sa->sadb_sa_flags; */ | 1198 | /* x->algo.flags = sa->sadb_sa_flags; */ |
1198 | } | 1199 | } |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 93ee1fd5c08d..6dc3579c0ac5 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -354,7 +354,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
354 | sinfo->rx_packets = sta->rx_packets; | 354 | sinfo->rx_packets = sta->rx_packets; |
355 | sinfo->tx_packets = sta->tx_packets; | 355 | sinfo->tx_packets = sta->tx_packets; |
356 | 356 | ||
357 | if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { | 357 | if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || |
358 | (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { | ||
358 | sinfo->filled |= STATION_INFO_SIGNAL; | 359 | sinfo->filled |= STATION_INFO_SIGNAL; |
359 | sinfo->signal = (s8)sta->last_signal; | 360 | sinfo->signal = (s8)sta->last_signal; |
360 | } | 361 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 419f186cfcf0..91dc8636d644 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -746,6 +746,7 @@ struct ieee80211_local { | |||
746 | unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ | 746 | unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ |
747 | 747 | ||
748 | bool pspolling; | 748 | bool pspolling; |
749 | bool scan_ps_enabled; | ||
749 | /* | 750 | /* |
750 | * PS can only be enabled when we have exactly one managed | 751 | * PS can only be enabled when we have exactly one managed |
751 | * interface (and monitors) in PS, this then points there. | 752 | * interface (and monitors) in PS, this then points there. |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c0fe46493f71..6a4331429598 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -427,7 +427,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | |||
427 | char *addr5, char *addr6) | 427 | char *addr5, char *addr6) |
428 | { | 428 | { |
429 | int aelen = 0; | 429 | int aelen = 0; |
430 | memset(meshhdr, 0, sizeof(meshhdr)); | 430 | memset(meshhdr, 0, sizeof(*meshhdr)); |
431 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | 431 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
432 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); | 432 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); |
433 | sdata->u.mesh.mesh_seqnum++; | 433 | sdata->u.mesh.mesh_seqnum++; |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 31e102541869..85562c59d7d6 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -188,8 +188,9 @@ struct mesh_rmc { | |||
188 | */ | 188 | */ |
189 | #define MESH_PREQ_MIN_INT 10 | 189 | #define MESH_PREQ_MIN_INT 10 |
190 | #define MESH_DIAM_TRAVERSAL_TIME 50 | 190 | #define MESH_DIAM_TRAVERSAL_TIME 50 |
191 | /* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their | 191 | /* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before |
192 | * expiration | 192 | * timing out. This way it will remain ACTIVE and no data frames will be |
193 | * unnecessarily held in the pending queue. | ||
193 | */ | 194 | */ |
194 | #define MESH_PATH_REFRESH_TIME 1000 | 195 | #define MESH_PATH_REFRESH_TIME 1000 |
195 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) | 196 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 833b2f3670c5..d28acb6b1f81 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -937,7 +937,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | |||
937 | 937 | ||
938 | if (mpath->flags & MESH_PATH_ACTIVE) { | 938 | if (mpath->flags & MESH_PATH_ACTIVE) { |
939 | if (time_after(jiffies, | 939 | if (time_after(jiffies, |
940 | mpath->exp_time + | 940 | mpath->exp_time - |
941 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && | 941 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && |
942 | !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && | 942 | !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && |
943 | !(mpath->flags & MESH_PATH_RESOLVING) && | 943 | !(mpath->flags & MESH_PATH_RESOLVING) && |
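Editor's note: the mesh.h comment rewrite and the sign flip in mesh_hwmp.c above change the test from "refresh after exp_time + refresh_time" to "refresh once within refresh_time of exp_time", so path discovery restarts while the path is still ACTIVE instead of after frames have already begun to queue. A rough sketch of that window, using made-up millisecond values in place of jiffies:

#include <stdio.h>

/* refresh once the current time falls inside the window that ends at
 * exp_time; all values are illustrative milliseconds, not jiffies */
static int needs_refresh(unsigned long now, unsigned long exp_time,
                         unsigned long refresh_time)
{
        return now > exp_time - refresh_time;
}

int main(void)
{
        unsigned long exp_time = 10000, refresh_time = 1000;

        printf("t=8500 -> %d\n", needs_refresh(8500, exp_time, refresh_time)); /* 0 */
        printf("t=9500 -> %d\n", needs_refresh(9500, exp_time, refresh_time)); /* 1: refresh while still active */
        return 0;
}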
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 6dc7b5ad9a41..d8d50fb5e823 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1083,8 +1083,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1083 | 1083 | ||
1084 | ieee80211_set_wmm_default(sdata); | 1084 | ieee80211_set_wmm_default(sdata); |
1085 | 1085 | ||
1086 | ieee80211_recalc_idle(local); | ||
1087 | |||
1088 | /* channel(_type) changes are handled by ieee80211_hw_config */ | 1086 | /* channel(_type) changes are handled by ieee80211_hw_config */ |
1089 | local->oper_channel_type = NL80211_CHAN_NO_HT; | 1087 | local->oper_channel_type = NL80211_CHAN_NO_HT; |
1090 | 1088 | ||
@@ -1370,6 +1368,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | |||
1370 | 1368 | ||
1371 | if (!wk) { | 1369 | if (!wk) { |
1372 | ieee80211_set_disassoc(sdata, true); | 1370 | ieee80211_set_disassoc(sdata, true); |
1371 | ieee80211_recalc_idle(sdata->local); | ||
1373 | } else { | 1372 | } else { |
1374 | list_del(&wk->list); | 1373 | list_del(&wk->list); |
1375 | kfree(wk); | 1374 | kfree(wk); |
@@ -1403,6 +1402,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1403 | sdata->dev->name, mgmt->sa, reason_code); | 1402 | sdata->dev->name, mgmt->sa, reason_code); |
1404 | 1403 | ||
1405 | ieee80211_set_disassoc(sdata, false); | 1404 | ieee80211_set_disassoc(sdata, false); |
1405 | ieee80211_recalc_idle(sdata->local); | ||
1406 | return RX_MGMT_CFG80211_DISASSOC; | 1406 | return RX_MGMT_CFG80211_DISASSOC; |
1407 | } | 1407 | } |
1408 | 1408 | ||
@@ -2117,6 +2117,7 @@ static void ieee80211_sta_work(struct work_struct *work) | |||
2117 | " after %dms, disconnecting.\n", | 2117 | " after %dms, disconnecting.\n", |
2118 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); | 2118 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); |
2119 | ieee80211_set_disassoc(sdata, true); | 2119 | ieee80211_set_disassoc(sdata, true); |
2120 | ieee80211_recalc_idle(local); | ||
2120 | mutex_unlock(&ifmgd->mtx); | 2121 | mutex_unlock(&ifmgd->mtx); |
2121 | /* | 2122 | /* |
2122 | * must be outside lock due to cfg80211, | 2123 | * must be outside lock due to cfg80211, |
@@ -2560,6 +2561,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2560 | IEEE80211_STYPE_DEAUTH, req->reason_code, | 2561 | IEEE80211_STYPE_DEAUTH, req->reason_code, |
2561 | cookie); | 2562 | cookie); |
2562 | 2563 | ||
2564 | ieee80211_recalc_idle(sdata->local); | ||
2565 | |||
2563 | return 0; | 2566 | return 0; |
2564 | } | 2567 | } |
2565 | 2568 | ||
@@ -2592,5 +2595,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2592 | ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, | 2595 | ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, |
2593 | IEEE80211_STYPE_DISASSOC, req->reason_code, | 2596 | IEEE80211_STYPE_DISASSOC, req->reason_code, |
2594 | cookie); | 2597 | cookie); |
2598 | |||
2599 | ieee80211_recalc_idle(sdata->local); | ||
2600 | |||
2595 | return 0; | 2601 | return 0; |
2596 | } | 2602 | } |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f237df408378..9f2807aeaf52 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1712,7 +1712,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1712 | mpp_path_add(proxied_addr, mpp_addr, sdata); | 1712 | mpp_path_add(proxied_addr, mpp_addr, sdata); |
1713 | } else { | 1713 | } else { |
1714 | spin_lock_bh(&mppath->state_lock); | 1714 | spin_lock_bh(&mppath->state_lock); |
1715 | mppath->exp_time = jiffies; | ||
1716 | if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) | 1715 | if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) |
1717 | memcpy(mppath->mpp, mpp_addr, ETH_ALEN); | 1716 | memcpy(mppath->mpp, mpp_addr, ETH_ALEN); |
1718 | spin_unlock_bh(&mppath->state_lock); | 1717 | spin_unlock_bh(&mppath->state_lock); |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 4cf387c944bf..f1a4c7160300 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -227,7 +227,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) | |||
227 | static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) | 227 | static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) |
228 | { | 228 | { |
229 | struct ieee80211_local *local = sdata->local; | 229 | struct ieee80211_local *local = sdata->local; |
230 | bool ps = false; | 230 | |
231 | local->scan_ps_enabled = false; | ||
231 | 232 | ||
232 | /* FIXME: what to do when local->pspolling is true? */ | 233 | /* FIXME: what to do when local->pspolling is true? */ |
233 | 234 | ||
@@ -235,12 +236,13 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) | |||
235 | cancel_work_sync(&local->dynamic_ps_enable_work); | 236 | cancel_work_sync(&local->dynamic_ps_enable_work); |
236 | 237 | ||
237 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 238 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
238 | ps = true; | 239 | local->scan_ps_enabled = true; |
239 | local->hw.conf.flags &= ~IEEE80211_CONF_PS; | 240 | local->hw.conf.flags &= ~IEEE80211_CONF_PS; |
240 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | 241 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); |
241 | } | 242 | } |
242 | 243 | ||
243 | if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) | 244 | if (!(local->scan_ps_enabled) || |
245 | !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) | ||
244 | /* | 246 | /* |
245 | * If power save was enabled, no need to send a nullfunc | 247 | * If power save was enabled, no need to send a nullfunc |
246 | * frame because AP knows that we are sleeping. But if the | 248 | * frame because AP knows that we are sleeping. But if the |
@@ -261,7 +263,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) | |||
261 | 263 | ||
262 | if (!local->ps_sdata) | 264 | if (!local->ps_sdata) |
263 | ieee80211_send_nullfunc(local, sdata, 0); | 265 | ieee80211_send_nullfunc(local, sdata, 0); |
264 | else { | 266 | else if (local->scan_ps_enabled) { |
265 | /* | 267 | /* |
266 | * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware | 268 | * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware |
267 | * will send a nullfunc frame with the powersave bit set | 269 | * will send a nullfunc frame with the powersave bit set |
@@ -277,6 +279,16 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) | |||
277 | */ | 279 | */ |
278 | local->hw.conf.flags |= IEEE80211_CONF_PS; | 280 | local->hw.conf.flags |= IEEE80211_CONF_PS; |
279 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | 281 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); |
282 | } else if (local->hw.conf.dynamic_ps_timeout > 0) { | ||
283 | /* | ||
284 | * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer | ||
285 | * had been running before leaving the operating channel, | ||
286 | * restart the timer now and send a nullfunc frame to inform | ||
287 | * the AP that we are awake. | ||
288 | */ | ||
289 | ieee80211_send_nullfunc(local, sdata, 0); | ||
290 | mod_timer(&local->dynamic_ps_timer, jiffies + | ||
291 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | ||
280 | } | 292 | } |
281 | } | 293 | } |
282 | 294 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d09f78bb2442..78a6e924c7e1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -579,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | |||
579 | if (elen > left) | 579 | if (elen > left) |
580 | break; | 580 | break; |
581 | 581 | ||
582 | if (calc_crc && id < 64 && (filter & BIT(id))) | 582 | if (calc_crc && id < 64 && (filter & (1ULL << id))) |
583 | crc = crc32_be(crc, pos - 2, elen + 2); | 583 | crc = crc32_be(crc, pos - 2, elen + 2); |
584 | 584 | ||
585 | switch (id) { | 585 | switch (id) { |
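Editor's note: the small util.c change matters because the element filter is a 64-bit bitmap while BIT(id) expands to a shift of 1UL, which is only 32 bits wide on 32-bit builds, so ids 32-63 were tested against the wrong bit there. A standalone sketch of the width problem, with uint32_t standing in for a 32-bit unsigned long and an explicit "& 31" showing what such a shift typically degrades to:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t filter = 1ULL << 42;   /* filter bit for a hypothetical id 42 */
        int id = 42;

        /* 32-bit "BIT(id)": the shift count wraps, testing bit 10 instead */
        uint32_t narrow = (uint32_t)1 << (id & 31);
        /* 64-bit 1ULL << id: tests the intended bit */
        uint64_t wide = (uint64_t)1 << id;

        printf("narrow: %d\n", (filter & narrow) != 0);   /* 0 - wrong */
        printf("wide:   %d\n", (filter & wide) != 0);     /* 1 - correct */
        return 0;
}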
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index b95699f00545..847ffca40184 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
1366 | == sysctl_ip_vs_sync_threshold[0])) || | 1366 | == sysctl_ip_vs_sync_threshold[0])) || |
1367 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && | 1367 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && |
1368 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || | 1368 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || |
1369 | (cp->state == IP_VS_TCP_S_CLOSE) || | ||
1369 | (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || | 1370 | (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || |
1370 | (cp->state == IP_VS_TCP_S_TIME_WAIT))))) | 1371 | (cp->state == IP_VS_TCP_S_TIME_WAIT))))) |
1371 | ip_vs_sync_conn(cp); | 1372 | ip_vs_sync_conn(cp); |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index e55a6861d26f..6bde12da2fe0 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2714,6 +2714,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, | |||
2714 | if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) | 2714 | if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) |
2715 | return -EINVAL; | 2715 | return -EINVAL; |
2716 | 2716 | ||
2717 | memset(usvc, 0, sizeof(*usvc)); | ||
2718 | |||
2717 | usvc->af = nla_get_u16(nla_af); | 2719 | usvc->af = nla_get_u16(nla_af); |
2718 | #ifdef CONFIG_IP_VS_IPV6 | 2720 | #ifdef CONFIG_IP_VS_IPV6 |
2719 | if (usvc->af != AF_INET && usvc->af != AF_INET6) | 2721 | if (usvc->af != AF_INET && usvc->af != AF_INET6) |
@@ -2901,6 +2903,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, | |||
2901 | if (!(nla_addr && nla_port)) | 2903 | if (!(nla_addr && nla_port)) |
2902 | return -EINVAL; | 2904 | return -EINVAL; |
2903 | 2905 | ||
2906 | memset(udest, 0, sizeof(*udest)); | ||
2907 | |||
2904 | nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); | 2908 | nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); |
2905 | udest->port = nla_get_u16(nla_port); | 2909 | udest->port = nla_get_u16(nla_port); |
2906 | 2910 | ||
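Editor's note: both ip_vs_ctl.c hunks add a memset() of the caller-supplied structure before the netlink attributes are copied into it, so optional attributes that are absent leave well-defined zeroes rather than stale stack contents. The pattern in miniature, with invented field names:

#include <stdio.h>
#include <string.h>

struct svc_conf {
        unsigned short af;
        unsigned short port;
        unsigned int   fwmark;    /* optional attribute */
        unsigned int   timeout;   /* optional attribute */
};

static void parse_conf(struct svc_conf *c, int have_fwmark)
{
        memset(c, 0, sizeof(*c));   /* the added step: no stale stack values */
        c->af = 2;
        c->port = 80;
        if (have_fwmark)
                c->fwmark = 42;     /* only filled in when the attribute exists */
}

int main(void)
{
        struct svc_conf c;

        parse_conf(&c, 0);
        printf("fwmark=%u timeout=%u\n", c.fwmark, c.timeout);   /* both 0 */
        return 0;
}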
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index eb0ceb846527..fc70a49c0afd 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file, | |||
482 | if (copy_from_user(buf, input, size)) | 482 | if (copy_from_user(buf, input, size)) |
483 | return -EFAULT; | 483 | return -EFAULT; |
484 | 484 | ||
485 | while (isspace(*c)) | 485 | c = skip_spaces(c); |
486 | c++; | ||
487 | 486 | ||
488 | if (size - (c - buf) < 5) | 487 | if (size - (c - buf) < 5) |
489 | return c - buf; | 488 | return c - buf; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 020562164b56..e0516a22be2e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
415 | { | 415 | { |
416 | struct sock *sk = sock->sk; | 416 | struct sock *sk = sock->sk; |
417 | struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; | 417 | struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; |
418 | struct sk_buff *skb; | 418 | struct sk_buff *skb = NULL; |
419 | struct net_device *dev; | 419 | struct net_device *dev; |
420 | __be16 proto = 0; | 420 | __be16 proto = 0; |
421 | int err; | 421 | int err; |
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
437 | */ | 437 | */ |
438 | 438 | ||
439 | saddr->spkt_device[13] = 0; | 439 | saddr->spkt_device[13] = 0; |
440 | retry: | ||
440 | rcu_read_lock(); | 441 | rcu_read_lock(); |
441 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); | 442 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); |
442 | err = -ENODEV; | 443 | err = -ENODEV; |
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
456 | if (len > dev->mtu + dev->hard_header_len) | 457 | if (len > dev->mtu + dev->hard_header_len) |
457 | goto out_unlock; | 458 | goto out_unlock; |
458 | 459 | ||
459 | err = -ENOBUFS; | 460 | if (!skb) { |
460 | skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); | 461 | size_t reserved = LL_RESERVED_SPACE(dev); |
461 | 462 | unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; | |
462 | /* | 463 | |
463 | * If the write buffer is full, then tough. At this level the user | 464 | rcu_read_unlock(); |
464 | * gets to deal with the problem - do your own algorithmic backoffs. | 465 | skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL); |
465 | * That's far more flexible. | 466 | if (skb == NULL) |
466 | */ | 467 | return -ENOBUFS; |
467 | 468 | /* FIXME: Save some space for broken drivers that write a hard | |
468 | if (skb == NULL) | 469 | * header at transmission time by themselves. PPP is the notable |
469 | goto out_unlock; | 470 | * one here. This should really be fixed at the driver level. |
470 | 471 | */ | |
471 | /* | 472 | skb_reserve(skb, reserved); |
472 | * Fill it in | 473 | skb_reset_network_header(skb); |
473 | */ | 474 | |
474 | 475 | /* Try to align data part correctly */ | |
475 | /* FIXME: Save some space for broken drivers that write a | 476 | if (hhlen) { |
476 | * hard header at transmission time by themselves. PPP is the | 477 | skb->data -= hhlen; |
477 | * notable one here. This should really be fixed at the driver level. | 478 | skb->tail -= hhlen; |
478 | */ | 479 | if (len < hhlen) |
479 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 480 | skb_reset_network_header(skb); |
480 | skb_reset_network_header(skb); | 481 | } |
481 | 482 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | |
482 | /* Try to align data part correctly */ | 483 | if (err) |
483 | if (dev->header_ops) { | 484 | goto out_free; |
484 | skb->data -= dev->hard_header_len; | 485 | goto retry; |
485 | skb->tail -= dev->hard_header_len; | ||
486 | if (len < dev->hard_header_len) | ||
487 | skb_reset_network_header(skb); | ||
488 | } | 486 | } |
489 | 487 | ||
490 | /* Returns -EFAULT on error */ | 488 | |
491 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
492 | skb->protocol = proto; | 489 | skb->protocol = proto; |
493 | skb->dev = dev; | 490 | skb->dev = dev; |
494 | skb->priority = sk->sk_priority; | 491 | skb->priority = sk->sk_priority; |
495 | skb->mark = sk->sk_mark; | 492 | skb->mark = sk->sk_mark; |
496 | if (err) | ||
497 | goto out_free; | ||
498 | |||
499 | /* | ||
500 | * Now send it | ||
501 | */ | ||
502 | 493 | ||
503 | dev_queue_xmit(skb); | 494 | dev_queue_xmit(skb); |
504 | rcu_read_unlock(); | 495 | rcu_read_unlock(); |
505 | return len; | 496 | return len; |
506 | 497 | ||
507 | out_free: | ||
508 | kfree_skb(skb); | ||
509 | out_unlock: | 498 | out_unlock: |
510 | rcu_read_unlock(); | 499 | rcu_read_unlock(); |
500 | out_free: | ||
501 | kfree_skb(skb); | ||
511 | return err; | 502 | return err; |
512 | } | 503 | } |
513 | 504 | ||
diff --git a/net/rds/ib.c b/net/rds/ib.c index 536ebe5d3f6b..3b8992361042 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, | |||
182 | ic = conn->c_transport_data; | 182 | ic = conn->c_transport_data; |
183 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 183 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; |
184 | 184 | ||
185 | ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); | 185 | rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); |
186 | ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); | 186 | rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); |
187 | 187 | ||
188 | rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); | 188 | rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); |
189 | iinfo->max_send_wr = ic->i_send_ring.w_nr; | 189 | iinfo->max_send_wr = ic->i_send_ring.w_nr; |
diff --git a/net/rds/iw.c b/net/rds/iw.c index db224f7c2937..b28fa8525b24 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c | |||
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn, | |||
184 | ic = conn->c_transport_data; | 184 | ic = conn->c_transport_data; |
185 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 185 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; |
186 | 186 | ||
187 | ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); | 187 | rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); |
188 | ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); | 188 | rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); |
189 | 189 | ||
190 | rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); | 190 | rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); |
191 | iinfo->max_send_wr = ic->i_send_ring.w_nr; | 191 | iinfo->max_send_wr = ic->i_send_ring.w_nr; |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 448e5a0fcc2e..c218e07e5caf 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -579,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev, | |||
579 | 579 | ||
580 | static const char *rfkill_get_type_str(enum rfkill_type type) | 580 | static const char *rfkill_get_type_str(enum rfkill_type type) |
581 | { | 581 | { |
582 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1); | ||
583 | |||
582 | switch (type) { | 584 | switch (type) { |
583 | case RFKILL_TYPE_WLAN: | 585 | case RFKILL_TYPE_WLAN: |
584 | return "wlan"; | 586 | return "wlan"; |
@@ -597,8 +599,6 @@ static const char *rfkill_get_type_str(enum rfkill_type type) | |||
597 | default: | 599 | default: |
598 | BUG(); | 600 | BUG(); |
599 | } | 601 | } |
600 | |||
601 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1); | ||
602 | } | 602 | } |
603 | 603 | ||
604 | static ssize_t rfkill_type_show(struct device *dev, | 604 | static ssize_t rfkill_type_show(struct device *dev, |
diff --git a/net/socket.c b/net/socket.c index b94c3dd71015..769c386bd428 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = { | |||
312 | .kill_sb = kill_anon_super, | 312 | .kill_sb = kill_anon_super, |
313 | }; | 313 | }; |
314 | 314 | ||
315 | static int sockfs_delete_dentry(struct dentry *dentry) | ||
316 | { | ||
317 | /* | ||
318 | * At creation time, we pretended this dentry was hashed | ||
319 | * (by clearing DCACHE_UNHASHED bit in d_flags) | ||
320 | * At delete time, we restore the truth : not hashed. | ||
321 | * (so that dput() can proceed correctly) | ||
322 | */ | ||
323 | dentry->d_flags |= DCACHE_UNHASHED; | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | /* | 315 | /* |
328 | * sockfs_dname() is called from d_path(). | 316 | * sockfs_dname() is called from d_path(). |
329 | */ | 317 | */ |
@@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) | |||
334 | } | 322 | } |
335 | 323 | ||
336 | static const struct dentry_operations sockfs_dentry_operations = { | 324 | static const struct dentry_operations sockfs_dentry_operations = { |
337 | .d_delete = sockfs_delete_dentry, | ||
338 | .d_dname = sockfs_dname, | 325 | .d_dname = sockfs_dname, |
339 | }; | 326 | }; |
340 | 327 | ||
@@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = { | |||
355 | * but we take care of internal coherence yet. | 342 | * but we take care of internal coherence yet. |
356 | */ | 343 | */ |
357 | 344 | ||
358 | static int sock_alloc_fd(struct file **filep, int flags) | 345 | static int sock_alloc_file(struct socket *sock, struct file **f, int flags) |
359 | { | 346 | { |
347 | struct qstr name = { .name = "" }; | ||
348 | struct path path; | ||
349 | struct file *file; | ||
360 | int fd; | 350 | int fd; |
361 | 351 | ||
362 | fd = get_unused_fd_flags(flags); | 352 | fd = get_unused_fd_flags(flags); |
363 | if (likely(fd >= 0)) { | 353 | if (unlikely(fd < 0)) |
364 | struct file *file = get_empty_filp(); | 354 | return fd; |
365 | |||
366 | *filep = file; | ||
367 | if (unlikely(!file)) { | ||
368 | put_unused_fd(fd); | ||
369 | return -ENFILE; | ||
370 | } | ||
371 | } else | ||
372 | *filep = NULL; | ||
373 | return fd; | ||
374 | } | ||
375 | |||
376 | static int sock_attach_fd(struct socket *sock, struct file *file, int flags) | ||
377 | { | ||
378 | struct dentry *dentry; | ||
379 | struct qstr name = { .name = "" }; | ||
380 | 355 | ||
381 | dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); | 356 | path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); |
382 | if (unlikely(!dentry)) | 357 | if (unlikely(!path.dentry)) { |
358 | put_unused_fd(fd); | ||
383 | return -ENOMEM; | 359 | return -ENOMEM; |
360 | } | ||
361 | path.mnt = mntget(sock_mnt); | ||
384 | 362 | ||
385 | dentry->d_op = &sockfs_dentry_operations; | 363 | path.dentry->d_op = &sockfs_dentry_operations; |
386 | /* | 364 | d_instantiate(path.dentry, SOCK_INODE(sock)); |
387 | * We dont want to push this dentry into global dentry hash table. | 365 | SOCK_INODE(sock)->i_fop = &socket_file_ops; |
388 | * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED | ||
389 | * This permits a working /proc/$pid/fd/XXX on sockets | ||
390 | */ | ||
391 | dentry->d_flags &= ~DCACHE_UNHASHED; | ||
392 | d_instantiate(dentry, SOCK_INODE(sock)); | ||
393 | 366 | ||
394 | sock->file = file; | 367 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, |
395 | init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE, | ||
396 | &socket_file_ops); | 368 | &socket_file_ops); |
397 | SOCK_INODE(sock)->i_fop = &socket_file_ops; | 369 | if (unlikely(!file)) { |
370 | /* drop dentry, keep inode */ | ||
371 | atomic_inc(&path.dentry->d_inode->i_count); | ||
372 | path_put(&path); | ||
373 | put_unused_fd(fd); | ||
374 | return -ENFILE; | ||
375 | } | ||
376 | |||
377 | sock->file = file; | ||
398 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); | 378 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); |
399 | file->f_pos = 0; | 379 | file->f_pos = 0; |
400 | file->private_data = sock; | 380 | file->private_data = sock; |
401 | 381 | ||
402 | return 0; | 382 | *f = file; |
383 | return fd; | ||
403 | } | 384 | } |
404 | 385 | ||
405 | int sock_map_fd(struct socket *sock, int flags) | 386 | int sock_map_fd(struct socket *sock, int flags) |
406 | { | 387 | { |
407 | struct file *newfile; | 388 | struct file *newfile; |
408 | int fd = sock_alloc_fd(&newfile, flags); | 389 | int fd = sock_alloc_file(sock, &newfile, flags); |
409 | 390 | ||
410 | if (likely(fd >= 0)) { | 391 | if (likely(fd >= 0)) |
411 | int err = sock_attach_fd(sock, newfile, flags); | ||
412 | |||
413 | if (unlikely(err < 0)) { | ||
414 | put_filp(newfile); | ||
415 | put_unused_fd(fd); | ||
416 | return err; | ||
417 | } | ||
418 | fd_install(fd, newfile); | 392 | fd_install(fd, newfile); |
419 | } | 393 | |
420 | return fd; | 394 | return fd; |
421 | } | 395 | } |
422 | 396 | ||
@@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, | |||
1390 | if (err < 0) | 1364 | if (err < 0) |
1391 | goto out_release_both; | 1365 | goto out_release_both; |
1392 | 1366 | ||
1393 | fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC); | 1367 | fd1 = sock_alloc_file(sock1, &newfile1, flags); |
1394 | if (unlikely(fd1 < 0)) { | 1368 | if (unlikely(fd1 < 0)) { |
1395 | err = fd1; | 1369 | err = fd1; |
1396 | goto out_release_both; | 1370 | goto out_release_both; |
1397 | } | 1371 | } |
1398 | 1372 | ||
1399 | fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC); | 1373 | fd2 = sock_alloc_file(sock2, &newfile2, flags); |
1400 | if (unlikely(fd2 < 0)) { | 1374 | if (unlikely(fd2 < 0)) { |
1401 | err = fd2; | 1375 | err = fd2; |
1402 | put_filp(newfile1); | ||
1403 | put_unused_fd(fd1); | ||
1404 | goto out_release_both; | ||
1405 | } | ||
1406 | |||
1407 | err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK); | ||
1408 | if (unlikely(err < 0)) { | ||
1409 | goto out_fd2; | ||
1410 | } | ||
1411 | |||
1412 | err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK); | ||
1413 | if (unlikely(err < 0)) { | ||
1414 | fput(newfile1); | 1376 | fput(newfile1); |
1415 | goto out_fd1; | 1377 | put_unused_fd(fd1); |
1378 | sock_release(sock2); | ||
1379 | goto out; | ||
1416 | } | 1380 | } |
1417 | 1381 | ||
1418 | audit_fd_pair(fd1, fd2); | 1382 | audit_fd_pair(fd1, fd2); |
@@ -1438,16 +1402,6 @@ out_release_1: | |||
1438 | sock_release(sock1); | 1402 | sock_release(sock1); |
1439 | out: | 1403 | out: |
1440 | return err; | 1404 | return err; |
1441 | |||
1442 | out_fd2: | ||
1443 | put_filp(newfile1); | ||
1444 | sock_release(sock1); | ||
1445 | out_fd1: | ||
1446 | put_filp(newfile2); | ||
1447 | sock_release(sock2); | ||
1448 | put_unused_fd(fd1); | ||
1449 | put_unused_fd(fd2); | ||
1450 | goto out; | ||
1451 | } | 1405 | } |
1452 | 1406 | ||
1453 | /* | 1407 | /* |
@@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, | |||
1551 | */ | 1505 | */ |
1552 | __module_get(newsock->ops->owner); | 1506 | __module_get(newsock->ops->owner); |
1553 | 1507 | ||
1554 | newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC); | 1508 | newfd = sock_alloc_file(newsock, &newfile, flags); |
1555 | if (unlikely(newfd < 0)) { | 1509 | if (unlikely(newfd < 0)) { |
1556 | err = newfd; | 1510 | err = newfd; |
1557 | sock_release(newsock); | 1511 | sock_release(newsock); |
1558 | goto out_put; | 1512 | goto out_put; |
1559 | } | 1513 | } |
1560 | 1514 | ||
1561 | err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK); | ||
1562 | if (err < 0) | ||
1563 | goto out_fd_simple; | ||
1564 | |||
1565 | err = security_socket_accept(sock, newsock); | 1515 | err = security_socket_accept(sock, newsock); |
1566 | if (err) | 1516 | if (err) |
1567 | goto out_fd; | 1517 | goto out_fd; |
@@ -1591,11 +1541,6 @@ out_put: | |||
1591 | fput_light(sock->file, fput_needed); | 1541 | fput_light(sock->file, fput_needed); |
1592 | out: | 1542 | out: |
1593 | return err; | 1543 | return err; |
1594 | out_fd_simple: | ||
1595 | sock_release(newsock); | ||
1596 | put_filp(newfile); | ||
1597 | put_unused_fd(newfd); | ||
1598 | goto out_put; | ||
1599 | out_fd: | 1544 | out_fd: |
1600 | fput(newfile); | 1545 | fput(newfile); |
1601 | put_unused_fd(newfd); | 1546 | put_unused_fd(newfd); |
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index c7450c8f0a7c..6dcdd2517819 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c | |||
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap, | |||
55 | 55 | ||
56 | /* | 56 | /* |
57 | * RFC 4291, Section 2.2.1 | 57 | * RFC 4291, Section 2.2.1 |
58 | * | ||
59 | * To keep the result as short as possible, especially | ||
60 | * since we don't shorthand, we don't want leading zeros | ||
61 | * in each halfword, so avoid %pI6. | ||
62 | */ | 58 | */ |
63 | return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x", | 59 | return snprintf(buf, buflen, "%pI6c", addr); |
64 | ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]), | ||
65 | ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]), | ||
66 | ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]), | ||
67 | ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7])); | ||
68 | } | 60 | } |
69 | 61 | ||
70 | static size_t rpc_ntop6(const struct sockaddr *sap, | 62 | static size_t rpc_ntop6(const struct sockaddr *sap, |
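Editor's note: the sunrpc/addr.c hunk drops the hand-rolled eight-halfword formatter in favour of the kernel's %pI6c specifier, which prints the address in compressed form. The userspace analogue of the before/after output is plain halfword printing versus inet_ntop(), roughly:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        struct in6_addr a;
        unsigned short hw[8];
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::1", &a);
        memcpy(hw, &a, sizeof(hw));

        /* old style: every halfword spelled out, no :: compression */
        printf("%x:%x:%x:%x:%x:%x:%x:%x\n",
               ntohs(hw[0]), ntohs(hw[1]), ntohs(hw[2]), ntohs(hw[3]),
               ntohs(hw[4]), ntohs(hw[5]), ntohs(hw[6]), ntohs(hw[7]));

        /* compressed, what %pI6c produces in the kernel: 2001:db8::1 */
        printf("%s\n", inet_ntop(AF_INET6, &a, buf, sizeof(buf)));
        return 0;
}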
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 7535a7bed2fa..f394fc190a49 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred) | |||
123 | clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); | 123 | clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); |
124 | } | 124 | } |
125 | 125 | ||
126 | static void | 126 | static int |
127 | rpcauth_unhash_cred(struct rpc_cred *cred) | 127 | rpcauth_unhash_cred(struct rpc_cred *cred) |
128 | { | 128 | { |
129 | spinlock_t *cache_lock; | 129 | spinlock_t *cache_lock; |
130 | int ret; | ||
130 | 131 | ||
131 | cache_lock = &cred->cr_auth->au_credcache->lock; | 132 | cache_lock = &cred->cr_auth->au_credcache->lock; |
132 | spin_lock(cache_lock); | 133 | spin_lock(cache_lock); |
133 | if (atomic_read(&cred->cr_count) == 0) | 134 | ret = atomic_read(&cred->cr_count) == 0; |
135 | if (ret) | ||
134 | rpcauth_unhash_cred_locked(cred); | 136 | rpcauth_unhash_cred_locked(cred); |
135 | spin_unlock(cache_lock); | 137 | spin_unlock(cache_lock); |
138 | return ret; | ||
136 | } | 139 | } |
137 | 140 | ||
138 | /* | 141 | /* |
@@ -446,31 +449,35 @@ void | |||
446 | put_rpccred(struct rpc_cred *cred) | 449 | put_rpccred(struct rpc_cred *cred) |
447 | { | 450 | { |
448 | /* Fast path for unhashed credentials */ | 451 | /* Fast path for unhashed credentials */ |
449 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | 452 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) { |
450 | goto need_lock; | 453 | if (atomic_dec_and_test(&cred->cr_count)) |
451 | 454 | cred->cr_ops->crdestroy(cred); | |
452 | if (!atomic_dec_and_test(&cred->cr_count)) | ||
453 | return; | 455 | return; |
454 | goto out_destroy; | 456 | } |
455 | need_lock: | 457 | |
456 | if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) | 458 | if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) |
457 | return; | 459 | return; |
458 | if (!list_empty(&cred->cr_lru)) { | 460 | if (!list_empty(&cred->cr_lru)) { |
459 | number_cred_unused--; | 461 | number_cred_unused--; |
460 | list_del_init(&cred->cr_lru); | 462 | list_del_init(&cred->cr_lru); |
461 | } | 463 | } |
462 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) | ||
463 | rpcauth_unhash_cred(cred); | ||
464 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { | 464 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { |
465 | cred->cr_expire = jiffies; | 465 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) { |
466 | list_add_tail(&cred->cr_lru, &cred_unused); | 466 | cred->cr_expire = jiffies; |
467 | number_cred_unused++; | 467 | list_add_tail(&cred->cr_lru, &cred_unused); |
468 | spin_unlock(&rpc_credcache_lock); | 468 | number_cred_unused++; |
469 | return; | 469 | goto out_nodestroy; |
470 | } | ||
471 | if (!rpcauth_unhash_cred(cred)) { | ||
472 | /* We were hashed and someone looked us up... */ | ||
473 | goto out_nodestroy; | ||
474 | } | ||
470 | } | 475 | } |
471 | spin_unlock(&rpc_credcache_lock); | 476 | spin_unlock(&rpc_credcache_lock); |
472 | out_destroy: | ||
473 | cred->cr_ops->crdestroy(cred); | 477 | cred->cr_ops->crdestroy(cred); |
478 | return; | ||
479 | out_nodestroy: | ||
480 | spin_unlock(&rpc_credcache_lock); | ||
474 | } | 481 | } |
475 | EXPORT_SYMBOL_GPL(put_rpccred); | 482 | EXPORT_SYMBOL_GPL(put_rpccred); |
476 | 483 | ||
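The reworked put_rpccred() above funnels the final reference drop through atomic_dec_and_lock(), so teardown only happens with rpc_credcache_lock held, and rpcauth_unhash_cred() now reports whether the cred was actually unhashed (a concurrent lookup may have revived it). A minimal userspace sketch of the decrement-and-lock idiom using C11 atomics and pthreads; names and structure are hypothetical, not the kernel helper:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns true with cache_lock held iff the count dropped to zero. */
    static bool dec_and_lock(atomic_int *count)
    {
        /* Fast path: if the count stays above zero, avoid the lock. */
        int old = atomic_load(count);
        while (old > 1) {
            if (atomic_compare_exchange_weak(count, &old, old - 1))
                return false;
        }

        pthread_mutex_lock(&cache_lock);
        if (atomic_fetch_sub(count, 1) == 1)
            return true;        /* last reference; lock stays held */
        pthread_mutex_unlock(&cache_lock);
        return false;
    }

    int main(void)
    {
        atomic_int refs = 1;

        if (dec_and_lock(&refs)) {
            printf("last ref dropped under the lock\n");
            pthread_mutex_unlock(&cache_lock);
        }
        return 0;
    }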
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index fc6a43ccd950..3c3c50f38a1c 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) | |||
304 | * to that upcall instead of adding the new upcall. | 304 | * to that upcall instead of adding the new upcall. |
305 | */ | 305 | */ |
306 | static inline struct gss_upcall_msg * | 306 | static inline struct gss_upcall_msg * |
307 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) | 307 | gss_add_msg(struct gss_upcall_msg *gss_msg) |
308 | { | 308 | { |
309 | struct rpc_inode *rpci = gss_msg->inode; | 309 | struct rpc_inode *rpci = gss_msg->inode; |
310 | struct inode *inode = &rpci->vfs_inode; | 310 | struct inode *inode = &rpci->vfs_inode; |
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr | |||
445 | gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); | 445 | gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); |
446 | if (IS_ERR(gss_new)) | 446 | if (IS_ERR(gss_new)) |
447 | return gss_new; | 447 | return gss_new; |
448 | gss_msg = gss_add_msg(gss_auth, gss_new); | 448 | gss_msg = gss_add_msg(gss_new); |
449 | if (gss_msg == gss_new) { | 449 | if (gss_msg == gss_new) { |
450 | struct inode *inode = &gss_new->inode->vfs_inode; | 450 | struct inode *inode = &gss_new->inode->vfs_inode; |
451 | int res = rpc_queue_upcall(inode, &gss_new->msg); | 451 | int res = rpc_queue_upcall(inode, &gss_new->msg); |
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task) | |||
485 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 485 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, |
486 | cred->cr_uid); | 486 | cred->cr_uid); |
487 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 487 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
488 | if (IS_ERR(gss_msg) == -EAGAIN) { | 488 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
489 | /* XXX: warning on the first, under the assumption we | 489 | /* XXX: warning on the first, under the assumption we |
490 | * shouldn't normally hit this case on a refresh. */ | 490 | * shouldn't normally hit this case on a refresh. */ |
491 | warn_gssd(); | 491 | warn_gssd(); |
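The one-character auth_gss.c fix above matters because IS_ERR() only reports whether a pointer encodes an error, while PTR_ERR() recovers which error it is; comparing IS_ERR() against -EAGAIN can never be true. A standalone sketch that merely mimics the kernel's ERR_PTR family in userspace:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO   4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *p = ERR_PTR(-EAGAIN);

        /* IS_ERR() yields 0 or 1, so this is never -EAGAIN: prints 0 */
        printf("IS_ERR(p) == -EAGAIN? %d\n", IS_ERR(p) == -EAGAIN);
        /* PTR_ERR() recovers the encoded errno: prints 1 */
        printf("PTR_ERR(p) == -EAGAIN? %d\n", PTR_ERR(p) == -EAGAIN);
        return 0;
    }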
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 38829e20500b..154034b675bd 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task); | |||
79 | 79 | ||
80 | static __be32 *rpc_encode_header(struct rpc_task *task); | 80 | static __be32 *rpc_encode_header(struct rpc_task *task); |
81 | static __be32 *rpc_verify_header(struct rpc_task *task); | 81 | static __be32 *rpc_verify_header(struct rpc_task *task); |
82 | static int rpc_ping(struct rpc_clnt *clnt, int flags); | 82 | static int rpc_ping(struct rpc_clnt *clnt); |
83 | 83 | ||
84 | static void rpc_register_client(struct rpc_clnt *clnt) | 84 | static void rpc_register_client(struct rpc_clnt *clnt) |
85 | { | 85 | { |
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
340 | return clnt; | 340 | return clnt; |
341 | 341 | ||
342 | if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { | 342 | if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { |
343 | int err = rpc_ping(clnt, RPC_TASK_SOFT); | 343 | int err = rpc_ping(clnt); |
344 | if (err != 0) { | 344 | if (err != 0) { |
345 | rpc_shutdown_client(clnt); | 345 | rpc_shutdown_client(clnt); |
346 | return ERR_PTR(err); | 346 | return ERR_PTR(err); |
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, | |||
528 | clnt->cl_prog = program->number; | 528 | clnt->cl_prog = program->number; |
529 | clnt->cl_vers = version->number; | 529 | clnt->cl_vers = version->number; |
530 | clnt->cl_stats = program->stats; | 530 | clnt->cl_stats = program->stats; |
531 | err = rpc_ping(clnt, RPC_TASK_SOFT); | 531 | err = rpc_ping(clnt); |
532 | if (err != 0) { | 532 | if (err != 0) { |
533 | rpc_shutdown_client(clnt); | 533 | rpc_shutdown_client(clnt); |
534 | clnt = ERR_PTR(err); | 534 | clnt = ERR_PTR(err); |
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task) | |||
1060 | goto retry_timeout; | 1060 | goto retry_timeout; |
1061 | case -EPFNOSUPPORT: | 1061 | case -EPFNOSUPPORT: |
1062 | /* server doesn't support any rpcbind version we know of */ | 1062 | /* server doesn't support any rpcbind version we know of */ |
1063 | dprintk("RPC: %5u remote rpcbind service unavailable\n", | 1063 | dprintk("RPC: %5u unrecognized remote rpcbind service\n", |
1064 | task->tk_pid); | 1064 | task->tk_pid); |
1065 | break; | 1065 | break; |
1066 | case -EPROTONOSUPPORT: | 1066 | case -EPROTONOSUPPORT: |
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task) | |||
1069 | task->tk_status = 0; | 1069 | task->tk_status = 0; |
1070 | task->tk_action = call_bind; | 1070 | task->tk_action = call_bind; |
1071 | return; | 1071 | return; |
1072 | case -ECONNREFUSED: /* connection problems */ | ||
1073 | case -ECONNRESET: | ||
1074 | case -ENOTCONN: | ||
1075 | case -EHOSTDOWN: | ||
1076 | case -EHOSTUNREACH: | ||
1077 | case -ENETUNREACH: | ||
1078 | case -EPIPE: | ||
1079 | dprintk("RPC: %5u remote rpcbind unreachable: %d\n", | ||
1080 | task->tk_pid, task->tk_status); | ||
1081 | if (!RPC_IS_SOFTCONN(task)) { | ||
1082 | rpc_delay(task, 5*HZ); | ||
1083 | goto retry_timeout; | ||
1084 | } | ||
1085 | status = task->tk_status; | ||
1086 | break; | ||
1072 | default: | 1087 | default: |
1073 | dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", | 1088 | dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", |
1074 | task->tk_pid, -task->tk_status); | 1089 | task->tk_pid, -task->tk_status); |
@@ -1180,11 +1195,25 @@ static void | |||
1180 | call_transmit_status(struct rpc_task *task) | 1195 | call_transmit_status(struct rpc_task *task) |
1181 | { | 1196 | { |
1182 | task->tk_action = call_status; | 1197 | task->tk_action = call_status; |
1198 | |||
1199 | /* | ||
1200 | * Common case: success. Force the compiler to put this | ||
1201 | * test first. | ||
1202 | */ | ||
1203 | if (task->tk_status == 0) { | ||
1204 | xprt_end_transmit(task); | ||
1205 | rpc_task_force_reencode(task); | ||
1206 | return; | ||
1207 | } | ||
1208 | |||
1183 | switch (task->tk_status) { | 1209 | switch (task->tk_status) { |
1184 | case -EAGAIN: | 1210 | case -EAGAIN: |
1185 | break; | 1211 | break; |
1186 | default: | 1212 | default: |
1213 | dprint_status(task); | ||
1187 | xprt_end_transmit(task); | 1214 | xprt_end_transmit(task); |
1215 | rpc_task_force_reencode(task); | ||
1216 | break; | ||
1188 | /* | 1217 | /* |
1189 | * Special cases: if we've been waiting on the | 1218 | * Special cases: if we've been waiting on the |
1190 | * socket's write_space() callback, or if the | 1219 | * socket's write_space() callback, or if the |
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task) | |||
1192 | * then hold onto the transport lock. | 1221 | * then hold onto the transport lock. |
1193 | */ | 1222 | */ |
1194 | case -ECONNREFUSED: | 1223 | case -ECONNREFUSED: |
1195 | case -ECONNRESET: | ||
1196 | case -ENOTCONN: | ||
1197 | case -EHOSTDOWN: | 1224 | case -EHOSTDOWN: |
1198 | case -EHOSTUNREACH: | 1225 | case -EHOSTUNREACH: |
1199 | case -ENETUNREACH: | 1226 | case -ENETUNREACH: |
1227 | if (RPC_IS_SOFTCONN(task)) { | ||
1228 | xprt_end_transmit(task); | ||
1229 | rpc_exit(task, task->tk_status); | ||
1230 | break; | ||
1231 | } | ||
1232 | case -ECONNRESET: | ||
1233 | case -ENOTCONN: | ||
1200 | case -EPIPE: | 1234 | case -EPIPE: |
1201 | rpc_task_force_reencode(task); | 1235 | rpc_task_force_reencode(task); |
1202 | } | 1236 | } |
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task) | |||
1346 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); | 1380 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); |
1347 | task->tk_timeouts++; | 1381 | task->tk_timeouts++; |
1348 | 1382 | ||
1383 | if (RPC_IS_SOFTCONN(task)) { | ||
1384 | rpc_exit(task, -ETIMEDOUT); | ||
1385 | return; | ||
1386 | } | ||
1349 | if (RPC_IS_SOFT(task)) { | 1387 | if (RPC_IS_SOFT(task)) { |
1350 | if (clnt->cl_chatty) | 1388 | if (clnt->cl_chatty) |
1351 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", | 1389 | printk(KERN_NOTICE "%s: server %s not responding, timed out\n", |
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = { | |||
1675 | .p_decode = rpcproc_decode_null, | 1713 | .p_decode = rpcproc_decode_null, |
1676 | }; | 1714 | }; |
1677 | 1715 | ||
1678 | static int rpc_ping(struct rpc_clnt *clnt, int flags) | 1716 | static int rpc_ping(struct rpc_clnt *clnt) |
1679 | { | 1717 | { |
1680 | struct rpc_message msg = { | 1718 | struct rpc_message msg = { |
1681 | .rpc_proc = &rpcproc_null, | 1719 | .rpc_proc = &rpcproc_null, |
1682 | }; | 1720 | }; |
1683 | int err; | 1721 | int err; |
1684 | msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); | 1722 | msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); |
1685 | err = rpc_call_sync(clnt, &msg, flags); | 1723 | err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); |
1686 | put_rpccred(msg.rpc_cred); | 1724 | put_rpccred(msg.rpc_cred); |
1687 | return err; | 1725 | return err; |
1688 | } | 1726 | } |
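The clnt.c changes above make rpc_ping() a soft-connect call and teach call_bind_status(), call_transmit_status() and call_timeout() to fail RPC_IS_SOFTCONN tasks immediately instead of delaying and retrying. A hedged userspace sketch of that policy split; the flag values and task layout below are placeholders, not the real rpc_task definitions:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RPC_TASK_SOFT      0x01   /* placeholder bit values */
    #define RPC_TASK_SOFTCONN  0x02

    struct task { unsigned flags; int status; };

    static bool handle_connect_error(const struct task *t)
    {
        if (t->flags & RPC_TASK_SOFTCONN) {
            printf("softconn task: fail fast with %d\n", t->status);
            return false;          /* caller exits the task */
        }
        printf("hard task: delay and retry\n");
        return true;               /* caller requeues the task */
    }

    int main(void)
    {
        struct task ping = { RPC_TASK_SOFT | RPC_TASK_SOFTCONN, -ECONNREFUSED };
        struct task io   = { 0, -ECONNREFUSED };

        handle_connect_error(&ping);
        handle_connect_error(&io);
        return 0;
    }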
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 830faf4d9997..3e3772d8eb92 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/in6.h> | 20 | #include <linux/in6.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/mutex.h> | ||
23 | #include <net/ipv6.h> | 24 | #include <net/ipv6.h> |
24 | 25 | ||
25 | #include <linux/sunrpc/clnt.h> | 26 | #include <linux/sunrpc/clnt.h> |
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *); | |||
110 | static void rpcb_map_release(void *data); | 111 | static void rpcb_map_release(void *data); |
111 | static struct rpc_program rpcb_program; | 112 | static struct rpc_program rpcb_program; |
112 | 113 | ||
114 | static struct rpc_clnt * rpcb_local_clnt; | ||
115 | static struct rpc_clnt * rpcb_local_clnt4; | ||
116 | |||
113 | struct rpcbind_args { | 117 | struct rpcbind_args { |
114 | struct rpc_xprt * r_xprt; | 118 | struct rpc_xprt * r_xprt; |
115 | 119 | ||
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = { | |||
163 | .sin_port = htons(RPCBIND_PORT), | 167 | .sin_port = htons(RPCBIND_PORT), |
164 | }; | 168 | }; |
165 | 169 | ||
166 | static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr, | 170 | static DEFINE_MUTEX(rpcb_create_local_mutex); |
167 | size_t addrlen, u32 version) | 171 | |
172 | /* | ||
173 | * Returns zero on success, otherwise a negative errno value | ||
174 | * is returned. | ||
175 | */ | ||
176 | static int rpcb_create_local(void) | ||
168 | { | 177 | { |
169 | struct rpc_create_args args = { | 178 | struct rpc_create_args args = { |
170 | .protocol = XPRT_TRANSPORT_UDP, | 179 | .protocol = XPRT_TRANSPORT_TCP, |
171 | .address = addr, | 180 | .address = (struct sockaddr *)&rpcb_inaddr_loopback, |
172 | .addrsize = addrlen, | 181 | .addrsize = sizeof(rpcb_inaddr_loopback), |
173 | .servername = "localhost", | 182 | .servername = "localhost", |
174 | .program = &rpcb_program, | 183 | .program = &rpcb_program, |
175 | .version = version, | 184 | .version = RPCBVERS_2, |
176 | .authflavor = RPC_AUTH_UNIX, | 185 | .authflavor = RPC_AUTH_UNIX, |
177 | .flags = RPC_CLNT_CREATE_NOPING, | 186 | .flags = RPC_CLNT_CREATE_NOPING, |
178 | }; | 187 | }; |
188 | struct rpc_clnt *clnt, *clnt4; | ||
189 | int result = 0; | ||
190 | |||
191 | if (rpcb_local_clnt) | ||
192 | return result; | ||
193 | |||
194 | mutex_lock(&rpcb_create_local_mutex); | ||
195 | if (rpcb_local_clnt) | ||
196 | goto out; | ||
197 | |||
198 | clnt = rpc_create(&args); | ||
199 | if (IS_ERR(clnt)) { | ||
200 | dprintk("RPC: failed to create local rpcbind " | ||
201 | "client (errno %ld).\n", PTR_ERR(clnt)); | ||
202 | result = -PTR_ERR(clnt); | ||
203 | goto out; | ||
204 | } | ||
179 | 205 | ||
180 | return rpc_create(&args); | 206 | /* |
207 | * This results in an RPC ping. On systems running portmapper, | ||
208 | * the v4 ping will fail. Proceed anyway, but disallow rpcb | ||
209 | * v4 upcalls. | ||
210 | */ | ||
211 | clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4); | ||
212 | if (IS_ERR(clnt4)) { | ||
213 | dprintk("RPC: failed to create local rpcbind v4 " | ||
214 | "cleint (errno %ld).\n", PTR_ERR(clnt4)); | ||
215 | clnt4 = NULL; | ||
216 | } | ||
217 | |||
218 | rpcb_local_clnt = clnt; | ||
219 | rpcb_local_clnt4 = clnt4; | ||
220 | |||
221 | out: | ||
222 | mutex_unlock(&rpcb_create_local_mutex); | ||
223 | return result; | ||
181 | } | 224 | } |
182 | 225 | ||
183 | static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | 226 | static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, |
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | |||
209 | return rpc_create(&args); | 252 | return rpc_create(&args); |
210 | } | 253 | } |
211 | 254 | ||
212 | static int rpcb_register_call(const u32 version, struct rpc_message *msg) | 255 | static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) |
213 | { | 256 | { |
214 | struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback; | ||
215 | size_t addrlen = sizeof(rpcb_inaddr_loopback); | ||
216 | struct rpc_clnt *rpcb_clnt; | ||
217 | int result, error = 0; | 257 | int result, error = 0; |
218 | 258 | ||
219 | msg->rpc_resp = &result; | 259 | msg->rpc_resp = &result; |
220 | 260 | ||
221 | rpcb_clnt = rpcb_create_local(addr, addrlen, version); | 261 | error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); |
222 | if (!IS_ERR(rpcb_clnt)) { | ||
223 | error = rpc_call_sync(rpcb_clnt, msg, 0); | ||
224 | rpc_shutdown_client(rpcb_clnt); | ||
225 | } else | ||
226 | error = PTR_ERR(rpcb_clnt); | ||
227 | |||
228 | if (error < 0) { | 262 | if (error < 0) { |
229 | dprintk("RPC: failed to contact local rpcbind " | 263 | dprintk("RPC: failed to contact local rpcbind " |
230 | "server (errno %d).\n", -error); | 264 | "server (errno %d).\n", -error); |
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port) | |||
279 | struct rpc_message msg = { | 313 | struct rpc_message msg = { |
280 | .rpc_argp = &map, | 314 | .rpc_argp = &map, |
281 | }; | 315 | }; |
316 | int error; | ||
317 | |||
318 | error = rpcb_create_local(); | ||
319 | if (error) | ||
320 | return error; | ||
282 | 321 | ||
283 | dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " | 322 | dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " |
284 | "rpcbind\n", (port ? "" : "un"), | 323 | "rpcbind\n", (port ? "" : "un"), |
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port) | |||
288 | if (port) | 327 | if (port) |
289 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; | 328 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; |
290 | 329 | ||
291 | return rpcb_register_call(RPCBVERS_2, &msg); | 330 | return rpcb_register_call(rpcb_local_clnt, &msg); |
292 | } | 331 | } |
293 | 332 | ||
294 | /* | 333 | /* |
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap, | |||
313 | if (port) | 352 | if (port) |
314 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; | 353 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; |
315 | 354 | ||
316 | result = rpcb_register_call(RPCBVERS_4, msg); | 355 | result = rpcb_register_call(rpcb_local_clnt4, msg); |
317 | kfree(map->r_addr); | 356 | kfree(map->r_addr); |
318 | return result; | 357 | return result; |
319 | } | 358 | } |
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap, | |||
340 | if (port) | 379 | if (port) |
341 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; | 380 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; |
342 | 381 | ||
343 | result = rpcb_register_call(RPCBVERS_4, msg); | 382 | result = rpcb_register_call(rpcb_local_clnt4, msg); |
344 | kfree(map->r_addr); | 383 | kfree(map->r_addr); |
345 | return result; | 384 | return result; |
346 | } | 385 | } |
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg) | |||
356 | map->r_addr = ""; | 395 | map->r_addr = ""; |
357 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; | 396 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; |
358 | 397 | ||
359 | return rpcb_register_call(RPCBVERS_4, msg); | 398 | return rpcb_register_call(rpcb_local_clnt4, msg); |
360 | } | 399 | } |
361 | 400 | ||
362 | /** | 401 | /** |
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version, | |||
414 | struct rpc_message msg = { | 453 | struct rpc_message msg = { |
415 | .rpc_argp = &map, | 454 | .rpc_argp = &map, |
416 | }; | 455 | }; |
456 | int error; | ||
457 | |||
458 | error = rpcb_create_local(); | ||
459 | if (error) | ||
460 | return error; | ||
461 | if (rpcb_local_clnt4 == NULL) | ||
462 | return -EPROTONOSUPPORT; | ||
417 | 463 | ||
418 | if (address == NULL) | 464 | if (address == NULL) |
419 | return rpcb_unregister_all_protofamilies(&msg); | 465 | return rpcb_unregister_all_protofamilies(&msg); |
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi | |||
491 | .rpc_message = &msg, | 537 | .rpc_message = &msg, |
492 | .callback_ops = &rpcb_getport_ops, | 538 | .callback_ops = &rpcb_getport_ops, |
493 | .callback_data = map, | 539 | .callback_data = map, |
494 | .flags = RPC_TASK_ASYNC, | 540 | .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN, |
495 | }; | 541 | }; |
496 | 542 | ||
497 | return rpc_run_task(&task_setup_data); | 543 | return rpc_run_task(&task_setup_data); |
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = { | |||
1027 | .version = rpcb_version, | 1073 | .version = rpcb_version, |
1028 | .stats = &rpcb_stats, | 1074 | .stats = &rpcb_stats, |
1029 | }; | 1075 | }; |
1076 | |||
1077 | /** | ||
1078 | * cleanup_rpcb_clnt - shut down the local rpcbind clients | ||
1079 | * | ||
1080 | */ | ||
1081 | void cleanup_rpcb_clnt(void) | ||
1082 | { | ||
1083 | if (rpcb_local_clnt4) | ||
1084 | rpc_shutdown_client(rpcb_local_clnt4); | ||
1085 | if (rpcb_local_clnt) | ||
1086 | rpc_shutdown_client(rpcb_local_clnt); | ||
1087 | } | ||
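rpcb_create_local() above now builds the shared rpcbind clients on first use: it checks the pointer once without the lock for the common case, then re-checks under rpcb_create_local_mutex so only one caller creates them. A userspace sketch of that lazy-init pattern with stand-in names; the unlocked fast-path read is as "benignly" racy here as in the original:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct client { int id; };

    static struct client *local_clnt;
    static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int create_local(void)
    {
        int result = 0;

        if (local_clnt)                 /* unlocked fast path */
            return 0;

        pthread_mutex_lock(&create_mutex);
        if (local_clnt)                 /* someone else won the race */
            goto out;

        local_clnt = malloc(sizeof(*local_clnt));
        if (!local_clnt) {
            result = -1;
            goto out;
        }
        local_clnt->id = 42;
    out:
        pthread_mutex_unlock(&create_mutex);
        return result;
    }

    int main(void)
    {
        if (create_local() == 0 && create_local() == 0)
            printf("client %d created exactly once\n", local_clnt->id);
        return 0;
    }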
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef74ba0666c..aae6907fd546 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam | |||
210 | { | 210 | { |
211 | __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); | 211 | __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); |
212 | } | 212 | } |
213 | EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue); | ||
213 | 214 | ||
214 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) | 215 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) |
215 | { | 216 | { |
@@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r | |||
385 | } | 386 | } |
386 | 387 | ||
387 | /* | 388 | /* |
389 | * Tests whether rpc queue is empty | ||
390 | */ | ||
391 | int rpc_queue_empty(struct rpc_wait_queue *queue) | ||
392 | { | ||
393 | int res; | ||
394 | |||
395 | spin_lock_bh(&queue->lock); | ||
396 | res = queue->qlen; | ||
397 | spin_unlock_bh(&queue->lock); | ||
398 | return (res == 0); | ||
399 | } | ||
400 | EXPORT_SYMBOL_GPL(rpc_queue_empty); | ||
401 | |||
402 | /* | ||
388 | * Wake up a task on a specific queue | 403 | * Wake up a task on a specific queue |
389 | */ | 404 | */ |
390 | void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) | 405 | void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 8cce92189019..f438347d817b 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -24,6 +24,8 @@ | |||
24 | 24 | ||
25 | extern struct cache_detail ip_map_cache, unix_gid_cache; | 25 | extern struct cache_detail ip_map_cache, unix_gid_cache; |
26 | 26 | ||
27 | extern void cleanup_rpcb_clnt(void); | ||
28 | |||
27 | static int __init | 29 | static int __init |
28 | init_sunrpc(void) | 30 | init_sunrpc(void) |
29 | { | 31 | { |
@@ -53,6 +55,7 @@ out: | |||
53 | static void __exit | 55 | static void __exit |
54 | cleanup_sunrpc(void) | 56 | cleanup_sunrpc(void) |
55 | { | 57 | { |
58 | cleanup_rpcb_clnt(); | ||
56 | rpcauth_remove_module(); | 59 | rpcauth_remove_module(); |
57 | cleanup_socket_xprt(); | 60 | cleanup_socket_xprt(); |
58 | svc_cleanup_xprt_sock(); | 61 | svc_cleanup_xprt_sock(); |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b845e2293dfe..1c924ee0a1ef 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -16,8 +16,6 @@ | |||
16 | 16 | ||
17 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 17 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
18 | 18 | ||
19 | #define SVC_MAX_WAKING 5 | ||
20 | |||
21 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); | 19 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); |
22 | static int svc_deferred_recv(struct svc_rqst *rqstp); | 20 | static int svc_deferred_recv(struct svc_rqst *rqstp); |
23 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | 21 | static struct cache_deferred_req *svc_defer(struct cache_req *req); |
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
306 | struct svc_pool *pool; | 304 | struct svc_pool *pool; |
307 | struct svc_rqst *rqstp; | 305 | struct svc_rqst *rqstp; |
308 | int cpu; | 306 | int cpu; |
309 | int thread_avail; | ||
310 | 307 | ||
311 | if (!(xprt->xpt_flags & | 308 | if (!(xprt->xpt_flags & |
312 | ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) | 309 | ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) |
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
318 | 315 | ||
319 | spin_lock_bh(&pool->sp_lock); | 316 | spin_lock_bh(&pool->sp_lock); |
320 | 317 | ||
318 | if (!list_empty(&pool->sp_threads) && | ||
319 | !list_empty(&pool->sp_sockets)) | ||
320 | printk(KERN_ERR | ||
321 | "svc_xprt_enqueue: " | ||
322 | "threads and transports both waiting??\n"); | ||
323 | |||
321 | if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { | 324 | if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { |
322 | /* Don't enqueue dead transports */ | 325 | /* Don't enqueue dead transports */ |
323 | dprintk("svc: transport %p is dead, not enqueued\n", xprt); | 326 | dprintk("svc: transport %p is dead, not enqueued\n", xprt); |
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
358 | } | 361 | } |
359 | 362 | ||
360 | process: | 363 | process: |
361 | /* Work out whether threads are available */ | 364 | if (!list_empty(&pool->sp_threads)) { |
362 | thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */ | ||
363 | if (pool->sp_nwaking >= SVC_MAX_WAKING) { | ||
364 | /* too many threads are runnable and trying to wake up */ | ||
365 | thread_avail = 0; | ||
366 | pool->sp_stats.overloads_avoided++; | ||
367 | } | ||
368 | |||
369 | if (thread_avail) { | ||
370 | rqstp = list_entry(pool->sp_threads.next, | 365 | rqstp = list_entry(pool->sp_threads.next, |
371 | struct svc_rqst, | 366 | struct svc_rqst, |
372 | rq_list); | 367 | rq_list); |
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
381 | svc_xprt_get(xprt); | 376 | svc_xprt_get(xprt); |
382 | rqstp->rq_reserved = serv->sv_max_mesg; | 377 | rqstp->rq_reserved = serv->sv_max_mesg; |
383 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 378 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
384 | rqstp->rq_waking = 1; | ||
385 | pool->sp_nwaking++; | ||
386 | pool->sp_stats.threads_woken++; | 379 | pool->sp_stats.threads_woken++; |
387 | BUG_ON(xprt->xpt_pool != pool); | 380 | BUG_ON(xprt->xpt_pool != pool); |
388 | wake_up(&rqstp->rq_wait); | 381 | wake_up(&rqstp->rq_wait); |
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
651 | return -EINTR; | 644 | return -EINTR; |
652 | 645 | ||
653 | spin_lock_bh(&pool->sp_lock); | 646 | spin_lock_bh(&pool->sp_lock); |
654 | if (rqstp->rq_waking) { | ||
655 | rqstp->rq_waking = 0; | ||
656 | pool->sp_nwaking--; | ||
657 | BUG_ON(pool->sp_nwaking < 0); | ||
658 | } | ||
659 | xprt = svc_xprt_dequeue(pool); | 647 | xprt = svc_xprt_dequeue(pool); |
660 | if (xprt) { | 648 | if (xprt) { |
661 | rqstp->rq_xprt = xprt; | 649 | rqstp->rq_xprt = xprt; |
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) | |||
1204 | struct svc_pool *pool = p; | 1192 | struct svc_pool *pool = p; |
1205 | 1193 | ||
1206 | if (p == SEQ_START_TOKEN) { | 1194 | if (p == SEQ_START_TOKEN) { |
1207 | seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); | 1195 | seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); |
1208 | return 0; | 1196 | return 0; |
1209 | } | 1197 | } |
1210 | 1198 | ||
1211 | seq_printf(m, "%u %lu %lu %lu %lu %lu\n", | 1199 | seq_printf(m, "%u %lu %lu %lu %lu\n", |
1212 | pool->sp_id, | 1200 | pool->sp_id, |
1213 | pool->sp_stats.packets, | 1201 | pool->sp_stats.packets, |
1214 | pool->sp_stats.sockets_queued, | 1202 | pool->sp_stats.sockets_queued, |
1215 | pool->sp_stats.threads_woken, | 1203 | pool->sp_stats.threads_woken, |
1216 | pool->sp_stats.overloads_avoided, | ||
1217 | pool->sp_stats.threads_timedout); | 1204 | pool->sp_stats.threads_timedout); |
1218 | 1205 | ||
1219 | return 0; | 1206 | return 0; |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 4a8f6558718a..d8c041114497 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid) | |||
655 | return NULL; | 655 | return NULL; |
656 | } | 656 | } |
657 | 657 | ||
658 | static int unix_gid_find(uid_t uid, struct group_info **gip, | 658 | static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) |
659 | struct svc_rqst *rqstp) | ||
660 | { | 659 | { |
661 | struct unix_gid *ug = unix_gid_lookup(uid); | 660 | struct unix_gid *ug; |
661 | struct group_info *gi; | ||
662 | int ret; | ||
663 | |||
664 | ug = unix_gid_lookup(uid); | ||
662 | if (!ug) | 665 | if (!ug) |
663 | return -EAGAIN; | 666 | return ERR_PTR(-EAGAIN); |
664 | switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) { | 667 | ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); |
668 | switch (ret) { | ||
665 | case -ENOENT: | 669 | case -ENOENT: |
666 | *gip = NULL; | 670 | return ERR_PTR(-ENOENT); |
667 | return 0; | ||
668 | case 0: | 671 | case 0: |
669 | *gip = ug->gi; | 672 | gi = get_group_info(ug->gi); |
670 | get_group_info(*gip); | ||
671 | cache_put(&ug->h, &unix_gid_cache); | 673 | cache_put(&ug->h, &unix_gid_cache); |
672 | return 0; | 674 | return gi; |
673 | default: | 675 | default: |
674 | return -EAGAIN; | 676 | return ERR_PTR(-EAGAIN); |
675 | } | 677 | } |
676 | } | 678 | } |
677 | 679 | ||
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
681 | struct sockaddr_in *sin; | 683 | struct sockaddr_in *sin; |
682 | struct sockaddr_in6 *sin6, sin6_storage; | 684 | struct sockaddr_in6 *sin6, sin6_storage; |
683 | struct ip_map *ipm; | 685 | struct ip_map *ipm; |
686 | struct group_info *gi; | ||
687 | struct svc_cred *cred = &rqstp->rq_cred; | ||
684 | 688 | ||
685 | switch (rqstp->rq_addr.ss_family) { | 689 | switch (rqstp->rq_addr.ss_family) { |
686 | case AF_INET: | 690 | case AF_INET: |
@@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
721 | ip_map_cached_put(rqstp, ipm); | 725 | ip_map_cached_put(rqstp, ipm); |
722 | break; | 726 | break; |
723 | } | 727 | } |
728 | |||
729 | gi = unix_gid_find(cred->cr_uid, rqstp); | ||
730 | switch (PTR_ERR(gi)) { | ||
731 | case -EAGAIN: | ||
732 | return SVC_DROP; | ||
733 | case -ENOENT: | ||
734 | break; | ||
735 | default: | ||
736 | put_group_info(cred->cr_group_info); | ||
737 | cred->cr_group_info = gi; | ||
738 | } | ||
724 | return SVC_OK; | 739 | return SVC_OK; |
725 | } | 740 | } |
726 | 741 | ||
@@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
817 | slen = svc_getnl(argv); /* gids length */ | 832 | slen = svc_getnl(argv); /* gids length */ |
818 | if (slen > 16 || (len -= (slen + 2)*4) < 0) | 833 | if (slen > 16 || (len -= (slen + 2)*4) < 0) |
819 | goto badcred; | 834 | goto badcred; |
820 | if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp) | 835 | cred->cr_group_info = groups_alloc(slen); |
821 | == -EAGAIN) | 836 | if (cred->cr_group_info == NULL) |
822 | return SVC_DROP; | 837 | return SVC_DROP; |
823 | if (cred->cr_group_info == NULL) { | 838 | for (i = 0; i < slen; i++) |
824 | cred->cr_group_info = groups_alloc(slen); | 839 | GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); |
825 | if (cred->cr_group_info == NULL) | ||
826 | return SVC_DROP; | ||
827 | for (i = 0; i < slen; i++) | ||
828 | GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); | ||
829 | } else { | ||
830 | for (i = 0; i < slen ; i++) | ||
831 | svc_getnl(argv); | ||
832 | } | ||
833 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { | 840 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { |
834 | *authp = rpc_autherr_badverf; | 841 | *authp = rpc_autherr_badverf; |
835 | return SVC_DENIED; | 842 | return SVC_DENIED; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index fd46d42afa89..469de292c23c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task) | |||
700 | } | 700 | } |
701 | if (!xprt_lock_write(xprt, task)) | 701 | if (!xprt_lock_write(xprt, task)) |
702 | return; | 702 | return; |
703 | |||
704 | if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) | ||
705 | xprt->ops->close(xprt); | ||
706 | |||
703 | if (xprt_connected(xprt)) | 707 | if (xprt_connected(xprt)) |
704 | xprt_release_write(xprt, task); | 708 | xprt_release_write(xprt, task); |
705 | else { | 709 | else { |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 04732d09013e..3d739e5d15d8 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task) | |||
2019 | if (xprt_test_and_set_connecting(xprt)) | 2019 | if (xprt_test_and_set_connecting(xprt)) |
2020 | return; | 2020 | return; |
2021 | 2021 | ||
2022 | if (transport->sock != NULL) { | 2022 | if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { |
2023 | dprintk("RPC: xs_connect delayed xprt %p for %lu " | 2023 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
2024 | "seconds\n", | 2024 | "seconds\n", |
2025 | xprt, xprt->reestablish_timeout / HZ); | 2025 | xprt, xprt->reestablish_timeout / HZ); |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index c01470e7de15..baa898add287 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -141,62 +141,35 @@ static const struct ieee80211_regdomain us_regdom = { | |||
141 | .reg_rules = { | 141 | .reg_rules = { |
142 | /* IEEE 802.11b/g, channels 1..11 */ | 142 | /* IEEE 802.11b/g, channels 1..11 */ |
143 | REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), | 143 | REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), |
144 | /* IEEE 802.11a, channel 36 */ | 144 | /* IEEE 802.11a, channel 36..48 */ |
145 | REG_RULE(5180-10, 5180+10, 40, 6, 23, 0), | 145 | REG_RULE(5180-10, 5240+10, 40, 6, 17, 0), |
146 | /* IEEE 802.11a, channel 40 */ | ||
147 | REG_RULE(5200-10, 5200+10, 40, 6, 23, 0), | ||
148 | /* IEEE 802.11a, channel 44 */ | ||
149 | REG_RULE(5220-10, 5220+10, 40, 6, 23, 0), | ||
150 | /* IEEE 802.11a, channels 48..64 */ | 146 | /* IEEE 802.11a, channels 48..64 */ |
151 | REG_RULE(5240-10, 5320+10, 40, 6, 23, 0), | 147 | REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS), |
148 | /* IEEE 802.11a, channels 100..124 */ | ||
149 | REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS), | ||
150 | /* IEEE 802.11a, channels 132..144 */ | ||
151 | REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS), | ||
152 | /* IEEE 802.11a, channels 149..165, outdoor */ | 152 | /* IEEE 802.11a, channels 149..165, outdoor */ |
153 | REG_RULE(5745-10, 5825+10, 40, 6, 30, 0), | 153 | REG_RULE(5745-10, 5825+10, 40, 6, 30, 0), |
154 | } | 154 | } |
155 | }; | 155 | }; |
156 | 156 | ||
157 | static const struct ieee80211_regdomain jp_regdom = { | 157 | static const struct ieee80211_regdomain jp_regdom = { |
158 | .n_reg_rules = 3, | 158 | .n_reg_rules = 6, |
159 | .alpha2 = "JP", | 159 | .alpha2 = "JP", |
160 | .reg_rules = { | 160 | .reg_rules = { |
161 | /* IEEE 802.11b/g, channels 1..14 */ | 161 | /* IEEE 802.11b/g, channels 1..11 */ |
162 | REG_RULE(2412-10, 2484+10, 40, 6, 20, 0), | 162 | REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), |
163 | /* IEEE 802.11a, channels 34..48 */ | 163 | /* IEEE 802.11b/g, channels 12..13 */ |
164 | REG_RULE(5170-10, 5240+10, 40, 6, 20, | 164 | REG_RULE(2467-10, 2472+10, 20, 6, 20, 0), |
165 | NL80211_RRF_PASSIVE_SCAN), | 165 | /* IEEE 802.11b/g, channel 14 */ |
166 | REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM), | ||
167 | /* IEEE 802.11a, channels 36..48 */ | ||
168 | REG_RULE(5180-10, 5240+10, 40, 6, 20, 0), | ||
166 | /* IEEE 802.11a, channels 52..64 */ | 169 | /* IEEE 802.11a, channels 52..64 */ |
167 | REG_RULE(5260-10, 5320+10, 40, 6, 20, | 170 | REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS), |
168 | NL80211_RRF_NO_IBSS | | 171 | /* IEEE 802.11a, channels 100..144 */ |
169 | NL80211_RRF_DFS), | 172 | REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS), |
170 | } | ||
171 | }; | ||
172 | |||
173 | static const struct ieee80211_regdomain eu_regdom = { | ||
174 | .n_reg_rules = 6, | ||
175 | /* | ||
176 | * This alpha2 is bogus, we leave it here just for stupid | ||
177 | * backward compatibility | ||
178 | */ | ||
179 | .alpha2 = "EU", | ||
180 | .reg_rules = { | ||
181 | /* IEEE 802.11b/g, channels 1..13 */ | ||
182 | REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), | ||
183 | /* IEEE 802.11a, channel 36 */ | ||
184 | REG_RULE(5180-10, 5180+10, 40, 6, 23, | ||
185 | NL80211_RRF_PASSIVE_SCAN), | ||
186 | /* IEEE 802.11a, channel 40 */ | ||
187 | REG_RULE(5200-10, 5200+10, 40, 6, 23, | ||
188 | NL80211_RRF_PASSIVE_SCAN), | ||
189 | /* IEEE 802.11a, channel 44 */ | ||
190 | REG_RULE(5220-10, 5220+10, 40, 6, 23, | ||
191 | NL80211_RRF_PASSIVE_SCAN), | ||
192 | /* IEEE 802.11a, channels 48..64 */ | ||
193 | REG_RULE(5240-10, 5320+10, 40, 6, 20, | ||
194 | NL80211_RRF_NO_IBSS | | ||
195 | NL80211_RRF_DFS), | ||
196 | /* IEEE 802.11a, channels 100..140 */ | ||
197 | REG_RULE(5500-10, 5700+10, 40, 6, 30, | ||
198 | NL80211_RRF_NO_IBSS | | ||
199 | NL80211_RRF_DFS), | ||
200 | } | 173 | } |
201 | }; | 174 | }; |
202 | 175 | ||
@@ -206,15 +179,17 @@ static const struct ieee80211_regdomain *static_regdom(char *alpha2) | |||
206 | return &us_regdom; | 179 | return &us_regdom; |
207 | if (alpha2[0] == 'J' && alpha2[1] == 'P') | 180 | if (alpha2[0] == 'J' && alpha2[1] == 'P') |
208 | return &jp_regdom; | 181 | return &jp_regdom; |
182 | /* Use world roaming rules for "EU", since it was a pseudo | ||
183 | domain anyway... */ | ||
209 | if (alpha2[0] == 'E' && alpha2[1] == 'U') | 184 | if (alpha2[0] == 'E' && alpha2[1] == 'U') |
210 | return &eu_regdom; | 185 | return &world_regdom; |
211 | /* Default, as per the old rules */ | 186 | /* Default, world roaming rules */ |
212 | return &us_regdom; | 187 | return &world_regdom; |
213 | } | 188 | } |
214 | 189 | ||
215 | static bool is_old_static_regdom(const struct ieee80211_regdomain *rd) | 190 | static bool is_old_static_regdom(const struct ieee80211_regdomain *rd) |
216 | { | 191 | { |
217 | if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom) | 192 | if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom) |
218 | return true; | 193 | return true; |
219 | return false; | 194 | return false; |
220 | } | 195 | } |
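The regulatory tables above encode each rule as "center frequency - 10 .. center frequency + 10" MHz, i.e. the edges of a 20 MHz-wide channel. A small standalone helper, not cfg80211 code, showing how the 802.11 channel numbers in the comments map onto those edges:

    #include <stdio.h>

    static int channel_center_mhz(int chan)
    {
        if (chan >= 1 && chan <= 13)
            return 2407 + chan * 5;     /* 2.4 GHz band */
        if (chan == 14)
            return 2484;                /* JP-only channel 14 */
        return 5000 + chan * 5;         /* 5 GHz band */
    }

    int main(void)
    {
        int chans[] = { 1, 11, 14, 36, 64, 149 };
        for (unsigned i = 0; i < sizeof(chans) / sizeof(chans[0]); i++) {
            int c = channel_center_mhz(chans[i]);
            printf("channel %3d: %d MHz (rule edge %d-%d)\n",
                   chans[i], c, c - 10, c + 10);
        }
        return 0;
    }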
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 584eb4826e02..54face3d4424 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -479,6 +479,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
479 | } | 479 | } |
480 | err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); | 480 | err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); |
481 | } | 481 | } |
482 | wdev->wext.connect.privacy = false; | ||
482 | /* | 483 | /* |
483 | * Applications using wireless extensions expect to be | 484 | * Applications using wireless extensions expect to be |
484 | * able to delete keys that don't exist, so allow that. | 485 | * able to delete keys that don't exist, so allow that. |