Diffstat (limited to 'net/ipv4/udp.c')
 -rw-r--r--  net/ipv4/udp.c | 207
 1 file changed, 124 insertions(+), 83 deletions(-)

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fb23c2e63b52..198f75b7bdd3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
 
-int sysctl_udp_mem[3] __read_mostly;
+long sysctl_udp_mem[3] __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_mem);
 
 int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
 int sysctl_udp_wmem_min __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_wmem_min);
 
-atomic_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
 #define MAX_UDP_PORTS 65536
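Note: sysctl_udp_mem[] and udp_memory_allocated count pages of socket-buffer memory, and the two hunks above widen them from int/atomic_t to long/atomic_long_t so the totals cannot wrap on machines with very large amounts of memory. A rough stand-alone illustration of the old ceiling (assuming 4 KiB pages; not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_size = 4096;	/* assumed page size */
	const uint64_t max_pages = INT32_MAX;	/* ceiling of a 32-bit signed counter */

	/* A 32-bit page counter can only describe about 8 TiB of memory. */
	printf("32-bit page counter covers at most %llu GiB\n",
	       (unsigned long long)(max_pages * page_size >> 30));
	return 0;
}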
@@ -189,7 +189,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
  *  @sk:          socket struct in question
  *  @snum:        port number to look up
  *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
- *  @hash2_nulladdr: AF-dependant hash value in secondary hash chains,
+ *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
  *                   with NULL address
  */
 int udp_lib_get_port(struct sock *sk, unsigned short snum,
@@ -430,7 +430,7 @@ begin:
 
 	if (result) {
 exact_match:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
 				  daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@ begin:
 		goto begin;
 
 	if (result) {
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
 				  daddr, dport, dif) < badness)) {
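Note: the socket lookup above runs under RCU, so taking a reference must fail if the refcount has already dropped to zero (the socket is being freed). atomic_inc_not_zero_hint() behaves like atomic_inc_not_zero() but seeds the first compare-and-swap with the caller's guess at the current value (2 here). A C11 sketch of the idea, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

static bool inc_not_zero_hint(atomic_int *v, int hint)
{
	int cur = hint ? hint : atomic_load(v);

	while (cur != 0) {
		/* On failure, cur is reloaded with the value actually seen. */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;	/* took a reference */
	}
	return false;	/* count was zero: object is already going away */
}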
@@ -578,7 +578,7 @@ found:
 void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 {
 	struct inet_sock *inet;
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
@@ -663,75 +663,71 @@ void udp_flush_pending_frames(struct sock *sk)
 EXPORT_SYMBOL(udp_flush_pending_frames);
 
 /**
- * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
- * 	@sk: 	socket we are sending on
+ * 	udp4_hwcsum  -  handle outgoing HW checksumming
  * 	@skb: 	sk_buff containing the filled-in UDP header
  * 	        (checksum field must be zeroed out)
+ * 	@src: 	source IP address
+ * 	@dst: 	destination IP address
  */
-static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
-				 __be32 src, __be32 dst, int len)
+static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 {
-	unsigned int offset;
 	struct udphdr *uh = udp_hdr(skb);
+	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
+	int offset = skb_transport_offset(skb);
+	int len = skb->len - offset;
+	int hlen = len;
 	__wsum csum = 0;
 
-	if (skb_queue_len(&sk->sk_write_queue) == 1) {
+	if (!frags) {
 		/*
 		 * Only one fragment on the socket.
 		 */
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+		uh->check = ~csum_tcpudp_magic(src, dst, len,
+					       IPPROTO_UDP, 0);
 	} else {
 		/*
 		 * HW-checksum won't work as there are two or more
 		 * fragments on the socket so that all csums of sk_buffs
 		 * should be together
 		 */
-		offset = skb_transport_offset(skb);
-		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+		do {
+			csum = csum_add(csum, frags->csum);
+			hlen -= frags->len;
+		} while ((frags = frags->next));
 
+		csum = skb_checksum(skb, offset, hlen, csum);
 		skb->ip_summed = CHECKSUM_NONE;
 
-		skb_queue_walk(&sk->sk_write_queue, skb) {
-			csum = csum_add(csum, skb->csum);
-		}
-
 		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 	}
 }
 
-/*
- * Push out all pending data as one UDP datagram. Socket is locked.
- */
-static int udp_push_pending_frames(struct sock *sk)
+static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 {
-	struct udp_sock *up = udp_sk(sk);
+	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
-	struct flowi *fl = &inet->cork.fl;
-	struct sk_buff *skb;
 	struct udphdr *uh;
 	int err = 0;
 	int is_udplite = IS_UDPLITE(sk);
+	int offset = skb_transport_offset(skb);
+	int len = skb->len - offset;
 	__wsum csum = 0;
 
-	/* Grab the skbuff where UDP header space exists. */
-	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
-		goto out;
-
 	/*
 	 * Create a UDP header
 	 */
 	uh = udp_hdr(skb);
-	uh->source = fl->fl_ip_sport;
-	uh->dest = fl->fl_ip_dport;
-	uh->len = htons(up->len);
+	uh->source = inet->inet_sport;
+	uh->dest = fl4->fl4_dport;
+	uh->len = htons(len);
 	uh->check = 0;
 
 	if (is_udplite)  				 /*     UDP-Lite      */
-		csum  = udplite_csum_outgoing(sk, skb);
+		csum = udplite_csum(skb);
 
 	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
 
@@ -740,20 +736,20 @@ static int udp_push_pending_frames(struct sock *sk)
 
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 
-		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
+		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
 		goto send;
 
-	} else						 /*   `normal' UDP    */
-		csum = udp_csum_outgoing(sk, skb);
+	} else
+		csum = udp_csum(skb);
 
 	/* add protocol-dependent pseudo-header */
-	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
+	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
 				      sk->sk_protocol, csum);
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
 send:
-	err = ip_push_pending_frames(sk);
+	err = ip_send_skb(skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
 			UDP_INC_STATS_USER(sock_net(sk),
@@ -763,6 +759,26 @@ send:
 	} else
 		UDP_INC_STATS_USER(sock_net(sk),
 				   UDP_MIB_OUTDATAGRAMS, is_udplite);
+	return err;
+}
+
+/*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+static int udp_push_pending_frames(struct sock *sk)
+{
+	struct udp_sock *up = udp_sk(sk);
+	struct inet_sock *inet = inet_sk(sk);
+	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
+	struct sk_buff *skb;
+	int err = 0;
+
+	skb = ip_finish_skb(sk, fl4);
+	if (!skb)
+		goto out;
+
+	err = udp_send_skb(skb, fl4);
+
 out:
 	up->len = 0;
 	up->pending = 0;
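Note: whichever branch fills uh->check above, the value is the standard UDP/IPv4 checksum: a 16-bit one's complement sum over a pseudo-header (source address, destination address, protocol, UDP length) plus the UDP header and payload, with an all-zero result transmitted as 0xFFFF (CSUM_MANGLED_0) because zero means "no checksum" for UDP over IPv4. A stand-alone user-space sketch of what csum_tcpudp_magic() plus the final fold compute (illustration only, not the kernel code):

#include <stdint.h>
#include <stddef.h>

static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)	/* odd trailing byte is padded with a zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* saddr/daddr in host byte order; udp = UDP header + payload with the
 * checksum field zeroed; len = UDP length in bytes. */
uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
		       const void *udp, uint16_t len)
{
	uint32_t sum = 0;

	/* pseudo-header */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 17;		/* IPPROTO_UDP */
	sum += len;

	sum = sum16(udp, len, sum);

	while (sum >> 16)	/* fold carries, like csum_fold() */
		sum = (sum & 0xffff) + (sum >> 16);

	sum = ~sum & 0xffff;
	return sum ? (uint16_t)sum : 0xffff;	/* CSUM_MANGLED_0 */
}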
@@ -774,6 +790,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct udp_sock *up = udp_sk(sk);
+	struct flowi4 fl4_stack;
+	struct flowi4 *fl4;
 	int ulen = len;
 	struct ipcm_cookie ipc;
 	struct rtable *rt = NULL;
@@ -785,6 +803,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int err, is_udplite = IS_UDPLITE(sk);
 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+	struct sk_buff *skb;
+	struct ip_options_data opt_copy;
 
 	if (len > 0xFFFF)
 		return -EMSGSIZE;
@@ -797,8 +817,11 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		return -EOPNOTSUPP;
 
 	ipc.opt = NULL;
-	ipc.shtx.flags = 0;
+	ipc.tx_flags = 0;
 
+	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+
+	fl4 = &inet->cork.fl.u.ip4;
 	if (up->pending) {
 		/*
 		 * There are pending frames.
@@ -845,7 +868,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		ipc.addr = inet->inet_saddr;
 
 	ipc.oif = sk->sk_bound_dev_if;
-	err = sock_tx_timestamp(msg, sk, &ipc.shtx);
+	err = sock_tx_timestamp(sk, &ipc.tx_flags);
 	if (err)
 		return err;
 	if (msg->msg_controllen) {
@@ -856,22 +879,32 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			free = 1;
 		connected = 0;
 	}
-	if (!ipc.opt)
-		ipc.opt = inet->opt;
+	if (!ipc.opt) {
+		struct ip_options_rcu *inet_opt;
+
+		rcu_read_lock();
+		inet_opt = rcu_dereference(inet->inet_opt);
+		if (inet_opt) {
+			memcpy(&opt_copy, inet_opt,
+			       sizeof(*inet_opt) + inet_opt->opt.optlen);
+			ipc.opt = &opt_copy.opt;
+		}
+		rcu_read_unlock();
+	}
 
 	saddr = ipc.addr;
 	ipc.addr = faddr = daddr;
 
-	if (ipc.opt && ipc.opt->srr) {
+	if (ipc.opt && ipc.opt->opt.srr) {
 		if (!daddr)
 			return -EINVAL;
-		faddr = ipc.opt->faddr;
+		faddr = ipc.opt->opt.faddr;
 		connected = 0;
 	}
 	tos = RT_TOS(inet->tos);
 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
 	    (msg->msg_flags & MSG_DONTROUTE) ||
-	    (ipc.opt && ipc.opt->is_strictroute)) {
+	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
 		tos |= RTO_ONLINK;
 		connected = 0;
 	}
@@ -888,22 +921,19 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		rt = (struct rtable *)sk_dst_check(sk, 0);
 
 	if (rt == NULL) {
-		struct flowi fl = { .oif = ipc.oif,
-				    .mark = sk->sk_mark,
-				    .nl_u = { .ip4_u =
-					      { .daddr = faddr,
-						.saddr = saddr,
-						.tos = tos } },
-				    .proto = sk->sk_protocol,
-				    .flags = inet_sk_flowi_flags(sk),
-				    .uli_u = { .ports =
-					       { .sport = inet->inet_sport,
-						 .dport = dport } } };
 		struct net *net = sock_net(sk);
 
-		security_sk_classify_flow(sk, &fl);
-		err = ip_route_output_flow(net, &rt, &fl, sk, 1);
-		if (err) {
+		fl4 = &fl4_stack;
+		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
+				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
+				   faddr, saddr, dport, inet->inet_sport);
+
+		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+		rt = ip_route_output_flow(net, fl4, sk);
+		if (IS_ERR(rt)) {
+			err = PTR_ERR(rt);
+			rt = NULL;
 			if (err == -ENETUNREACH)
 				IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
 			goto out;
@@ -921,9 +951,20 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		goto do_confirm;
 back_from_confirm:
 
-	saddr = rt->rt_src;
+	saddr = fl4->saddr;
 	if (!ipc.addr)
-		daddr = ipc.addr = rt->rt_dst;
+		daddr = ipc.addr = fl4->daddr;
+
+	/* Lockless fast path for the non-corking case. */
+	if (!corkreq) {
+		skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
+				  sizeof(struct udphdr), &ipc, &rt,
+				  msg->msg_flags);
+		err = PTR_ERR(skb);
+		if (skb && !IS_ERR(skb))
+			err = udp_send_skb(skb, fl4);
+		goto out;
+	}
 
 	lock_sock(sk);
 	if (unlikely(up->pending)) {
@@ -938,18 +979,18 @@ back_from_confirm:
 	/*
 	 *	Now cork the socket to pend data.
 	 */
-	inet->cork.fl.fl4_dst = daddr;
-	inet->cork.fl.fl_ip_dport = dport;
-	inet->cork.fl.fl4_src = saddr;
-	inet->cork.fl.fl_ip_sport = inet->inet_sport;
+	fl4 = &inet->cork.fl.u.ip4;
+	fl4->daddr = daddr;
+	fl4->saddr = saddr;
+	fl4->fl4_dport = dport;
+	fl4->fl4_sport = inet->inet_sport;
 	up->pending = AF_INET;
 
 do_append_data:
 	up->len += ulen;
-	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
-	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
-			     sizeof(struct udphdr), &ipc, &rt,
-			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
+	err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
+			     sizeof(struct udphdr), &ipc, &rt,
+			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
 	if (err)
 		udp_flush_pending_frames(sk);
 	else if (!corkreq)
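Note: the corking branch above is what a UDP_CORK'ed (or MSG_MORE) send exercises: each write is appended to the pending frame with ip_append_data(), and the whole queue goes out as a single datagram when udp_push_pending_frames() runs. A minimal user-space sketch, assuming Linux's UDP_CORK option from <netinet/udp.h> and an example loopback destination; error handling omitted:

#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_CORK (Linux) */
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int one = 1, zero = 0;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),		/* example port */
		.sin_addr   = { htonl(INADDR_LOOPBACK) },
	};

	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &one, sizeof(one));
	send(fd, "hello ", 6, 0);	/* appended to the pending frame */
	send(fd, "world", 5, 0);	/* still the same pending frame */
	/* removing the cork flushes everything as ONE datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &zero, sizeof(zero));

	close(fd);
	return 0;
}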
@@ -989,6 +1030,7 @@ EXPORT_SYMBOL(udp_sendmsg);
 int udp_sendpage(struct sock *sk, struct page *page, int offset,
 		 size_t size, int flags)
 {
+	struct inet_sock *inet = inet_sk(sk);
 	struct udp_sock *up = udp_sk(sk);
 	int ret;
 
@@ -1013,7 +1055,8 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 		return -EINVAL;
 	}
 
-	ret = ip_append_page(sk, page, offset, size, flags);
+	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
+			     page, offset, size, flags);
 	if (ret == -EOPNOTSUPP) {
 		release_sock(sk);
 		return sock_no_sendpage(sk->sk_socket, page, offset,
@@ -1206,6 +1249,9 @@ csum_copy_err:
 
 	if (noblock)
 		return -EAGAIN;
+
+	/* starting over for a new packet */
+	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
 
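Note: the added lines clear MSG_TRUNC before looping back to try_again, so a flag set while discarding a corrupted datagram is not reported against the next one. From user space, MSG_TRUNC on a UDP socket also has a request form: passing it to recv() makes Linux return the real datagram length even when the buffer was too small. Sketch only; fd is assumed to be a bound SOCK_DGRAM socket:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

void read_one(int fd)
{
	char buf[512];
	ssize_t full_len = recv(fd, buf, sizeof(buf), MSG_TRUNC);

	if (full_len > (ssize_t)sizeof(buf))
		printf("datagram was %zd bytes, kept only %zu\n",
		       full_len, sizeof(buf));
}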
@@ -1413,7 +1459,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (sk->sk_filter) {
+	if (rcu_dereference_raw(sk->sk_filter)) {
 		if (udp_lib_checksum_complete(skb))
 			goto drop;
 	}
@@ -1899,6 +1945,7 @@ struct proto udp_prot = {
 	.compat_setsockopt = compat_udp_setsockopt,
 	.compat_getsockopt = compat_udp_getsockopt,
 #endif
+	.clear_sk	   = sk_prot_clear_portaddr_nulls,
 };
 EXPORT_SYMBOL(udp_prot);
 
@@ -2046,7 +2093,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 	__u16 srcp = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
@@ -2162,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 
 	udp_table_init(&udp_table, "UDP");
-	/* Set the pressure threshold up by the same strategy of TCP. It is a
-	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-	 * toward zero with the amount of memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_udp_mem[0] = limit / 4 * 3;
 	sysctl_udp_mem[1] = limit;
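Note: the simplified sizing above feeds the udp_mem sysctl: limit now comes from nr_free_buffer_pages() / 8 with a floor of 128 pages, and the resulting thresholds (min, pressure, max) are page counts, not bytes. They can be inspected from user space, assuming procfs is mounted:

#include <stdio.h>

int main(void)
{
	long min, pressure, max;
	FILE *f = fopen("/proc/sys/net/ipv4/udp_mem", "r");

	if (f && fscanf(f, "%ld %ld %ld", &min, &pressure, &max) == 3)
		printf("udp_mem: min=%ld pressure=%ld max=%ld (pages)\n",
		       min, pressure, max);
	if (f)
		fclose(f);
	return 0;
}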
@@ -2200,7 +2241,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
 	return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
@@ -2228,7 +2269,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
 	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
 	 * do checksum of UDP packets sent as multiple IP fragments.
 	 */
-	offset = skb->csum_start - skb_headroom(skb);
+	offset = skb_checksum_start_offset(skb);
 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
 	offset += skb->csum_offset;
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);