author		Ingo Molnar <mingo@elte.hu>	2008-10-10 13:30:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-10 13:30:08 -0400
commit		3dd392a407d15250a501fa109cc1f93fee95ef85 (patch)
tree		c1faca3fa8bd0f7c8790b3e0887229b4a5a90e8b /net/ipv4
parent		b27a43c1e90582facad44de67d02bc9e9f900289 (diff)
parent		d403a6484f0341bf0624d17ece46f24f741b6a92 (diff)
Merge branch 'linus' into x86/pat2
Conflicts:
	arch/x86/mm/init_64.c
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_hybla.c	 6
-rw-r--r--	net/ipv4/tcp_input.c	 3
-rw-r--r--	net/ipv4/tcp_ipv4.c	 2
-rw-r--r--	net/ipv4/udp.c		62
4 files changed, 41 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index bfcbd148a89d..c209e054a634 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -150,7 +150,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		ca->snd_cwnd_cents -= 128;
 		tp->snd_cwnd_cnt = 0;
 	}
-
+	/* check when cwnd has not been incremented for a while */
+	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+		tp->snd_cwnd++;
+		tp->snd_cwnd_cnt = 0;
+	}
 	/* clamp down slowstart cwnd to ssthresh value. */
 	if (is_slowstart)
 		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
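The hunk above guards against a stalled window: Hybla accumulates fractional cwnd growth, and when both the whole-segment increment and the fractional remainder round to zero, snd_cwnd_cnt can climb past snd_cwnd without the window ever growing. Below is a minimal user-space sketch of that accumulator pattern; the struct, the field semantics, and the per-ACK snd_cwnd_cnt++ are simplifications for illustration, not the kernel's types.

/* Toy model only: mirrors the guard added above, with simplified
 * per-ACK counting (snd_cwnd_cnt++ here stands in for the kernel's
 * bookkeeping). Without the guard, cwnd would stay at 4 forever when
 * every increment rounds to zero. */
#include <stdio.h>

struct toy_tp {
	unsigned int snd_cwnd;      /* congestion window, in segments */
	unsigned int snd_cwnd_cnt;  /* ACKs accumulated toward growth */
};

static void on_ack(struct toy_tp *tp, unsigned int increment, unsigned int odd)
{
	tp->snd_cwnd += increment;  /* whole-segment growth, may be 0 */
	tp->snd_cwnd_cnt++;
	/* the added guard: a full window of ACKs with zero growth
	 * still earns one segment */
	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	}
}

int main(void)
{
	struct toy_tp tp = { 4, 0 };

	for (int i = 0; i < 20; i++)
		on_ack(&tp, 0, 0);  /* increments all round to zero */
	printf("cwnd after 20 zero-increment ACKs: %u\n", tp.snd_cwnd);
	return 0;
}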
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..7abc6b80d47d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4879,7 +4879,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					goto no_ack;
 			}
 
-			__tcp_ack_snd_check(sk, 0);
+			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
+				__tcp_ack_snd_check(sk, 0);
 no_ack:
 #ifdef CONFIG_NET_DMA
 			if (copied_early)
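The tcp_input.c change avoids a duplicate ACK on the NET_DMA early-copy path: tp->rcv_wup records the last rcv_nxt advertised to the peer, so when data was copied early and rcv_nxt == rcv_wup, everything received has already been acknowledged. A self-contained restatement of just that condition, with toy values rather than kernel state:

#include <stdbool.h>
#include <stdio.h>

/* rcv_nxt: next sequence we expect; rcv_wup: last rcv_nxt we advertised
 * in an ACK. Equality means an ACK covering all received data is already
 * on the wire, so a second one would be a pure duplicate. */
static bool should_send_ack(bool copied_early,
			    unsigned int rcv_nxt, unsigned int rcv_wup)
{
	return !copied_early || rcv_nxt != rcv_wup;
}

int main(void)
{
	printf("%d\n", should_send_ack(true, 1000, 1000));  /* 0: suppress dup */
	printf("%d\n", should_send_ack(true, 1200, 1000));  /* 1: new data    */
	printf("%d\n", should_send_ack(false, 1000, 1000)); /* 1: normal path */
	return 0;
}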
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1b4fee20fc93..011478e46c40 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -618,7 +618,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 		];
 	} rep;
 	struct ip_reply_arg arg;
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net(skb->dst->dev);
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof(arg));
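The one-line tcp_ipv4.c change resolves the network namespace from the device held by the route attached to the skb rather than from the receive device, presumably because skb->dst->dev is the reference that is still valid on this reply path. A compilable toy of the structural difference; all types and names below are simplified stand-ins, not the kernel's definitions:

struct net;                     /* opaque namespace handle */
struct net_device { struct net *nd_net; };
struct dst_entry  { struct net_device *dev; };
struct sk_buff    { struct net_device *dev; struct dst_entry *dst; };

static struct net *dev_net_of(const struct net_device *dev)
{
	return dev->nd_net;
}

/* before: dev_net_of(skb->dev)      -- the receive device
 * after:  dev_net_of(skb->dst->dev) -- the device held by the route */
static struct net *reply_net(const struct sk_buff *skb)
{
	return dev_net_of(skb->dst->dev);
}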
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8e42fbbd5761..57e26fa66185 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -951,6 +951,27 @@ int udp_disconnect(struct sock *sk, int flags)
 	return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int is_udplite = IS_UDPLITE(sk);
+	int rc;
+
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 is_udplite);
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	kfree_skb(skb);
+	return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -989,9 +1010,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 	    up->encap_rcv != NULL) {
 		int ret;
 
-		bh_unlock_sock(sk);
 		ret = (*up->encap_rcv)(sk, skb);
-		bh_lock_sock(sk);
 		if (ret <= 0) {
 			UDP_INC_STATS_BH(sock_net(sk),
 					 UDP_MIB_INDATAGRAMS,
@@ -1044,17 +1063,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 			goto drop;
 	}
 
-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					 UDP_MIB_RCVBUFERRORS, is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
-	}
+	rc = 0;
 
-	return 0;
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = __udp_queue_rcv_skb(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+
+	return rc;
 
 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1092,15 +1110,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		skb1 = skb_clone(skb, GFP_ATOMIC);
 
 		if (skb1) {
-			int ret = 0;
-
-			bh_lock_sock(sk);
-			if (!sock_owned_by_user(sk))
-				ret = udp_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
-			bh_unlock_sock(sk);
-
+			int ret = udp_queue_rcv_skb(sk, skb1);
 			if (ret > 0)
 				/* we should probably re-process instead
 				 * of dropping packets here. */
@@ -1195,13 +1205,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 			uh->dest, inet_iif(skb), udptable);
 
 	if (sk != NULL) {
-		int ret = 0;
-		bh_lock_sock(sk);
-		if (!sock_owned_by_user(sk))
-			ret = udp_queue_rcv_skb(sk, skb);
-		else
-			sk_add_backlog(sk, skb);
-		bh_unlock_sock(sk);
+		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
@@ -1494,7 +1498,7 @@ struct proto udp_prot = {
 	.sendmsg	   = udp_sendmsg,
 	.recvmsg	   = udp_recvmsg,
 	.sendpage	   = udp_sendpage,
-	.backlog_rcv	   = udp_queue_rcv_skb,
+	.backlog_rcv	   = __udp_queue_rcv_skb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v4_get_port,
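Taken together, the udp.c hunks move the bh_lock_sock()/sock_owned_by_user()/sk_add_backlog() dance out of the three call sites and into udp_queue_rcv_skb() itself, and point .backlog_rcv at the new lock-free helper __udp_queue_rcv_skb(), since the backlog is drained with the socket lock already held. This is also why the bh_unlock/bh_lock pair around encap_rcv could go: the callback now runs before the lock is taken. A user-space analogue of that lock-or-backlog shape, with toy types standing in for the kernel's:

#include <pthread.h>
#include <stdio.h>

struct pkt { int id; struct pkt *next; };

struct toy_sock {
	pthread_mutex_t lock;
	int owned_by_user;    /* "a process context holds the socket" */
	struct pkt *backlog;  /* deferred packets, drained under the lock */
};

/* lock-free delivery step: caller must hold sk->lock
 * (the role __udp_queue_rcv_skb now plays as .backlog_rcv) */
static int queue_locked(struct toy_sock *sk, struct pkt *p)
{
	printf("delivered packet %d\n", p->id);
	return 0;
}

/* single entry point for every receive path
 * (the role of the reworked udp_queue_rcv_skb) */
static int rcv(struct toy_sock *sk, struct pkt *p)
{
	int rc = 0;

	pthread_mutex_lock(&sk->lock);
	if (!sk->owned_by_user) {
		rc = queue_locked(sk, p);   /* fast path: deliver now */
	} else {
		p->next = sk->backlog;      /* lock owner will drain us */
		sk->backlog = p;
	}
	pthread_mutex_unlock(&sk->lock);
	return rc;
}

int main(void)
{
	struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	struct pkt a = { 1, NULL }, b = { 2, NULL };

	rcv(&sk, &a);           /* delivered immediately */

	sk.owned_by_user = 1;   /* user now "owns" the socket */
	rcv(&sk, &b);           /* parked on the backlog instead */

	sk.owned_by_user = 0;   /* on release, drain the backlog under lock */
	pthread_mutex_lock(&sk.lock);
	for (struct pkt *p = sk.backlog; p; p = p->next)
		queue_locked(&sk, p);
	sk.backlog = NULL;
	pthread_mutex_unlock(&sk.lock);
	return 0;
}

The design point the refactor encodes: only one function may assume the socket lock is held, and it is exactly the one registered as the backlog handler.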