Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c                      |  7
-rw-r--r--  net/ipv4/fib_trie.c                 |  3
-rw-r--r--  net/ipv4/ip_input.c                 |  3
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c  | 17
-rw-r--r--  net/ipv4/route.c                    | 36
-rw-r--r--  net/ipv4/tcp.c                      | 15
-rw-r--r--  net/ipv4/tcp_minisocks.c            |  3
-rw-r--r--  net/ipv4/tcp_output.c               |  3
8 files changed, 66 insertions, 21 deletions
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8a3881e28aca..c29d75d8f1b1 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
          * cache.
          */
 
-        /*
-         * Special case: IPv4 duplicate address detection packet (RFC2131)
-         * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
-         */
-        if (sip == 0 || tip == sip) {
+        /* Special case: IPv4 duplicate address detection packet (RFC2131) */
+        if (sip == 0) {
                 if (arp->ar_op == htons(ARPOP_REQUEST) &&
                     inet_addr_type(net, tip) == RTN_LOCAL &&
                     !arp_ignore(in_dev, sip, tip))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 012cf5a68581..00a54b246dfe 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
                                   (struct node *)tn, wasfull);
 
                 tp = node_parent((struct node *) tn);
+                if (!tp)
+                        rcu_assign_pointer(t->trie, (struct node *)tn);
+
                 tnode_free_flush();
                 if (!tp)
                         break;
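
The two lines added above publish the rebalanced subtree root before the old
tnodes are queued for freeing. As a rough userspace model (not kernel code),
rcu_assign_pointer() is at heart a store-release: a reader that picks up the
new root pointer also sees the fully built subtree. The sketch below uses C11
atomics and made-up node/field names purely to illustrate that ordering.

/*
 * Userspace model of the publish step; struct node and trie_root are
 * hypothetical stand-ins, not the kernel's fib_trie types.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct node { int key; struct node *left, *right; };

static _Atomic(struct node *) trie_root;

static void publish_root(struct node *tn)
{
        /* Equivalent in spirit to rcu_assign_pointer(t->trie, tn). */
        atomic_store_explicit(&trie_root, tn, memory_order_release);
}

int main(void)
{
        struct node *tn = calloc(1, sizeof(*tn));
        tn->key = 42;           /* fully initialise before publishing */
        publish_root(tn);
        return 0;
}
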
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 490ce20faf38..db46b4b5b2b9 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -440,6 +440,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
         /* Remove any debris in the socket control block */
         memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 
+        /* Must drop socket now because of tproxy. */
+        skb_orphan(skb);
+
         return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
                        ip_rcv_finish);
 
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 155c008626c8..09172a65d9b6 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                         ct, ctinfo);
                 /* Tell TCP window tracking about seq change */
                 nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
-                                        ct, CTINFO2DIR(ctinfo));
+                                        ct, CTINFO2DIR(ctinfo),
+                                        (int)rep_len - (int)match_len);
 
                 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
         }
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
         struct tcphdr *tcph;
         int dir;
         __be32 newseq, newack;
+        s16 seqoff, ackoff;
         struct nf_conn_nat *nat = nfct_nat(ct);
         struct nf_nat_seq *this_way, *other_way;
 
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 
         tcph = (void *)skb->data + ip_hdrlen(skb);
         if (after(ntohl(tcph->seq), this_way->correction_pos))
-                newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+                seqoff = this_way->offset_after;
         else
-                newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+                seqoff = this_way->offset_before;
 
         if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
                   other_way->correction_pos))
-                newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+                ackoff = other_way->offset_after;
         else
-                newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+                ackoff = other_way->offset_before;
+
+        newseq = htonl(ntohl(tcph->seq) + seqoff);
+        newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
         inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
         inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
         if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
                 return 0;
 
-        nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+        nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
 
         return 1;
 }
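
The rework above splits the adjustment into "pick an offset, then apply it",
so the chosen seqoff can also be handed to nf_conntrack_tcp_update(). A
standalone sketch of that arithmetic follows; the struct, the after() helper
and the sample numbers are simplified stand-ins (assumptions), not the
kernel's definitions.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl/ntohl */

/* Wrap-safe "seq1 is later than seq2", mirroring the kernel's after(). */
static int after(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) > 0;
}

struct nat_seq {                /* hypothetical stand-in for struct nf_nat_seq */
        uint32_t correction_pos;
        int16_t offset_before, offset_after;
};

int main(void)
{
        struct nat_seq this_way = { .correction_pos = 1000,
                                    .offset_before = 0, .offset_after = 4 };
        uint32_t seq = htonl(1500);     /* on-wire sequence number */
        int16_t seqoff;

        /* Pick the offset first ... */
        if (after(ntohl(seq), this_way.correction_pos))
                seqoff = this_way.offset_after;
        else
                seqoff = this_way.offset_before;

        /* ... then apply it; seqoff is also what window tracking now sees. */
        uint32_t newseq = htonl(ntohl(seq) + seqoff);
        printf("seq %u -> %u (seqoff %d)\n", ntohl(seq), ntohl(newseq), seqoff);
        return 0;
}
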
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3cb7092..278f46f5011b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,35 @@ restart:
         now = jiffies;
 
         if (!rt_caching(dev_net(rt->u.dst.dev))) {
-                rt_drop(rt);
-                return 0;
+                /*
+                 * If we're not caching, just tell the caller we
+                 * were successful and don't touch the route.  The
+                 * caller hold the sole reference to the cache entry, and
+                 * it will be released when the caller is done with it.
+                 * If we drop it here, the callers have no way to resolve routes
+                 * when we're not caching.  Instead, just point *rp at rt, so
+                 * the caller gets a single use out of the route
+                 * Note that we do rt_free on this new route entry, so that
+                 * once its refcount hits zero, we are still able to reap it
+                 * (Thanks Alexey)
+                 * Note also the rt_free uses call_rcu.  We don't actually
+                 * need rcu protection here, this is just our path to get
+                 * on the route gc list.
+                 */
+
+                if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+                        int err = arp_bind_neighbour(&rt->u.dst);
+                        if (err) {
+                                if (net_ratelimit())
+                                        printk(KERN_WARNING
+                                            "Neighbour table failure & not caching routes.\n");
+                                rt_drop(rt);
+                                return err;
+                        }
+                }
+
+                rt_free(rt);
+                goto skip_hashing;
         }
 
         rthp = &rt_hash_table[hash].chain;
@@ -1203,7 +1230,8 @@ restart:
 #if RT_CACHE_DEBUG >= 2
         if (rt->u.dst.rt_next) {
                 struct rtable *trt;
-                printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+                printk(KERN_DEBUG "rt_cache @%02x: %pI4",
+                       hash, &rt->rt_dst);
                 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
                         printk(" . %pI4", &trt->rt_dst);
                 printk("\n");
@@ -1217,6 +1245,8 @@ restart:
         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
         spin_unlock_bh(rt_hash_lock_addr(hash));
+
+skip_hashing:
         if (rp)
                 *rp = rt;
         else
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 17b89c523f9d..7870a535dac6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                 iov++;
 
                 while (seglen > 0) {
-                        int copy;
+                        int copy = 0;
+                        int max = size_goal;
 
                         skb = tcp_write_queue_tail(sk);
+                        if (tcp_send_head(sk)) {
+                                if (skb->ip_summed == CHECKSUM_NONE)
+                                        max = mss_now;
+                                copy = max - skb->len;
+                        }
 
-                        if (!tcp_send_head(sk) ||
-                            (copy = size_goal - skb->len) <= 0) {
-
+                        if (copy <= 0) {
 new_segment:
                                 /* Allocate new segment. If the interface is SG,
                                  * allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
 
                                 skb_entail(sk, skb);
                                 copy = size_goal;
+                                max = size_goal;
                         }
 
                         /* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
                         if ((seglen -= copy) == 0 && iovlen == 0)
                                 goto out;
 
-                        if (skb->len < size_goal || (flags & MSG_OOB))
+                        if (skb->len < max || (flags & MSG_OOB))
                                 continue;
 
                         if (forced_push(tp)) {
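
The net effect of the new copy/max bookkeeping: data appended to a tail skb
that is not using checksum offload is capped at one MSS, while offloaded skbs
may still grow to size_goal. A minimal userspace model of that budget
calculation (function and parameter names are local stand-ins; the CHECKSUM_*
values are assumed from the kernel headers).

#include <stdio.h>

enum { CHECKSUM_NONE = 0, CHECKSUM_PARTIAL = 3 };

static int copy_budget(int skb_len, int ip_summed, int mss_now, int size_goal)
{
        int max = size_goal;

        if (ip_summed == CHECKSUM_NONE)
                max = mss_now;          /* no offload: keep the skb to one MSS */
        return max - skb_len;           /* <= 0 forces a new segment */
}

int main(void)
{
        /* 1448-byte MSS, size_goal of 10 * MSS, 1200 bytes already queued. */
        printf("offloaded:  %d bytes\n",
               copy_budget(1200, CHECKSUM_PARTIAL, 1448, 14480));
        printf("no offload: %d bytes\n",
               copy_budget(1200, CHECKSUM_NONE, 1448, 14480));
        return 0;
}
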
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43bbba7926ee..f8d67ccc64f3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -128,7 +128,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                         goto kill_with_rst;
 
                 /* Dup ACK? */
-                if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+                if (!th->ack ||
+                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                     TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                         inet_twsk_put(tw);
                         return TCP_TW_SUCCESS;
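
With the extra !th->ack test, a TIME-WAIT socket now ignores a data-less
segment that lacks the ACK bit just as it ignores a duplicate ACK. A small
standalone model of the check, using a local wrap-safe after() helper that
mirrors the kernel macro (an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

static int after(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) > 0;
}

/* Returns 1 if the segment should be ignored by the TIME-WAIT socket. */
static int ignore_in_timewait(int ack_flag, uint32_t seq, uint32_t end_seq,
                              uint32_t rcv_nxt)
{
        return !ack_flag ||
               !after(end_seq, rcv_nxt) ||
               end_seq == seq;
}

int main(void)
{
        printf("%d\n", ignore_in_timewait(0, 100, 100, 100)); /* 1: no ACK bit */
        printf("%d\n", ignore_in_timewait(1, 100, 200, 100)); /* 0: new data   */
        return 0;
}
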
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 416fc4c2e7eb..5bdf08d312d9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
                                  unsigned int mss_now)
 {
-        if (skb->len <= mss_now || !sk_can_gso(sk)) {
+        if (skb->len <= mss_now || !sk_can_gso(sk) ||
+            skb->ip_summed == CHECKSUM_NONE) {
                 /* Avoid the costly divide in the normal
                  * non-TSO case.
                  */
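
After this change, skbs that are not checksum-offloaded also take the "one
segment" fast path instead of being marked for GSO. A plain C sketch of the
resulting decision follows; the DIV_ROUND_UP fallback in the else branch is
the usual body of this helper but sits outside the hunk above, so treat it as
an assumption.

#include <stdio.h>

enum { CHECKSUM_NONE = 0, CHECKSUM_PARTIAL = 3 };

static unsigned int tso_segs(unsigned int len, unsigned int mss_now,
                             int can_gso, int ip_summed)
{
        if (len <= mss_now || !can_gso || ip_summed == CHECKSUM_NONE)
                return 1;       /* hand the stack one segment at a time */
        return (len + mss_now - 1) / mss_now;   /* DIV_ROUND_UP(len, mss_now) */
}

int main(void)
{
        printf("%u\n", tso_segs(7000, 1448, 1, CHECKSUM_PARTIAL));     /* 5 */
        printf("%u\n", tso_segs(7000, 1448, 1, CHECKSUM_NONE));        /* 1 */
        return 0;
}
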