Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/esp4.c                                17
-rw-r--r--  net/ipv4/inet_connection_sock.c                 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c                   1
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c                       2
-rw-r--r--  net/ipv4/netfilter/Kconfig                     10
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                14
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c         13
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c      48
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_icmp.c    3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c    27
-rw-r--r--  net/ipv4/netfilter/ip_tables.c                 17
-rw-r--r--  net/ipv4/tcp_bic.c                              2
-rw-r--r--  net/ipv4/tcp_output.c                          13
13 files changed, 131 insertions, 38 deletions
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1b5a09d1b90b..1b18ce66e7b7 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -5,6 +5,7 @@
 #include <net/esp.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
 #include <net/icmp.h>
@@ -42,10 +43,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esp = x->data;
 	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
-	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
-	clen = (clen + 2 + blksize-1)&~(blksize-1);
+	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	clen = ALIGN(clen + 2, blksize);
 	if (esp->conf.padlen)
-		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
+		clen = ALIGN(clen, esp->conf.padlen);
 
 	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
 		goto error;
@@ -143,7 +144,7 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
 	struct sk_buff *trailer;
-	int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
+	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
 	int alen = esp->auth.icv_trunc_len;
 	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
 	int nfrags;
@@ -304,16 +305,16 @@ static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap,
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
+	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
 
 	if (x->props.mode) {
-		mtu = (mtu + 2 + blksize-1)&~(blksize-1);
+		mtu = ALIGN(mtu + 2, blksize);
 	} else {
 		/* The worst case. */
-		mtu += 2 + blksize;
+		mtu = ALIGN(mtu + 2, 4) + blksize - 4;
 	}
 	if (esp->conf.padlen)
-		mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1);
+		mtu = ALIGN(mtu, esp->conf.padlen);
 
 	return mtu + x->props.header_len + esp->auth.icv_trunc_len;
 }
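
The esp4.c hunks above replace open-coded round-up arithmetic with the ALIGN() macro from <linux/kernel.h> (hence the new include). A standalone sketch of the equivalence follows; the ALIGN() definition here is simplified (the kernel's is typeof-based) but computes the same value for a power-of-two alignment:

    /* Simplified stand-in for the kernel macro: round x up to the next
     * multiple of a (a must be a power of two). */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
        unsigned int blksize = 7;                      /* e.g. a cipher block size */
        unsigned int new_style = ALIGN(blksize, 4);
        unsigned int old_style = (blksize + 3) & ~3;   /* open-coded form removed above */

        printf("ALIGN(%u, 4) = %u, open-coded = %u\n",
               blksize, new_style, old_style);
        return 0;
    }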
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fe3c6d3d0c91..94468a76c5b4 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -494,7 +494,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
 
 struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
-			    const unsigned int __nocast priority)
+			    const gfp_t priority)
 {
 	struct sock *newsk = sk_clone(sk, priority);
 
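
Several hunks in this series (here, in ip_vs_app.c and in tcp_output.c below) convert allocation-priority arguments from plain integers or the older __nocast annotation to the dedicated gfp_t type, which sparse can check. A minimal userspace sketch of the idea; the __bitwise annotation and the GFP_ATOMIC value are illustrative, not the kernel's definitions:

    #include <stdio.h>
    #include <stdlib.h>

    #define __bitwise                    /* expands to nothing without sparse */
    typedef unsigned int __bitwise gfp_t;

    #define GFP_ATOMIC ((gfp_t)0x20u)    /* illustrative value only */

    /* Callers must now pass a gfp_t, so a bare int can be flagged. */
    static void *my_alloc(size_t size, gfp_t priority)
    {
        (void)priority;                  /* a real allocator would honour the flags */
        return malloc(size);
    }

    int main(void)
    {
        void *p = my_alloc(64, GFP_ATOMIC);
        printf("allocated %p with typed allocation flags\n", p);
        free(p);
        return 0;
    }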
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index f9076ef3a1a8..a010e9a68811 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -111,6 +111,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 		tw->tw_prot	    = sk->sk_prot_creator;
 		atomic_set(&tw->tw_refcnt, 1);
 		inet_twsk_dead_node_init(tw);
+		__module_get(tw->tw_prot->owner);
 	}
 
 	return tw;
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 6e092dadb388..fc6f95aaa969 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = {
 /*
  *	Replace a segment of data with a new segment
  */
-int ip_vs_skb_replace(struct sk_buff *skb, int pri,
+int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
 		      char *o_buf, int o_len, char *n_buf, int n_len)
 {
 	struct iphdr *iph;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2cd7e7d1ac90..7d917e4ce1d9 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -139,9 +139,10 @@ config IP_NF_AMANDA
 
 config IP_NF_PPTP
 	tristate  'PPTP protocol support'
+	depends on IP_NF_CONNTRACK
 	help
 	  This module adds support for PPTP (Point to Point Tunnelling
-	  Protocol, RFC2637) conncection tracking and NAT.
+	  Protocol, RFC2637) connection tracking and NAT.
 
 	  If you are running PPTP sessions over a stateful firewall or NAT
 	  box, you may want to enable this feature.
@@ -498,9 +499,14 @@ config IP_NF_TARGET_LOG
 	  To compile it as a module, choose M here.  If unsure, say N.
 
 config IP_NF_TARGET_ULOG
-	tristate "ULOG target support"
+	tristate "ULOG target support (OBSOLETE)"
 	depends on IP_NF_IPTABLES
 	---help---
+
+	  This option enables the old IPv4-only "ipt_ULOG" implementation
+	  which has been obsoleted by the new "nfnetlink_log" code (see
+	  CONFIG_NETFILTER_NETLINK_LOG).
+
 	  This option adds a `ULOG' target, which allows you to create rules in
 	  any iptables table. The packet is passed to a userspace logging
 	  daemon using netlink multicast sockets; unlike the LOG target
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fa1634256680..a7969286e6e7 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -716,8 +716,10 @@ static int translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for (i = 1; i < num_possible_cpus(); i++) {
-		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+	for_each_cpu(i) {
+		if (i == 0)
+			continue;
+		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
 		       newinfo->entries,
 		       SMP_ALIGN(newinfo->size));
 	}
@@ -767,7 +769,7 @@ static void get_counters(const struct arpt_table_info *t,
 	unsigned int cpu;
 	unsigned int i;
 
-	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+	for_each_cpu(cpu) {
 		i = 0;
 		ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
 				   t->size,
@@ -885,7 +887,8 @@ static int do_replace(void __user *user, unsigned int len)
 		return -ENOMEM;
 
 	newinfo = vmalloc(sizeof(struct arpt_table_info)
-			  + SMP_ALIGN(tmp.size) * num_possible_cpus());
+			  + SMP_ALIGN(tmp.size) *
+					(highest_possible_processor_id()+1));
 	if (!newinfo)
 		return -ENOMEM;
 
@@ -1158,7 +1161,8 @@ int arpt_register_table(struct arpt_table *table,
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
 
 	newinfo = vmalloc(sizeof(struct arpt_table_info)
-			  + SMP_ALIGN(repl->size) * num_possible_cpus());
+			  + SMP_ALIGN(repl->size) *
+					(highest_possible_processor_id()+1));
 	if (!newinfo) {
 		ret = -ENOMEM;
 		return ret;
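
The arp_tables.c changes above (and the matching ip_tables.c hunks below) stop assuming that possible CPU ids are dense: with sparse numbering, num_possible_cpus() can be smaller than the highest possible id plus one, so the old loop bound could skip CPUs and the old vmalloc size could be too small. A userspace sketch of the distinction, using a made-up possible-CPU set:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical sparse map: CPUs 0, 1, 4 and 5 are possible */
        int possible[] = { 0, 1, 4, 5 };
        int n = sizeof(possible) / sizeof(possible[0]);
        int highest = possible[n - 1];
        int i;

        printf("num_possible_cpus() analogue: %d\n", n);           /* 4 */
        printf("highest possible id + 1:      %d\n", highest + 1); /* 6 */

        /* a 0..n-1 loop would never reach the per-CPU copies for CPUs 4 and 5 */
        for (i = 0; i < n; i++)
            printf("for_each_cpu() analogue visits cpu %d\n", possible[i]);
        return 0;
    }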
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index ea65dd3e517a..07a80b56e8dc 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -1119,7 +1119,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
 			  unsigned long extra_jiffies,
 			  int do_acct)
 {
-	int do_event = 0;
+	int event = 0;
 
 	IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
 	IP_NF_ASSERT(skb);
@@ -1129,13 +1129,13 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
 	/* If not in hash table, timer will not be active yet */
 	if (!is_confirmed(ct)) {
 		ct->timeout.expires = extra_jiffies;
-		do_event = 1;
+		event = IPCT_REFRESH;
 	} else {
 		/* Need del_timer for race avoidance (may already be dying). */
 		if (del_timer(&ct->timeout)) {
 			ct->timeout.expires = jiffies + extra_jiffies;
 			add_timer(&ct->timeout);
-			do_event = 1;
+			event = IPCT_REFRESH;
 		}
 	}
 
@@ -1144,14 +1144,17 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
 		ct->counters[CTINFO2DIR(ctinfo)].packets++;
 		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
 					ntohs(skb->nh.iph->tot_len);
+		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+			event |= IPCT_COUNTER_FILLING;
 	}
 #endif
 
 	write_unlock_bh(&ip_conntrack_lock);
 
 	/* must be unlocked when calling event cache */
-	if (do_event)
-		ip_conntrack_event_cache(IPCT_REFRESH, skb);
+	if (event)
+		ip_conntrack_event_cache(event, skb);
 }
 
 #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
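
The check added above raises an IPCT_COUNTER_FILLING event once either 32-bit accounting counter has its top bit set, giving listeners a chance to read (and reset) the counters before they wrap. A small sketch of the same test; the flag values are illustrative, not the kernel's:

    #include <stdio.h>
    #include <stdint.h>

    #define IPCT_REFRESH          (1 << 0)   /* illustrative flag values */
    #define IPCT_COUNTER_FILLING  (1 << 1)

    int main(void)
    {
        uint32_t bytes = 0x7ffffc00;         /* just below the halfway point */
        unsigned int event = IPCT_REFRESH;

        bytes += 1500;                       /* account one more packet */
        if (bytes & 0x80000000)
            event |= IPCT_COUNTER_FILLING;   /* counter is more than half full */

        printf("bytes=0x%08x event=0x%x\n", bytes, event);
        return 0;
    }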
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index b08a432efcf8..166e6069f121 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -177,11 +177,11 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
 	struct nfattr *nest_count = NFA_NEST(skb, type);
 	u_int64_t tmp;
 
-	tmp = cpu_to_be64(ct->counters[dir].packets);
-	NFA_PUT(skb, CTA_COUNTERS_PACKETS, sizeof(u_int64_t), &tmp);
+	tmp = htonl(ct->counters[dir].packets);
+	NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp);
 
-	tmp = cpu_to_be64(ct->counters[dir].bytes);
-	NFA_PUT(skb, CTA_COUNTERS_BYTES, sizeof(u_int64_t), &tmp);
+	tmp = htonl(ct->counters[dir].bytes);
+	NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp);
 
 	NFA_NEST_END(skb, nest_count);
 
@@ -833,7 +833,8 @@ out:
 static inline int
 ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
 {
-	unsigned long d, status = *(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]);
+	unsigned long d;
+	unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]));
 	d = ct->status ^ status;
 
 	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
@@ -948,6 +949,31 @@ ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
 	return 0;
 }
 
+static inline int
+ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
+{
+	struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1];
+	struct ip_conntrack_protocol *proto;
+	u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+	int err = 0;
+
+	if (nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr) < 0)
+		goto nfattr_failure;
+
+	proto = ip_conntrack_proto_find_get(npt);
+	if (!proto)
+		return -EINVAL;
+
+	if (proto->from_nfattr)
+		err = proto->from_nfattr(tb, ct);
+	ip_conntrack_proto_put(proto);
+
+	return err;
+
+nfattr_failure:
+	return -ENOMEM;
+}
+
 static int
 ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
 {
@@ -973,6 +999,12 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
 			return err;
 	}
 
+	if (cda[CTA_PROTOINFO-1]) {
+		err = ctnetlink_change_protoinfo(ct, cda);
+		if (err < 0)
+			return err;
+	}
+
 	DEBUGP("all done\n");
 	return 0;
 }
@@ -1002,6 +1034,12 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
 	if (err < 0)
 		goto err;
 
+	if (cda[CTA_PROTOINFO-1]) {
+		err = ctnetlink_change_protoinfo(ct, cda);
+		if (err < 0)
+			return err;
+	}
+
 	ct->helper = ip_conntrack_helper_find_get(rtuple);
 
 	add_timer(&ct->timeout);
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 838d1d69b36e..98f0015dd255 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -296,8 +296,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
 				struct ip_conntrack_tuple *tuple)
 {
 	if (!tb[CTA_PROTO_ICMP_TYPE-1]
-	    || !tb[CTA_PROTO_ICMP_CODE-1]
-	    || !tb[CTA_PROTO_ICMP_ID-1])
+	    || !tb[CTA_PROTO_ICMP_CODE-1])
 		return -1;
 
 	tuple->dst.u.icmp.type =
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 121760d6cc50..d6701cafbcc2 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -341,17 +341,43 @@ static int tcp_print_conntrack(struct seq_file *s,
 static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
 			 const struct ip_conntrack *ct)
 {
+	struct nfattr *nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
+
 	read_lock_bh(&tcp_lock);
 	NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
 		&ct->proto.tcp.state);
 	read_unlock_bh(&tcp_lock);
 
+	NFA_NEST_END(skb, nest_parms);
+
 	return 0;
 
 nfattr_failure:
 	read_unlock_bh(&tcp_lock);
 	return -1;
 }
+
+static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
+{
+	struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1];
+	struct nfattr *tb[CTA_PROTOINFO_TCP_MAX];
+
+	if (nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr) < 0)
+		goto nfattr_failure;
+
+	if (!tb[CTA_PROTOINFO_TCP_STATE-1])
+		return -EINVAL;
+
+	write_lock_bh(&tcp_lock);
+	ct->proto.tcp.state =
+		*(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]);
+	write_unlock_bh(&tcp_lock);
+
+	return 0;
+
+nfattr_failure:
+	return -1;
+}
 #endif
 
 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
@@ -1123,6 +1149,7 @@ struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
 #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
     defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
 	.to_nfattr		= tcp_to_nfattr,
+	.from_nfattr		= nfattr_to_tcp,
 	.tuple_to_nfattr	= ip_ct_port_tuple_to_nfattr,
 	.nfattr_to_tuple	= ip_ct_port_nfattr_to_tuple,
 #endif
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index eef99a1b5de6..75c27e92f6ab 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -27,6 +27,7 @@
 #include <asm/semaphore.h>
 #include <linux/proc_fs.h>
 #include <linux/err.h>
+#include <linux/cpumask.h>
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 
@@ -921,8 +922,10 @@ translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for (i = 1; i < num_possible_cpus(); i++) {
-		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+	for_each_cpu(i) {
+		if (i == 0)
+			continue;
+		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
 		       newinfo->entries,
 		       SMP_ALIGN(newinfo->size));
 	}
@@ -943,7 +946,7 @@ replace_table(struct ipt_table *table,
 	struct ipt_entry *table_base;
 	unsigned int i;
 
-	for (i = 0; i < num_possible_cpus(); i++) {
+	for_each_cpu(i) {
 		table_base =
 			(void *)newinfo->entries
 			+ TABLE_OFFSET(newinfo, i);
@@ -990,7 +993,7 @@ get_counters(const struct ipt_table_info *t,
 	unsigned int cpu;
 	unsigned int i;
 
-	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+	for_each_cpu(cpu) {
 		i = 0;
 		IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
 				  t->size,
@@ -1128,7 +1131,8 @@ do_replace(void __user *user, unsigned int len)
 		return -ENOMEM;
 
 	newinfo = vmalloc(sizeof(struct ipt_table_info)
-			  + SMP_ALIGN(tmp.size) * num_possible_cpus());
+			  + SMP_ALIGN(tmp.size) *
+					(highest_possible_processor_id()+1));
 	if (!newinfo)
 		return -ENOMEM;
 
@@ -1458,7 +1462,8 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
 
 	newinfo = vmalloc(sizeof(struct ipt_table_info)
-			  + SMP_ALIGN(repl->size) * num_possible_cpus());
+			  + SMP_ALIGN(repl->size) *
+					(highest_possible_processor_id()+1));
 	if (!newinfo)
 		return -ENOMEM;
 
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index b940346de4e7..6d80e063c187 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
 		/* slow start */
 		ca->cnt = (cwnd * (BICTCP_B-1))
-			/ cwnd-ca->last_max_cwnd;
+			/ (cwnd - ca->last_max_cwnd);
 	else
 		/* linear increase */
 		ca->cnt = cwnd / max_increment;
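
The tcp_bic.c fix above is pure operator precedence: division binds tighter than subtraction, so the old expression divided by cwnd and then subtracted last_max_cwnd (wrapping around as an unsigned value) instead of dividing by the difference. A standalone sketch with arbitrary numbers:

    #include <stdio.h>

    #define BICTCP_B 4   /* same constant name as in tcp_bic.c */

    int main(void)
    {
        unsigned int cwnd = 100, last_max_cwnd = 90;

        unsigned int buggy = (cwnd * (BICTCP_B - 1)) / cwnd - last_max_cwnd;
        unsigned int fixed = (cwnd * (BICTCP_B - 1)) / (cwnd - last_max_cwnd);

        printf("buggy cnt = %u (3 - 90 wrapped as unsigned)\n", buggy);
        printf("fixed cnt = %u\n", fixed);
        return 0;
    }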
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c5b911f9b662..7114031fdc70 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -435,7 +435,16 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 	int nsize, old_factor;
 	u16 flags;
 
-	BUG_ON(len >= skb->len);
+	if (unlikely(len >= skb->len)) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
+			       "end_seq=%u, skb->len=%u.\n", len, mss_now,
+			       TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
+			       skb->len);
+			WARN_ON(1);
+		}
+		return 0;
+	}
 
 	nsize = skb_headlen(skb) - len;
 	if (nsize < 0)
@@ -1610,7 +1619,7 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue.  This behavior is recommended
  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;